diff --git a/.gitignore b/.gitignore index d351ed2..49c3d08 100644 --- a/.gitignore +++ b/.gitignore @@ -18,5 +18,6 @@ pvt.yaml # Zig build artifacts tui/zig-out/ tui/.zig-cache/ +tui/target/ vitui.md diff --git a/README.md b/README.md index 46fab96..01d3998 100644 --- a/README.md +++ b/README.md @@ -21,18 +21,83 @@ Pre-flight validation, cluster status, and lifecycle orchestration for Talos clu go install github.com/OneNoted/pvt@latest ``` +## TUI build + +The interactive `pvt tui` dashboard now uses a Rust + Ratatui binary named `vitui`. + +Build it from the repository root with: + +```bash +cd tui +cargo build --release +``` + +`pvt tui` will look for `vitui` next to the `pvt` binary, then in `tui/target/release/` +and `tui/target/debug/` relative to the current working directory, the `pvt` binary +directory, and its parent directory. Set `PVT_VITUI_BIN` to use an explicit binary path. + +If your system installs helper binaries outside standard locations, you can also override: + +- `PVT_KUBECTL_BIN` +- `PVT_TALOSCTL_BIN` +- `PVT_CURL_BIN` + ## Usage ```bash pvt config init # generate starter config pvt config validate # validate config syntax +pvt doctor # diagnose local config, helper tools, and API access pvt status summary # per-node cluster overview +pvt drift # compare pvt.yaml with live Proxmox/Talos state +pvt plan remediate # print known remediation commands for drift pvt validate vms # pre-flight VM checks pvt validate vm # check a single VM +pvt backups stale # list stale Proxmox backups +pvt node reboot # plan a safe node reboot +pvt machineconfig diff --against # normalized Talos machine config diff pvt bootstrap # apply machine configs + bootstrap etcd pvt upgrade --image # rolling Talos upgrade across all nodes ``` +### Doctor, Drift, and Plans + +`pvt doctor` checks config discovery, config parsing, helper binaries, Talos and +Kubernetes config files, and Proxmox API reachability. 
`pvt drift` uses the same +Go health snapshot engine as `pvt status summary` to surface VM, Talos, and +validation drift. `pvt plan remediate` prints known fix commands, but does not +apply them. + +```bash +pvt doctor +pvt drift +pvt plan remediate +``` + +### Node Lifecycle + +Node lifecycle commands are plan-first. `drain` and `reboot` can be run with +`--execute`; `add`, `replace`, and `remove` print the ordered operational plan +for review. + +```bash +pvt node drain worker-1 +pvt node reboot worker-1 --execute +pvt node replace old-worker --replacement new-worker +``` + +### Backups + +The backups commands inspect Proxmox storage that supports backup content and +only include backups whose VMID matches a node in `pvt.yaml`. Pruning is a dry +run unless `--execute` is provided. + +```bash +pvt backups list +pvt backups stale --older-than-days 30 +pvt backups prune --older-than-days 30 +``` + ### Bootstrap Applies Talos machine configs and bootstraps etcd for a new cluster. Nodes must already be booted with the Talos ISO in maintenance mode. @@ -51,6 +116,8 @@ Upgrades Talos on all nodes one at a time: workers first, then control plane nod ```bash pvt upgrade --image ghcr.io/siderolabs/installer:v1.12.5 +pvt upgrade preflight --image ghcr.io/siderolabs/installer:v1.12.5 +pvt upgrade postflight --image ghcr.io/siderolabs/installer:v1.12.5 pvt upgrade --image --dry-run # preview upgrade plan pvt upgrade --image --stage # stage upgrade, reboot later pvt upgrade --image --force # skip pre-flight health check @@ -71,7 +138,7 @@ proxmox: endpoint: "https://pve.local:8006" token_id: "pvt@pam!automation" token_secret: "${PVT_PVE_TOKEN}" - tls_verify: false + tls_verify: false # only for self-signed lab setups; prefer true talos: config_path: "~/talos/mycluster/talosconfig" @@ -110,6 +177,6 @@ Findings include the corresponding `qm set` fix command. 
- [x] Cluster status overview - [x] Bootstrap orchestration - [x] Rolling upgrades -- [ ] Node lifecycle management -- [ ] Drift detection -- [ ] TUI dashboard +- [x] Node lifecycle management +- [x] Drift detection +- [x] Rust Ratatui TUI dashboard diff --git a/cmd/backups.go b/cmd/backups.go new file mode 100644 index 0000000..7f5a230 --- /dev/null +++ b/cmd/backups.go @@ -0,0 +1,174 @@ +package cmd + +import ( + "fmt" + "os" + "time" + + "github.com/spf13/cobra" + + "github.com/OneNoted/pvt/internal/backups" + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/proxmox" + "github.com/OneNoted/pvt/internal/ui" +) + +var backupsOlderThanDays int +var backupsExecute bool + +const maxBackupRetentionDays = 36500 + +var backupsCmd = &cobra.Command{ + Use: "backups", + Short: "Inspect and manage Proxmox backup retention", +} + +var backupsListCmd = &cobra.Command{ + Use: "list", + Short: "List Proxmox backup entries", + RunE: runBackupsList, +} + +var backupsStaleCmd = &cobra.Command{ + Use: "stale", + Short: "List Proxmox backup entries older than the retention threshold", + RunE: runBackupsStale, +} + +var backupsPruneCmd = &cobra.Command{ + Use: "prune", + Short: "Prune stale Proxmox backups", + RunE: runBackupsPrune, +} + +func init() { + rootCmd.AddCommand(backupsCmd) + backupsCmd.AddCommand(backupsListCmd) + backupsCmd.AddCommand(backupsStaleCmd) + backupsCmd.AddCommand(backupsPruneCmd) + + for _, command := range []*cobra.Command{backupsStaleCmd, backupsPruneCmd} { + command.Flags().IntVar(&backupsOlderThanDays, "older-than-days", 30, "backup age threshold in days") + } + backupsPruneCmd.Flags().BoolVar(&backupsExecute, "execute", false, "delete stale backups instead of printing the plan") +} + +func runBackupsList(cmd *cobra.Command, args []string) error { + _, cfg, err := loadConfig() + if err != nil { + return err + } + ctx, cancel := liveContext() + defer cancel() + entries, errs := backups.List(ctx, cfg) + printBackupErrors(errs) + 
printBackups(entries) + return nil +} + +func runBackupsStale(cmd *cobra.Command, args []string) error { + _, cfg, err := loadConfig() + if err != nil { + return err + } + retention, err := backupRetention() + if err != nil { + return err + } + ctx, cancel := liveContext() + defer cancel() + entries, errs := backups.List(ctx, cfg) + printBackupErrors(errs) + printBackups(backups.Stale(entries, retention, time.Now())) + return nil +} + +func runBackupsPrune(cmd *cobra.Command, args []string) error { + _, cfg, err := loadConfig() + if err != nil { + return err + } + retention, err := backupRetention() + if err != nil { + return err + } + + ctx, cancel := liveContext() + defer cancel() + stale, errs := backups.List(ctx, cfg) + printBackupErrors(errs) + stale = backups.Stale(stale, retention, time.Now()) + if len(stale) == 0 { + fmt.Println("No stale backups found.") + return nil + } + if !backupsExecute { + printBackups(stale) + fmt.Println("Dry run. Re-run with --execute to delete these backups.") + return nil + } + + clients := proxmoxClientsByName(cfg) + for _, entry := range stale { + client := clients[entry.Cluster] + if client == nil { + return fmt.Errorf("%s: proxmox client unavailable", entry.Cluster) + } + fmt.Printf("Deleting %s on %s/%s\n", entry.VolID, entry.Node, entry.Storage) + if err := client.DeleteBackup(ctx, entry.BackupEntry); err != nil { + return err + } + } + return nil +} + +func backupRetention() (time.Duration, error) { + if backupsOlderThanDays < 1 { + return 0, fmt.Errorf("--older-than-days must be at least 1") + } + if backupsOlderThanDays > maxBackupRetentionDays { + return 0, fmt.Errorf("--older-than-days must be at most %d", maxBackupRetentionDays) + } + return time.Duration(backupsOlderThanDays) * 24 * time.Hour, nil +} + +func printBackups(entries []backups.Entry) { + tbl := ui.NewTable("Cluster", "Node", "Storage", "VMID", "Age", "Size", "VolID") + now := time.Now() + for _, entry := range entries { + ui.AddRow(tbl, + entry.Cluster, 
+ entry.Node, + entry.Storage, + fmt.Sprintf("%d", entry.VMID), + fmt.Sprintf("%dd", backups.AgeDays(entry, now)), + fmt.Sprintf("%d", entry.Size), + entry.VolID, + ) + } + tbl.Render(os.Stdout) +} + +func printBackupErrors(errs []string) { + for _, err := range errs { + fmt.Fprintf(os.Stderr, "Warning: %s\n", err) + } +} + +func proxmoxClientsByName(cfg *config.Config) map[string]*proxmox.Client { + pxByName := map[string]config.ProxmoxCluster{} + for _, cluster := range cfg.Proxmox.Clusters { + pxByName[cluster.Name] = cluster + } + out := map[string]*proxmox.Client{} + for _, cluster := range cfg.Clusters { + if out[cluster.Name] != nil { + continue + } + client, err := proxmox.NewClient(pxByName[cluster.ProxmoxCluster]) + if err == nil { + out[cluster.Name] = client + } + } + return out +} diff --git a/cmd/backups_test.go b/cmd/backups_test.go new file mode 100644 index 0000000..9620cb6 --- /dev/null +++ b/cmd/backups_test.go @@ -0,0 +1,35 @@ +package cmd + +import "testing" + +func TestBackupRetentionRejectsZeroAndNegativeValues(t *testing.T) { + prev := backupsOlderThanDays + t.Cleanup(func() { backupsOlderThanDays = prev }) + + for _, value := range []int{0, -1} { + backupsOlderThanDays = value + if _, err := backupRetention(); err == nil { + t.Fatalf("backupRetention() with %d days returned nil error", value) + } + } +} + +func TestBackupRetentionRejectsUnreasonablyLargeValues(t *testing.T) { + prev := backupsOlderThanDays + t.Cleanup(func() { backupsOlderThanDays = prev }) + + backupsOlderThanDays = maxBackupRetentionDays + 1 + if _, err := backupRetention(); err == nil { + t.Fatal("backupRetention() returned nil error for excessive retention days") + } +} + +func TestBackupRetentionAcceptsPositiveValues(t *testing.T) { + prev := backupsOlderThanDays + t.Cleanup(func() { backupsOlderThanDays = prev }) + + backupsOlderThanDays = 1 + if got, err := backupRetention(); err != nil || got == 0 { + t.Fatalf("backupRetention() = %s, %v; want positive duration", 
got, err) + } +} diff --git a/cmd/doctor.go b/cmd/doctor.go new file mode 100644 index 0000000..b94b4e0 --- /dev/null +++ b/cmd/doctor.go @@ -0,0 +1,43 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/OneNoted/pvt/internal/doctor" + "github.com/OneNoted/pvt/internal/ui" +) + +var doctorCmd = &cobra.Command{ + Use: "doctor", + Short: "Diagnose local pvt configuration and tool access", + RunE: runDoctor, +} + +func init() { + rootCmd.AddCommand(doctorCmd) +} + +func runDoctor(cmd *cobra.Command, args []string) error { + ctx, cancel := liveContext() + defer cancel() + checks := doctor.Run(ctx, cfgFile) + + tbl := ui.NewTable("Severity", "Check", "Status", "Detail") + for _, check := range checks { + status := "OK" + if !check.OK { + status = "FAIL" + } + ui.AddRow(tbl, check.Severity.String(), check.Name, status, check.Detail) + } + tbl.Render(os.Stdout) + fmt.Println(doctor.Summary(checks)) + + if doctor.HasErrors(checks) { + return fmt.Errorf("doctor found error-level failures") + } + return nil +} diff --git a/cmd/drift.go b/cmd/drift.go new file mode 100644 index 0000000..bf4bf19 --- /dev/null +++ b/cmd/drift.go @@ -0,0 +1,49 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/OneNoted/pvt/internal/drift" + "github.com/OneNoted/pvt/internal/health" + "github.com/OneNoted/pvt/internal/ui" +) + +var driftCmd = &cobra.Command{ + Use: "drift", + Short: "Detect drift between pvt config and live cluster state", + RunE: runDrift, +} + +func init() { + rootCmd.AddCommand(driftCmd) +} + +func runDrift(cmd *cobra.Command, args []string) error { + cfgPath, cfg, err := loadConfig() + if err != nil { + return err + } + + ctx, cancel := liveContext() + defer cancel() + snapshot := health.Gather(ctx, cfgPath, cfg) + findings := drift.Detect(snapshot) + if len(findings) == 0 { + fmt.Println("No drift detected.") + return nil + } + + tbl := ui.NewTable("Severity", "Cluster", "Node", "Kind", "Message", 
"Fix") + for _, finding := range findings { + ui.AddRow(tbl, finding.Severity.String(), finding.Cluster, finding.Node, finding.Kind, finding.Message, finding.Fix) + } + tbl.Render(os.Stdout) + + if drift.HasErrors(findings) { + return fmt.Errorf("drift detected") + } + return nil +} diff --git a/cmd/helpers.go b/cmd/helpers.go new file mode 100644 index 0000000..0576c65 --- /dev/null +++ b/cmd/helpers.go @@ -0,0 +1,28 @@ +package cmd + +import ( + "context" + "time" + + "github.com/OneNoted/pvt/internal/config" +) + +func loadConfig() (string, *config.Config, error) { + path := cfgFile + if path == "" { + discovered, err := config.Discover() + if err != nil { + return "", nil, err + } + path = discovered + } + cfg, err := config.Load(path) + if err != nil { + return "", nil, err + } + return path, cfg, nil +} + +func liveContext() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), 30*time.Second) +} diff --git a/cmd/machineconfig.go b/cmd/machineconfig.go new file mode 100644 index 0000000..93dcf5f --- /dev/null +++ b/cmd/machineconfig.go @@ -0,0 +1,81 @@ +package cmd + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/spf13/cobra" + + "github.com/OneNoted/pvt/internal/machineconfig" +) + +var machineConfigCmd = &cobra.Command{ + Use: "machineconfig", + Short: "Inspect Talos machine config files", +} + +var machineConfigDiffCmd = &cobra.Command{ + Use: "diff [node]", + Short: "Diff configured machine config files against another file or directory", + Args: cobra.MaximumNArgs(1), + RunE: runMachineConfigDiff, +} + +var machineConfigAgainst string + +func init() { + rootCmd.AddCommand(machineConfigCmd) + machineConfigCmd.AddCommand(machineConfigDiffCmd) + machineConfigDiffCmd.Flags().StringVar(&machineConfigAgainst, "against", "", "file or directory to compare against") + _ = machineConfigDiffCmd.MarkFlagRequired("against") +} + +func runMachineConfigDiff(cmd *cobra.Command, args []string) error { + _, cfg, err := 
loadConfig() + if err != nil { + return err + } + + foundDiff := false + matched := false + for _, cluster := range cfg.Clusters { + for _, node := range cluster.Nodes { + if len(args) > 0 && args[0] != node.Name { + continue + } + matched = true + left := machineconfig.ResolvePath(cluster.ConfigSource, cluster.Name, node.Name) + right := comparePath(machineConfigAgainst, left) + lines, different, err := machineconfig.DiffFiles(left, right) + if err != nil { + return err + } + if !different { + fmt.Printf("%s: no diff\n", node.Name) + continue + } + foundDiff = true + fmt.Printf("%s: %s -> %s\n", node.Name, left, right) + for _, line := range lines { + fmt.Fprintln(os.Stdout, line) + } + } + } + + if len(args) > 0 && !matched { + return fmt.Errorf("node %q not found in config", args[0]) + } + if len(args) > 0 && !foundDiff { + fmt.Printf("%s: no diff\n", args[0]) + } + return nil +} + +func comparePath(against, configured string) string { + info, err := os.Stat(against) + if err == nil && info.IsDir() { + return filepath.Join(against, filepath.Base(configured)) + } + return against +} diff --git a/cmd/node.go b/cmd/node.go new file mode 100644 index 0000000..e4f7e9f --- /dev/null +++ b/cmd/node.go @@ -0,0 +1,106 @@ +package cmd + +import ( + "fmt" + "os" + "os/exec" + "strings" + + "github.com/spf13/cobra" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/nodeops" + "github.com/OneNoted/pvt/internal/ui" +) + +var nodeExecute bool +var nodeReplacement string + +var nodeCmd = &cobra.Command{ + Use: "node", + Short: "Plan and run node lifecycle operations", +} + +func init() { + rootCmd.AddCommand(nodeCmd) + for _, action := range []string{"add", "drain", "reboot", "remove", "replace"} { + action := action + command := &cobra.Command{ + Use: action + " [node]", + Short: action + " a configured node", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runNodeAction(action, args[0]) + }, + } + 
command.Flags().BoolVar(&nodeExecute, "execute", false, "execute supported commands instead of printing the plan") + if action == "replace" { + command.Flags().StringVar(&nodeReplacement, "replacement", "", "configured replacement node name") + } + nodeCmd.AddCommand(command) + } +} + +func runNodeAction(action, nodeName string) error { + _, cfg, err := loadConfig() + if err != nil { + return err + } + + cluster, node, err := findConfiguredNode(cfg, nodeName) + if err != nil { + return err + } + + steps := nodeops.Plan(action, cluster, node, nodeReplacement) + if len(steps) == 0 { + return fmt.Errorf("unknown node action %q", action) + } + + tbl := ui.NewTable("Step", "Command", "Detail") + for _, step := range steps { + ui.AddRow(tbl, fmt.Sprintf("%d", step.Order), step.Command, step.Detail) + } + tbl.Render(os.Stdout) + + if !nodeExecute { + fmt.Println("Plan only. Re-run with --execute for supported direct actions.") + return nil + } + if action != "drain" && action != "reboot" { + return fmt.Errorf("%s is plan-only in this version", action) + } + for _, step := range steps { + if err := runShellStep(step); err != nil { + return err + } + } + return nil +} + +func findConfiguredNode(cfg *config.Config, name string) (config.ClusterConfig, config.NodeConfig, error) { + for _, cluster := range cfg.Clusters { + for _, node := range cluster.Nodes { + if node.Name == name { + return cluster, node, nil + } + } + } + return config.ClusterConfig{}, config.NodeConfig{}, fmt.Errorf("node %q not found in config", name) +} + +func runShellStep(step nodeops.Step) error { + if len(step.Args) == 0 { + return nil + } + for _, arg := range step.Args[1:] { + if strings.ContainsAny(arg, "\t\n\r") { + return fmt.Errorf("refusing to execute command with control whitespace in argument %q", arg) + } + } + cmd := exec.Command(step.Args[0], step.Args[1:]...) 
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return cmd.Run() +} diff --git a/cmd/plan.go b/cmd/plan.go new file mode 100644 index 0000000..099ccd2 --- /dev/null +++ b/cmd/plan.go @@ -0,0 +1,52 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/spf13/cobra" + + "github.com/OneNoted/pvt/internal/drift" + "github.com/OneNoted/pvt/internal/health" + "github.com/OneNoted/pvt/internal/ui" +) + +var planCmd = &cobra.Command{ + Use: "plan", + Short: "Generate safe operational plans", +} + +var planRemediateCmd = &cobra.Command{ + Use: "remediate", + Short: "Print remediation commands for known drift findings", + RunE: runPlanRemediate, +} + +func init() { + rootCmd.AddCommand(planCmd) + planCmd.AddCommand(planRemediateCmd) +} + +func runPlanRemediate(cmd *cobra.Command, args []string) error { + cfgPath, cfg, err := loadConfig() + if err != nil { + return err + } + + ctx, cancel := liveContext() + defer cancel() + snapshot := health.Gather(ctx, cfgPath, cfg) + findings := drift.Remediations(drift.Detect(snapshot)) + if len(findings) == 0 { + fmt.Println("No known remediations available.") + return nil + } + + tbl := ui.NewTable("Cluster", "Node", "Kind", "Command") + for _, finding := range findings { + ui.AddRow(tbl, finding.Cluster, finding.Node, finding.Kind, finding.Fix) + } + tbl.Render(os.Stdout) + fmt.Println("Plan only. 
Review commands before applying them on the appropriate Proxmox or workstation host.") + return nil +} diff --git a/cmd/root.go b/cmd/root.go index 618753e..46dd1ef 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,8 +1,8 @@ package cmd import ( - "fmt" "os" + "path/filepath" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -33,15 +33,8 @@ func init() { func initConfig() { if cfgFile != "" { viper.SetConfigFile(cfgFile) - } else { - viper.SetConfigName("pvt") - viper.SetConfigType("yaml") - viper.AddConfigPath(".") - - home, err := os.UserHomeDir() - if err == nil { - viper.AddConfigPath(fmt.Sprintf("%s/.config/pvt", home)) - } + } else if discovered := discoverConfigFile(); discovered != "" { + viper.SetConfigFile(discovered) } viper.SetEnvPrefix("PVT") @@ -49,3 +42,35 @@ func initConfig() { _ = viper.ReadInConfig() } + +func discoverConfigFile() string { + if env := os.Getenv("PVT_CONFIG"); env != "" { + if _, err := os.Stat(env); err == nil { + return env + } + } + + if _, err := os.Stat("pvt.yaml"); err == nil { + if abs, err := filepath.Abs("pvt.yaml"); err == nil { + return abs + } + return "pvt.yaml" + } + + home, err := os.UserHomeDir() + if err != nil { + return "" + } + + paths := []string{ + filepath.Join(home, ".config", "pvt", "config.yaml"), + filepath.Join(home, ".config", "pvt", "pvt.yaml"), + } + for _, path := range paths { + if _, err := os.Stat(path); err == nil { + return path + } + } + + return "" +} diff --git a/cmd/root_test.go b/cmd/root_test.go new file mode 100644 index 0000000..e605673 --- /dev/null +++ b/cmd/root_test.go @@ -0,0 +1,106 @@ +package cmd + +import ( + "os" + "path/filepath" + "testing" +) + +func TestDiscoverConfigFilePrefersExplicitEnv(t *testing.T) { + tmp := t.TempDir() + home := filepath.Join(tmp, "home") + explicit := filepath.Join(tmp, "explicit.yaml") + + if err := os.MkdirAll(filepath.Join(home, ".config", "pvt"), 0o755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(explicit, []byte("version: 
\"1\"\n"), 0o644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(home, ".config", "pvt", "config.yaml"), []byte("version: \"1\"\n"), 0o644); err != nil { + t.Fatal(err) + } + + t.Setenv("HOME", home) + t.Setenv("PVT_CONFIG", explicit) + + got := discoverConfigFile() + if got != explicit { + t.Fatalf("expected explicit config %q, got %q", explicit, got) + } +} + +func TestDiscoverConfigFilePrefersLocalRepoConfig(t *testing.T) { + tmp := t.TempDir() + home := filepath.Join(tmp, "home") + repo := filepath.Join(tmp, "repo") + + if err := os.MkdirAll(filepath.Join(home, ".config", "pvt"), 0o755); err != nil { + t.Fatal(err) + } + if err := os.MkdirAll(repo, 0o755); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(home, ".config", "pvt", "config.yaml"), []byte("home\n"), 0o644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(repo, "pvt.yaml"), []byte("repo\n"), 0o644); err != nil { + t.Fatal(err) + } + + prev, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + _ = os.Chdir(prev) + }) + if err := os.Chdir(repo); err != nil { + t.Fatal(err) + } + + t.Setenv("HOME", home) + t.Setenv("PVT_CONFIG", "") + + got := discoverConfigFile() + want := filepath.Join(repo, "pvt.yaml") + if got != want { + t.Fatalf("expected local config %q, got %q", want, got) + } +} + +func TestDiscoverConfigFilePrefersHomeConfigYamlOverLegacyPvtYaml(t *testing.T) { + tmp := t.TempDir() + home := filepath.Join(tmp, "home") + + if err := os.MkdirAll(filepath.Join(home, ".config", "pvt"), 0o755); err != nil { + t.Fatal(err) + } + configPath := filepath.Join(home, ".config", "pvt", "config.yaml") + legacyPath := filepath.Join(home, ".config", "pvt", "pvt.yaml") + if err := os.WriteFile(configPath, []byte("config\n"), 0o644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(legacyPath, []byte("legacy\n"), 0o644); err != nil { + t.Fatal(err) + } + + prev, err := os.Getwd() + if err != nil { + 
t.Fatal(err) + } + t.Cleanup(func() { + _ = os.Chdir(prev) + }) + if err := os.Chdir(tmp); err != nil { + t.Fatal(err) + } + + t.Setenv("HOME", home) + t.Setenv("PVT_CONFIG", "") + + got := discoverConfigFile() + if got != configPath { + t.Fatalf("expected home config %q, got %q", configPath, got) + } +} diff --git a/cmd/status.go b/cmd/status.go index bf394a7..0df77fd 100644 --- a/cmd/status.go +++ b/cmd/status.go @@ -1,14 +1,12 @@ package cmd import ( - "context" "fmt" "os" "github.com/spf13/cobra" - "github.com/OneNoted/pvt/internal/config" - "github.com/OneNoted/pvt/internal/talos" + "github.com/OneNoted/pvt/internal/health" "github.com/OneNoted/pvt/internal/ui" ) @@ -32,107 +30,43 @@ func init() { } func runStatusSummary(cmd *cobra.Command, args []string) error { - cfgPath, err := config.Discover() + cfgPath, cfg, err := loadConfig() if err != nil { return err } - cfg, err := config.Load(cfgPath) - if err != nil { - return err - } - - ctx := context.Background() - - for _, cluster := range cfg.Clusters { + ctx, cancel := liveContext() + defer cancel() + snapshot := health.Gather(ctx, cfgPath, cfg) + for _, cluster := range snapshot.Clusters { fmt.Printf("Cluster: %s (%s)\n\n", cluster.Name, cluster.Endpoint) + tbl := ui.NewTable("Name", "Role", "IP", "PVE Node", "VMID", "Talos Version", "VM Status") - // Collect endpoints from node IPs for querying - var cpEndpoints []string - for _, n := range cluster.Nodes { - if n.Role == "controlplane" { - cpEndpoints = append(cpEndpoints, n.IP) + for _, node := range cluster.Nodes { + version := node.TalosVersion + if version == "" { + version = "unknown" } + vmStatus := node.VMStatus + if vmStatus == "" { + vmStatus = "unknown" + } + ui.AddRow(tbl, + node.Config.Name, + node.Config.Role, + node.Config.IP, + node.Config.ProxmoxNode, + fmt.Sprintf("%d", node.Config.ProxmoxVMID), + version, + vmStatus, + ) } - - if len(cpEndpoints) == 0 { - fmt.Println(" No control plane nodes configured") - continue - } - - tc, err := 
talos.NewClient(ctx, cfg.Talos.ConfigPath, cfg.Talos.Context, cpEndpoints) - if err != nil { - fmt.Fprintf(os.Stderr, " Warning: could not connect to Talos API: %v\n", err) - printOfflineTable(cluster) - continue - } - defer tc.Close() - - // Get all node IPs to query - var allNodes []string - for _, n := range cluster.Nodes { - allNodes = append(allNodes, n.IP) + tbl.Render(os.Stdout) + for _, err := range cluster.Errors { + fmt.Fprintf(os.Stderr, " Warning: %s\n", err) } - - printNodeTable(ctx, tc, cluster, allNodes) fmt.Println() } return nil } - -func printNodeTable(ctx context.Context, tc *talos.Client, cluster config.ClusterConfig, allNodes []string) { - tbl := ui.NewTable("Name", "Role", "IP", "PVE Node", "VMID", "Talos Version", "Status") - - // Try to get versions for all nodes — index by both hostname and IP - versions, vErr := tc.Version(ctx, allNodes...) - versionMap := make(map[string]string) - if vErr == nil { - for _, v := range versions { - versionMap[v.Node] = v.TalosVersion - versionMap[v.Endpoint] = v.TalosVersion - } - } - - for _, node := range cluster.Nodes { - ver := "unknown" - status := "unreachable" - - if v, ok := versionMap[node.Name]; ok { - ver = v - status = "ready" - } else if v, ok := versionMap[node.IP]; ok { - ver = v - status = "ready" - } - - ui.AddRow(tbl, - node.Name, - node.Role, - node.IP, - node.ProxmoxNode, - fmt.Sprintf("%d", node.ProxmoxVMID), - ver, - status, - ) - } - - tbl.Render(os.Stdout) -} - -func printOfflineTable(cluster config.ClusterConfig) { - tbl := ui.NewTable("Name", "Role", "IP", "PVE Node", "VMID", "Status") - - for _, node := range cluster.Nodes { - ui.AddRow(tbl, - node.Name, - node.Role, - node.IP, - node.ProxmoxNode, - fmt.Sprintf("%d", node.ProxmoxVMID), - "offline", - ) - } - - tbl.Render(os.Stdout) -} diff --git a/cmd/tui.go b/cmd/tui.go index 895cf58..e299253 100644 --- a/cmd/tui.go +++ b/cmd/tui.go @@ -24,7 +24,7 @@ func init() { func runTUI(cmd *cobra.Command, args []string) error { binary, err 
:= findVitui() if err != nil { - return fmt.Errorf("vitui binary not found: %w\n\nInstall vitui by running: cd tui && zig build -Doptimize=ReleaseSafe", err) + return fmt.Errorf("vitui binary not found: %w\n\nBuild the Rust TUI by running: cd tui && cargo build --release", err) } // Build vitui args, forwarding the resolved config path @@ -50,25 +50,38 @@ func runTUI(cmd *cobra.Command, args []string) error { // findVitui searches for the vitui binary in standard locations. func findVitui() (string, error) { + if override := os.Getenv("PVT_VITUI_BIN"); override != "" { + if _, err := os.Stat(override); err == nil { + return override, nil + } + } + // 1. Adjacent to the pvt binary self, err := os.Executable() + selfDir := "" if err == nil { + selfDir = filepath.Dir(self) adjacent := filepath.Join(filepath.Dir(self), "vitui") if _, err := os.Stat(adjacent); err == nil { return adjacent, nil } } - // 2. In the tui/zig-out/bin/ directory relative to working dir - local := filepath.Join("tui", "zig-out", "bin", "vitui") - if _, err := os.Stat(local); err == nil { - return local, nil + // 2. In Rust cargo target directories relative to common roots + searchRoots := []string{"."} + if selfDir != "" { + searchRoots = append(searchRoots, selfDir, filepath.Dir(selfDir)) } - - // 3. 
In $PATH - if p, err := exec.LookPath("vitui"); err == nil { - return p, nil + for _, root := range searchRoots { + for _, local := range []string{ + filepath.Join(root, "tui", "target", "release", "vitui"), + filepath.Join(root, "tui", "target", "debug", "vitui"), + } { + if _, err := os.Stat(local); err == nil { + return local, nil + } + } } - return "", fmt.Errorf("not in PATH, not adjacent to pvt binary, and not in tui/zig-out/bin/") + return "", fmt.Errorf("not adjacent to pvt binary and not in tui/target/{release,debug}; set PVT_VITUI_BIN to override") } diff --git a/cmd/upgrade.go b/cmd/upgrade.go index 8748d57..bb1e336 100644 --- a/cmd/upgrade.go +++ b/cmd/upgrade.go @@ -10,6 +10,7 @@ import ( "github.com/OneNoted/pvt/internal/cluster" "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/health" "github.com/OneNoted/pvt/internal/talos" "github.com/OneNoted/pvt/internal/ui" ) @@ -28,15 +29,35 @@ func init() { upgradeCmd.Flags().Bool("force", false, "force upgrade even if pre-flight fails") upgradeCmd.Flags().Bool("dry-run", false, "show upgrade plan without executing") _ = upgradeCmd.MarkFlagRequired("image") + + upgradePreflightCmd.Flags().String("image", "", "Talos installer image expected for the upgrade") + upgradePostflightCmd.Flags().String("image", "", "Talos installer image expected after the upgrade") + _ = upgradePreflightCmd.MarkFlagRequired("image") + _ = upgradePostflightCmd.MarkFlagRequired("image") + upgradeCmd.AddCommand(upgradePreflightCmd) + upgradeCmd.AddCommand(upgradePostflightCmd) } -func runUpgrade(cmd *cobra.Command, args []string) error { - cfgPath, err := config.Discover() - if err != nil { - return err - } +var upgradePreflightCmd = &cobra.Command{ + Use: "preflight [cluster-name]", + Short: "Report upgrade readiness before a rolling upgrade", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpgradeReport(cmd, args, false) + }, +} + +var upgradePostflightCmd 
= &cobra.Command{ + Use: "postflight [cluster-name]", + Short: "Report cluster state after a rolling upgrade", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + return runUpgradeReport(cmd, args, true) + }, +} - cfg, err := config.Load(cfgPath) +func runUpgrade(cmd *cobra.Command, args []string) error { + _, cfg, err := loadConfig() if err != nil { return err } @@ -70,8 +91,12 @@ func runUpgrade(cmd *cobra.Command, args []string) error { // Pre-flight health check (skip if --force) if !force && !dryRun { - fmt.Println("Running pre-flight health check...") - if err := tc.WaitHealthy(ctx, cpIPs, workerIPs, 30*time.Second); err != nil { + healthTimeout := clusterCfg.Upgrade.HealthCheckTimeout + if healthTimeout == 0 { + healthTimeout = 30 * time.Second + } + fmt.Printf("Running pre-flight health check (timeout: %s)...\n", healthTimeout) + if err := tc.WaitHealthy(ctx, cpIPs, workerIPs, healthTimeout); err != nil { return fmt.Errorf("pre-flight health check failed (use --force to skip): %w", err) } fmt.Println() @@ -243,3 +268,83 @@ func waitForNode(ctx context.Context, tc *talos.Client, nodeIP string, timeout t return fmt.Errorf("node %s did not respond within %s", nodeIP, timeout) } + +func runUpgradeReport(cmd *cobra.Command, args []string, postflight bool) error { + cfgPath, cfg, err := loadConfig() + if err != nil { + return err + } + clusterCfg, err := resolveCluster(cfg, args) + if err != nil { + return err + } + image, _ := cmd.Flags().GetString("image") + expectedVersion := installerTag(image) + + ctx, cancel := liveContext() + defer cancel() + snapshot := healthForUpgrade(ctx, cfgPath, cfg, clusterCfg.Name) + title := "Upgrade preflight" + if postflight { + title = "Upgrade postflight" + } + fmt.Printf("%s for %q\n", title, clusterCfg.Name) + fmt.Printf("Image: %s\n", image) + if expectedVersion != "" { + fmt.Printf("Expected Talos version: %s\n", expectedVersion) + } + fmt.Println() + + tbl := ui.NewTable("Node", 
"Role", "VM", "Talos", "Status") + failed := false + for _, cluster := range snapshot.Clusters { + for _, node := range cluster.Nodes { + status, nodeFailed := upgradeReportNodeStatus(node, postflight, expectedVersion) + failed = failed || nodeFailed + ui.AddRow(tbl, node.Config.Name, node.Config.Role, node.VMStatus, node.TalosVersion, status) + } + } + tbl.Render(os.Stdout) + + if postflight { + if failed { + return fmt.Errorf("upgrade postflight validation failed") + } + return nil + } + fmt.Println("Preflight is advisory. The upgrade command still performs its own health gate unless --force is used.") + return nil +} + +func upgradeReportNodeStatus(node health.NodeSnapshot, postflight bool, expectedVersion string) (string, bool) { + if node.VMStatus != "" && node.VMStatus != "running" { + return "vm " + node.VMStatus, postflight + } + if node.TalosVersion == "" { + return "talos unavailable", postflight + } + if postflight && expectedVersion != "" && node.TalosVersion != expectedVersion { + return "version mismatch", true + } + return "ready", false +} + +func healthForUpgrade(ctx context.Context, cfgPath string, cfg *config.Config, clusterName string) health.Snapshot { + filtered := *cfg + filtered.Clusters = nil + for _, cluster := range cfg.Clusters { + if cluster.Name == clusterName { + filtered.Clusters = append(filtered.Clusters, cluster) + } + } + return health.Gather(ctx, cfgPath, &filtered) +} + +func installerTag(image string) string { + for i := len(image) - 1; i >= 0; i-- { + if image[i] == ':' { + return image[i+1:] + } + } + return "" +} diff --git a/cmd/upgrade_test.go b/cmd/upgrade_test.go new file mode 100644 index 0000000..cad8e49 --- /dev/null +++ b/cmd/upgrade_test.go @@ -0,0 +1,49 @@ +package cmd + +import ( + "testing" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/health" +) + +func TestUpgradeReportNodeStatusPostflightFailures(t *testing.T) { + tests := []struct { + name string + node health.NodeSnapshot 
+ want string + }{ + { + name: "version mismatch", + node: health.NodeSnapshot{Config: config.NodeConfig{Name: "cp-1"}, VMStatus: "running", TalosVersion: "v1.0.0"}, + want: "version mismatch", + }, + { + name: "talos unavailable", + node: health.NodeSnapshot{Config: config.NodeConfig{Name: "cp-1"}, VMStatus: "running"}, + want: "talos unavailable", + }, + { + name: "vm stopped", + node: health.NodeSnapshot{Config: config.NodeConfig{Name: "cp-1"}, VMStatus: "stopped", TalosVersion: "v1.2.0"}, + want: "vm stopped", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, failed := upgradeReportNodeStatus(tt.node, true, "v1.2.0") + if got != tt.want || !failed { + t.Fatalf("upgradeReportNodeStatus() = %q, %v; want %q, true", got, failed, tt.want) + } + }) + } +} + +func TestUpgradeReportNodeStatusReady(t *testing.T) { + node := health.NodeSnapshot{Config: config.NodeConfig{Name: "cp-1"}, VMStatus: "running", TalosVersion: "v1.2.0"} + got, failed := upgradeReportNodeStatus(node, true, "v1.2.0") + if got != "ready" || failed { + t.Fatalf("upgradeReportNodeStatus() = %q, %v; want ready, false", got, failed) + } +} diff --git a/internal/backups/backups.go b/internal/backups/backups.go new file mode 100644 index 0000000..290d7e6 --- /dev/null +++ b/internal/backups/backups.go @@ -0,0 +1,111 @@ +package backups + +import ( + "context" + "fmt" + "sort" + "time" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/proxmox" +) + +// Entry is a Proxmox backup annotated with cluster context. +type Entry struct { + Cluster string + proxmox.BackupEntry +} + +// List returns all known PVE backup entries. 
+func List(ctx context.Context, cfg *config.Config) ([]Entry, []string) { + var entries []Entry + var errs []string + + pxByName := make(map[string]config.ProxmoxCluster) + for _, cluster := range cfg.Proxmox.Clusters { + pxByName[cluster.Name] = cluster + } + + seenNodes := map[string]map[string]bool{} + for _, cluster := range cfg.Clusters { + pxCfg, ok := pxByName[cluster.ProxmoxCluster] + if !ok { + errs = append(errs, fmt.Sprintf("%s: unknown proxmox cluster %q", cluster.Name, cluster.ProxmoxCluster)) + continue + } + configuredVMIDs := vmIDsForCluster(cluster) + client, err := proxmox.NewClient(pxCfg) + if err != nil { + errs = append(errs, fmt.Sprintf("%s: proxmox client: %v", cluster.Name, err)) + continue + } + if seenNodes[cluster.ProxmoxCluster] == nil { + seenNodes[cluster.ProxmoxCluster] = map[string]bool{} + } + for _, node := range cluster.Nodes { + if seenNodes[cluster.ProxmoxCluster][node.ProxmoxNode] { + continue + } + seenNodes[cluster.ProxmoxCluster][node.ProxmoxNode] = true + backups, err := client.ListBackups(ctx, node.ProxmoxNode) + if err != nil { + errs = append(errs, fmt.Sprintf("%s/%s: backups: %v", cluster.Name, node.ProxmoxNode, err)) + continue + } + for _, backup := range filterConfiguredBackups(backups, configuredVMIDs) { + entries = append(entries, Entry{Cluster: cluster.Name, BackupEntry: backup}) + } + } + } + + sort.Slice(entries, func(i, j int) bool { + if entries[i].Cluster != entries[j].Cluster { + return entries[i].Cluster < entries[j].Cluster + } + if entries[i].Node != entries[j].Node { + return entries[i].Node < entries[j].Node + } + return entries[i].CTime > entries[j].CTime + }) + return entries, errs +} + +func vmIDsForCluster(cluster config.ClusterConfig) map[uint64]bool { + vmIDs := make(map[uint64]bool, len(cluster.Nodes)) + for _, node := range cluster.Nodes { + if node.ProxmoxVMID > 0 { + vmIDs[uint64(node.ProxmoxVMID)] = true + } + } + return vmIDs +} + +func filterConfiguredBackups(backups []proxmox.BackupEntry, 
configuredVMIDs map[uint64]bool) []proxmox.BackupEntry { + filtered := make([]proxmox.BackupEntry, 0, len(backups)) + for _, backup := range backups { + if configuredVMIDs[backup.VMID] { + filtered = append(filtered, backup) + } + } + return filtered +} + +// Stale filters entries older than the given duration. +func Stale(entries []Entry, olderThan time.Duration, now time.Time) []Entry { + cutoff := now.Add(-olderThan).Unix() + out := []Entry{} + for _, entry := range entries { + if int64(entry.CTime) < cutoff { + out = append(out, entry) + } + } + return out +} + +// AgeDays returns a whole-day age for display. +func AgeDays(entry Entry, now time.Time) int { + if entry.CTime == 0 { + return 0 + } + return int(now.Sub(time.Unix(int64(entry.CTime), 0)).Hours() / 24) +} diff --git a/internal/backups/backups_test.go b/internal/backups/backups_test.go new file mode 100644 index 0000000..2af8c41 --- /dev/null +++ b/internal/backups/backups_test.go @@ -0,0 +1,56 @@ +package backups + +import ( + "testing" + "time" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/proxmox" +) + +func TestStaleFiltersByAge(t *testing.T) { + now := time.Unix(2000, 0) + entries := []Entry{ + {BackupEntry: proxmox.BackupEntry{VolID: "recent", CTime: uint64(now.Add(-2 * 24 * time.Hour).Unix())}}, + {BackupEntry: proxmox.BackupEntry{VolID: "old", CTime: uint64(now.Add(-40 * 24 * time.Hour).Unix())}}, + } + + got := Stale(entries, 30*24*time.Hour, now) + if len(got) != 1 || got[0].VolID != "old" { + t.Fatalf("Stale() = %#v, want only old backup", got) + } +} + +func TestAgeDays(t *testing.T) { + now := time.Unix(10*24*60*60, 0) + entry := Entry{BackupEntry: proxmox.BackupEntry{CTime: uint64(now.Add(-3 * 24 * time.Hour).Unix())}} + if got := AgeDays(entry, now); got != 3 { + t.Fatalf("AgeDays() = %d, want 3", got) + } +} + +func TestVMIDsForCluster(t *testing.T) { + got := vmIDsForCluster(config.ClusterConfig{ + Nodes: []config.NodeConfig{ + {Name: "cp-1", 
ProxmoxVMID: 100}, + {Name: "worker-1", ProxmoxVMID: 101}, + }, + }) + if !got[100] || !got[101] { + t.Fatalf("vmIDsForCluster() = %#v, want configured VMIDs", got) + } + if got[999] { + t.Fatalf("vmIDsForCluster() includes unrelated VMID") + } +} + +func TestFilterConfiguredBackupsExcludesUnmanagedVMIDs(t *testing.T) { + backups := []proxmox.BackupEntry{ + {VolID: "managed", VMID: 100}, + {VolID: "unmanaged", VMID: 999}, + } + got := filterConfiguredBackups(backups, map[uint64]bool{100: true}) + if len(got) != 1 || got[0].VolID != "managed" { + t.Fatalf("filterConfiguredBackups() = %#v, want only managed backup", got) + } +} diff --git a/internal/doctor/doctor.go b/internal/doctor/doctor.go new file mode 100644 index 0000000..4b3960d --- /dev/null +++ b/internal/doctor/doctor.go @@ -0,0 +1,209 @@ +package doctor + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/proxmox" +) + +// Severity describes how a failed check should affect the command result. +type Severity int + +const ( + SeverityInfo Severity = iota + SeverityWarn + SeverityError +) + +func (s Severity) String() string { + switch s { + case SeverityInfo: + return "INFO" + case SeverityWarn: + return "WARN" + case SeverityError: + return "ERROR" + default: + return "UNKNOWN" + } +} + +// Check is one doctor result. +type Check struct { + Name string + Severity Severity + OK bool + Detail string +} + +// HasErrors reports whether any error-level check failed. +func HasErrors(checks []Check) bool { + for _, check := range checks { + if !check.OK && check.Severity == SeverityError { + return true + } + } + return false +} + +// Run executes local and optional connectivity checks. 
+func Run(ctx context.Context, configPath string) []Check { + var checks []Check + if configPath == "" { + discovered, err := config.Discover() + if err != nil { + return []Check{{ + Name: "config discovery", + Severity: SeverityError, + OK: false, + Detail: err.Error(), + }} + } + configPath = discovered + } + + checks = append(checks, Check{ + Name: "config discovery", + Severity: SeverityError, + OK: true, + Detail: configPath, + }) + + cfg, err := config.Load(configPath) + if err != nil { + checks = append(checks, Check{ + Name: "config parse", + Severity: SeverityError, + OK: false, + Detail: err.Error(), + }) + return checks + } + checks = append(checks, Check{Name: "config parse", Severity: SeverityError, OK: true, Detail: "valid"}) + + checks = append(checks, binaryCheck("kubectl", "PVT_KUBECTL_BIN", SeverityWarn)) + checks = append(checks, binaryCheck("talosctl", "PVT_TALOSCTL_BIN", SeverityWarn)) + + talosPath := expandPath(cfg.Talos.ConfigPath) + _, err = os.Stat(talosPath) + checks = append(checks, Check{ + Name: "talos config", + Severity: SeverityWarn, + OK: err == nil, + Detail: detailOrErr(talosPath, err), + }) + + kubeconfig := os.Getenv("KUBECONFIG") + if kubeconfig == "" { + if home, err := os.UserHomeDir(); err == nil { + kubeconfig = filepath.Join(home, ".kube", "config") + } + } + _, err = os.Stat(kubeconfig) + checks = append(checks, Check{ + Name: "kubeconfig", + Severity: SeverityWarn, + OK: err == nil, + Detail: detailOrErr(kubeconfig, err), + }) + + for _, cluster := range cfg.Proxmox.Clusters { + checks = append(checks, Check{ + Name: "proxmox config " + cluster.Name, + Severity: SeverityError, + OK: cluster.Endpoint != "" && cluster.TokenID != "" && cluster.TokenSecret != "", + Detail: proxmoxConfigDetail(cluster), + }) + + client, err := proxmox.NewClient(cluster) + if err != nil { + checks = append(checks, Check{ + Name: "proxmox auth " + cluster.Name, + Severity: SeverityWarn, + OK: false, + Detail: err.Error(), + }) + continue + } + 
err = client.Ping(ctx) + checks = append(checks, Check{ + Name: "proxmox auth " + cluster.Name, + Severity: SeverityWarn, + OK: err == nil, + Detail: detailOrErr("reachable", err), + }) + } + + return checks +} + +func binaryCheck(name, env string, severity Severity) Check { + if override := os.Getenv(env); override != "" { + _, err := os.Stat(override) + return Check{Name: name, Severity: severity, OK: err == nil, Detail: detailOrErr(override, err)} + } + + path, err := exec.LookPath(name) + return Check{Name: name, Severity: severity, OK: err == nil, Detail: detailOrErr(path, err)} +} + +func proxmoxConfigDetail(cluster config.ProxmoxCluster) string { + missing := []string{} + if cluster.Endpoint == "" { + missing = append(missing, "endpoint") + } + if cluster.TokenID == "" { + missing = append(missing, "token_id") + } + if cluster.TokenSecret == "" { + missing = append(missing, "token_secret") + } + if len(missing) > 0 { + return "missing " + strings.Join(missing, ", ") + } + return cluster.Endpoint +} + +func detailOrErr(detail string, err error) string { + if err != nil { + return err.Error() + } + if detail == "" { + return "ok" + } + return detail +} + +func expandPath(path string) string { + if strings.HasPrefix(path, "~/") { + home, err := os.UserHomeDir() + if err == nil { + return filepath.Join(home, path[2:]) + } + } + return path +} + +// Summary returns a compact count string for tests and callers. 
+func Summary(checks []Check) string { + errors := 0 + warnings := 0 + for _, check := range checks { + if check.OK { + continue + } + switch check.Severity { + case SeverityError: + errors++ + case SeverityWarn: + warnings++ + } + } + return fmt.Sprintf("%d error(s), %d warning(s)", errors, warnings) +} diff --git a/internal/doctor/doctor_test.go b/internal/doctor/doctor_test.go new file mode 100644 index 0000000..dae626d --- /dev/null +++ b/internal/doctor/doctor_test.go @@ -0,0 +1,17 @@ +package doctor + +import "testing" + +func TestSummaryCountsFailedWarningsAndErrors(t *testing.T) { + checks := []Check{ + {Name: "ok", Severity: SeverityError, OK: true}, + {Name: "warn", Severity: SeverityWarn, OK: false}, + {Name: "err", Severity: SeverityError, OK: false}, + } + if got := Summary(checks); got != "1 error(s), 1 warning(s)" { + t.Fatalf("Summary() = %q", got) + } + if !HasErrors(checks) { + t.Fatal("HasErrors() = false, want true") + } +} diff --git a/internal/drift/drift.go b/internal/drift/drift.go new file mode 100644 index 0000000..8666a4b --- /dev/null +++ b/internal/drift/drift.go @@ -0,0 +1,105 @@ +package drift + +import ( + "fmt" + "sort" + + "github.com/OneNoted/pvt/internal/health" + "github.com/OneNoted/pvt/internal/rules" +) + +// Finding describes a detected config/live-state mismatch or risk. +type Finding struct { + Cluster string + Node string + Severity rules.Severity + Kind string + Message string + Fix string +} + +// Detect extracts drift findings from a health snapshot. 
+func Detect(snapshot health.Snapshot) []Finding { + var findings []Finding + for _, cluster := range snapshot.Clusters { + for _, err := range cluster.Errors { + findings = append(findings, Finding{ + Cluster: cluster.Name, + Severity: rules.SeverityWarn, + Kind: "cluster", + Message: err, + }) + } + for _, node := range cluster.Nodes { + if node.VMStatus != "" && node.VMStatus != "running" { + findings = append(findings, Finding{ + Cluster: cluster.Name, + Node: node.Config.Name, + Severity: rules.SeverityError, + Kind: "vm-status", + Message: fmt.Sprintf("VM %d is %s", node.Config.ProxmoxVMID, node.VMStatus), + Fix: fmt.Sprintf("qm start %d", node.Config.ProxmoxVMID), + }) + } + if node.TalosVersion == "" { + findings = append(findings, Finding{ + Cluster: cluster.Name, + Node: node.Config.Name, + Severity: rules.SeverityWarn, + Kind: "talos-reachability", + Message: fmt.Sprintf("Talos version unavailable for %s", node.Config.IP), + }) + } + for _, err := range node.Errors { + findings = append(findings, Finding{ + Cluster: cluster.Name, + Node: node.Config.Name, + Severity: rules.SeverityWarn, + Kind: "observation", + Message: err, + }) + } + for _, validation := range node.ValidationFindings { + findings = append(findings, Finding{ + Cluster: cluster.Name, + Node: node.Config.Name, + Severity: validation.Severity, + Kind: validation.Rule, + Message: validation.Message, + Fix: validation.Fix, + }) + } + } + } + sort.SliceStable(findings, func(i, j int) bool { + if findings[i].Cluster != findings[j].Cluster { + return findings[i].Cluster < findings[j].Cluster + } + if findings[i].Node != findings[j].Node { + return findings[i].Node < findings[j].Node + } + return findings[i].Kind < findings[j].Kind + }) + return findings +} + +// HasErrors reports whether findings include error-level drift. 
+func HasErrors(findings []Finding) bool { + for _, finding := range findings { + if finding.Severity == rules.SeverityError { + return true + } + } + return false +} + +// Remediations returns known fix commands. +func Remediations(findings []Finding) []Finding { + out := []Finding{} + for _, finding := range findings { + if finding.Fix != "" { + out = append(out, finding) + } + } + return out +} diff --git a/internal/drift/drift_test.go b/internal/drift/drift_test.go new file mode 100644 index 0000000..af95c0c --- /dev/null +++ b/internal/drift/drift_test.go @@ -0,0 +1,40 @@ +package drift + +import ( + "testing" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/health" + "github.com/OneNoted/pvt/internal/rules" +) + +func TestDetectIncludesValidationAndRuntimeFindings(t *testing.T) { + snapshot := health.Snapshot{ + Clusters: []health.ClusterSnapshot{{ + Name: "lab", + Nodes: []health.NodeSnapshot{{ + Config: config.NodeConfig{Name: "cp-1", IP: "10.0.0.10", ProxmoxVMID: 100}, + VMStatus: "stopped", + TalosVersion: "", + ValidationFindings: []rules.Finding{{ + Rule: "cpu-type", + Severity: rules.SeverityError, + Message: "CPU type is wrong", + Fix: "qm set 100 -cpu host", + }}, + }}, + }}, + } + + findings := Detect(snapshot) + if len(findings) != 3 { + t.Fatalf("Detect() produced %d findings, want 3", len(findings)) + } + if !HasErrors(findings) { + t.Fatal("HasErrors() = false, want true") + } + remediations := Remediations(findings) + if len(remediations) != 2 { + t.Fatalf("Remediations() produced %d findings, want 2", len(remediations)) + } +} diff --git a/internal/health/health.go b/internal/health/health.go new file mode 100644 index 0000000..bd876b4 --- /dev/null +++ b/internal/health/health.go @@ -0,0 +1,143 @@ +package health + +import ( + "context" + "fmt" + "sort" + + "github.com/OneNoted/pvt/internal/config" + "github.com/OneNoted/pvt/internal/proxmox" + "github.com/OneNoted/pvt/internal/rules" + 
"github.com/OneNoted/pvt/internal/talos" +) + +// Snapshot is a best-effort view of configured and live cluster state. +type Snapshot struct { + ConfigPath string + Clusters []ClusterSnapshot + Errors []string +} + +// ClusterSnapshot is the live state for one configured cluster. +type ClusterSnapshot struct { + Name string + Endpoint string + ProxmoxCluster string + Nodes []NodeSnapshot + Errors []string +} + +// NodeSnapshot is the live state for one configured node. +type NodeSnapshot struct { + Config config.NodeConfig + VMStatus string + VMConfig *proxmox.VMConfig + TalosVersion string + TalosVersionSource string + ValidationFindings []rules.Finding + Errors []string +} + +// Gather builds a best-effort health snapshot from config and live APIs. +func Gather(ctx context.Context, configPath string, cfg *config.Config) Snapshot { + snapshot := Snapshot{ConfigPath: configPath} + pxByName := make(map[string]config.ProxmoxCluster) + for _, px := range cfg.Proxmox.Clusters { + pxByName[px.Name] = px + } + + registry := rules.DefaultRegistry() + for _, clusterCfg := range cfg.Clusters { + clusterSnap := ClusterSnapshot{ + Name: clusterCfg.Name, + Endpoint: clusterCfg.Endpoint, + ProxmoxCluster: clusterCfg.ProxmoxCluster, + } + + versions := talosVersionsByNode(ctx, cfg, clusterCfg, &clusterSnap) + pxCfg, ok := pxByName[clusterCfg.ProxmoxCluster] + var pxClient *proxmox.Client + if ok { + client, err := proxmox.NewClient(pxCfg) + if err != nil { + clusterSnap.Errors = append(clusterSnap.Errors, fmt.Sprintf("proxmox client: %v", err)) + } else { + pxClient = client + } + } else { + clusterSnap.Errors = append(clusterSnap.Errors, fmt.Sprintf("unknown proxmox cluster %q", clusterCfg.ProxmoxCluster)) + } + + for _, node := range clusterCfg.Nodes { + nodeSnap := NodeSnapshot{Config: node} + if version, ok := versions[node.Name]; ok { + nodeSnap.TalosVersion = version.TalosVersion + nodeSnap.TalosVersionSource = version.Node + } else if version, ok := versions[node.IP]; ok 
{ + nodeSnap.TalosVersion = version.TalosVersion + nodeSnap.TalosVersionSource = version.Node + } + + if pxClient != nil { + vmCfg, err := pxClient.GetVMConfig(ctx, node.ProxmoxNode, node.ProxmoxVMID) + if err != nil { + nodeSnap.Errors = append(nodeSnap.Errors, fmt.Sprintf("vm config: %v", err)) + } else { + nodeSnap.VMConfig = vmCfg + nodeSnap.VMStatus = "present" + nodeSnap.ValidationFindings = registry.Validate(vmCfg) + } + if summaries, err := pxClient.ListNodeVMs(ctx, node.ProxmoxNode); err == nil { + for _, summary := range summaries { + if summary.VMID == node.ProxmoxVMID { + nodeSnap.VMStatus = summary.Status + break + } + } + } + } + + clusterSnap.Nodes = append(clusterSnap.Nodes, nodeSnap) + } + sort.Slice(clusterSnap.Nodes, func(i, j int) bool { + return clusterSnap.Nodes[i].Config.Name < clusterSnap.Nodes[j].Config.Name + }) + snapshot.Clusters = append(snapshot.Clusters, clusterSnap) + } + return snapshot +} + +func talosVersionsByNode(ctx context.Context, cfg *config.Config, clusterCfg config.ClusterConfig, clusterSnap *ClusterSnapshot) map[string]talos.NodeVersion { + cpEndpoints := []string{} + allNodes := []string{} + for _, node := range clusterCfg.Nodes { + allNodes = append(allNodes, node.IP) + if node.Role == "controlplane" { + cpEndpoints = append(cpEndpoints, node.IP) + } + } + if len(cpEndpoints) == 0 { + clusterSnap.Errors = append(clusterSnap.Errors, "no control plane nodes configured for Talos query") + return nil + } + + client, err := talos.NewClient(ctx, cfg.Talos.ConfigPath, cfg.Talos.Context, cpEndpoints) + if err != nil { + clusterSnap.Errors = append(clusterSnap.Errors, fmt.Sprintf("talos client: %v", err)) + return nil + } + defer client.Close() + + versions, err := client.Version(ctx, allNodes...) 
+ if err != nil { + clusterSnap.Errors = append(clusterSnap.Errors, fmt.Sprintf("talos versions: %v", err)) + return nil + } + + out := make(map[string]talos.NodeVersion) + for _, version := range versions { + out[version.Node] = version + out[version.Endpoint] = version + } + return out +} diff --git a/internal/machineconfig/diff.go b/internal/machineconfig/diff.go new file mode 100644 index 0000000..c370935 --- /dev/null +++ b/internal/machineconfig/diff.go @@ -0,0 +1,76 @@ +package machineconfig + +import ( + "bytes" + "fmt" + "os" + "strings" + + "gopkg.in/yaml.v3" +) + +// NormalizeYAML returns a consistently marshaled YAML representation. +func NormalizeYAML(data []byte) ([]byte, error) { + var value any + if err := yaml.Unmarshal(data, &value); err != nil { + return nil, fmt.Errorf("parsing YAML: %w", err) + } + normalized, err := yaml.Marshal(value) + if err != nil { + return nil, fmt.Errorf("normalizing YAML: %w", err) + } + return bytes.TrimSpace(normalized), nil +} + +// DiffFiles compares two YAML files after normalization. 
+func DiffFiles(leftPath, rightPath string) ([]string, bool, error) { + leftRaw, err := os.ReadFile(leftPath) + if err != nil { + return nil, false, fmt.Errorf("reading %s: %w", leftPath, err) + } + rightRaw, err := os.ReadFile(rightPath) + if err != nil { + return nil, false, fmt.Errorf("reading %s: %w", rightPath, err) + } + left, err := NormalizeYAML(leftRaw) + if err != nil { + return nil, false, fmt.Errorf("%s: %w", leftPath, err) + } + right, err := NormalizeYAML(rightRaw) + if err != nil { + return nil, false, fmt.Errorf("%s: %w", rightPath, err) + } + if bytes.Equal(left, right) { + return nil, false, nil + } + return lineDiff(string(left), string(right)), true, nil +} + +func lineDiff(left, right string) []string { + leftLines := strings.Split(left, "\n") + rightLines := strings.Split(right, "\n") + max := len(leftLines) + if len(rightLines) > max { + max = len(rightLines) + } + out := []string{} + for i := 0; i < max; i++ { + var l, r string + if i < len(leftLines) { + l = leftLines[i] + } + if i < len(rightLines) { + r = rightLines[i] + } + if l == r { + continue + } + if l != "" { + out = append(out, "- "+l) + } + if r != "" { + out = append(out, "+ "+r) + } + } + return out +} diff --git a/internal/machineconfig/loader_test.go b/internal/machineconfig/loader_test.go index 02dc0b5..a029677 100644 --- a/internal/machineconfig/loader_test.go +++ b/internal/machineconfig/loader_test.go @@ -74,3 +74,23 @@ func TestLoadMachineConfig_NotFound(t *testing.T) { t.Fatal("LoadMachineConfig() expected error for missing file") } } + +func TestDiffFilesNormalizesYAML(t *testing.T) { + dir := t.TempDir() + left := filepath.Join(dir, "left.yaml") + right := filepath.Join(dir, "right.yaml") + if err := os.WriteFile(left, []byte("machine:\n type: worker\n"), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(right, []byte("machine: {type: worker}\n"), 0644); err != nil { + t.Fatal(err) + } + + _, different, err := DiffFiles(left, right) + if err != nil { + 
t.Fatalf("DiffFiles() error = %v", err) + } + if different { + t.Fatal("DiffFiles() different = true, want false for equivalent YAML") + } +} diff --git a/internal/nodeops/nodeops.go b/internal/nodeops/nodeops.go new file mode 100644 index 0000000..c249a14 --- /dev/null +++ b/internal/nodeops/nodeops.go @@ -0,0 +1,68 @@ +package nodeops + +import ( + "fmt" + "strings" + + "github.com/OneNoted/pvt/internal/config" +) + +// Step is one planned node lifecycle operation. +type Step struct { + Order int + Command string + Args []string + Detail string +} + +// Plan builds a conservative plan for a node lifecycle action. +func Plan(action string, cluster config.ClusterConfig, node config.NodeConfig, replacement string) []Step { + switch action { + case "drain": + return []Step{ + newStep(1, []string{"kubectl", "drain", "--ignore-daemonsets", "--delete-emptydir-data", "--", node.Name}, "evict workloads before host-level maintenance"), + } + case "reboot": + return []Step{ + newStep(1, []string{"kubectl", "cordon", "--", node.Name}, "stop new workload placement"), + newStep(2, []string{"kubectl", "drain", "--ignore-daemonsets", "--delete-emptydir-data", "--", node.Name}, "evict workloads before reboot"), + newStep(3, []string{"talosctl", "reboot", "--nodes", node.IP}, "reboot Talos node"), + newStep(4, []string{"kubectl", "wait", "--for=condition=Ready", "--timeout=10m", "node/" + node.Name}, "wait for Kubernetes readiness"), + newStep(5, []string{"kubectl", "uncordon", "--", node.Name}, "resume scheduling after readiness is confirmed"), + } + case "remove": + return []Step{ + newStep(1, []string{"kubectl", "drain", "--ignore-daemonsets", "--delete-emptydir-data", "--", node.Name}, "evict workloads"), + newStep(2, []string{"kubectl", "delete", "node", "--", node.Name}, "remove Kubernetes node object"), + newStep(3, []string{"talosctl", "reset", "--nodes", node.IP, "--graceful=false"}, "reset Talos installation after data backup is confirmed"), + newStep(4, []string{"qm", 
"stop", fmt.Sprintf("%d", node.ProxmoxVMID)}, "stop VM in Proxmox"), + } + case "replace": + target := replacement + if target == "" { + target = "" + } + return []Step{ + newStep(1, []string{"pvt", "node", "add", target}, "bootstrap replacement node from config"), + newStep(2, []string{"pvt", "node", "drain", node.Name, "--execute"}, "move workloads off old node"), + newStep(3, []string{"pvt", "node", "remove", node.Name}, "review the old-node removal plan after replacement is healthy"), + } + case "add": + return []Step{ + newStep(1, []string{"pvt", "validate", "vm", node.Name}, "verify Proxmox VM settings"), + newStep(2, []string{"pvt", "bootstrap", cluster.Name, "--dry-run"}, "preview machine config application"), + newStep(3, []string{"talosctl", "apply-config", "--nodes", node.IP, "--file", ""}, "apply node machine config"), + } + default: + return nil + } +} + +func newStep(order int, args []string, detail string) Step { + return Step{ + Order: order, + Command: strings.Join(args, " "), + Args: args, + Detail: detail, + } +} diff --git a/internal/nodeops/nodeops_test.go b/internal/nodeops/nodeops_test.go new file mode 100644 index 0000000..1976d10 --- /dev/null +++ b/internal/nodeops/nodeops_test.go @@ -0,0 +1,39 @@ +package nodeops + +import ( + "strings" + "testing" + + "github.com/OneNoted/pvt/internal/config" +) + +func TestPlanRebootIsCordonRebootUncordon(t *testing.T) { + node := config.NodeConfig{Name: "worker-1", IP: "10.0.0.11"} + steps := Plan("reboot", config.ClusterConfig{Name: "lab"}, node, "") + if len(steps) != 5 { + t.Fatalf("Plan(reboot) produced %d steps, want 5", len(steps)) + } + if !strings.Contains(steps[1].Command, "kubectl drain") { + t.Fatalf("second command = %q, want kubectl drain", steps[1].Command) + } + if !strings.Contains(steps[2].Command, "talosctl reboot --nodes 10.0.0.11") { + t.Fatalf("third command = %q, want talos reboot", steps[2].Command) + } + if !strings.Contains(steps[3].Command, "kubectl wait") { + t.Fatalf("fourth 
command = %q, want readiness wait", steps[3].Command) + } + if len(steps[0].Args) == 0 || steps[0].Args[len(steps[0].Args)-1] != "worker-1" { + t.Fatalf("first step args = %#v, want structured node argument", steps[0].Args) + } +} + +func TestPlanReplaceUsesReplacementWhenProvided(t *testing.T) { + node := config.NodeConfig{Name: "old-worker"} + steps := Plan("replace", config.ClusterConfig{Name: "lab"}, node, "new-worker") + if len(steps) == 0 || !strings.Contains(steps[0].Command, "new-worker") { + t.Fatalf("Plan(replace) first step = %#v, want replacement node", steps) + } + if strings.Contains(steps[len(steps)-1].Command, "--execute") { + t.Fatalf("replace removal step = %q, want plan-only remove", steps[len(steps)-1].Command) + } +} diff --git a/internal/proxmox/client.go b/internal/proxmox/client.go index 9650aff..ad44907 100644 --- a/internal/proxmox/client.go +++ b/internal/proxmox/client.go @@ -5,6 +5,8 @@ import ( "crypto/tls" "fmt" "net/http" + "net/url" + "strings" pxapi "github.com/luthermonson/go-proxmox" @@ -32,6 +34,30 @@ type VMSummary struct { Node string } +// StorageSummary is a lightweight Proxmox storage representation. +type StorageSummary struct { + Name string + Node string + Type string + Content string + Active bool + Enabled bool + Used uint64 + Total uint64 + Avail uint64 +} + +// BackupEntry represents a vzdump backup volume on Proxmox storage. +type BackupEntry struct { + VolID string + Node string + Storage string + Format string + Size uint64 + CTime uint64 + VMID uint64 +} + // VMConfig holds the VM configuration fields relevant for validation. type VMConfig struct { VMID int @@ -148,6 +174,116 @@ func (c *Client) GetVMConfig(ctx context.Context, node string, vmid int) (*VMCon return cfg, nil } +// ListNodeVMs returns VM summaries for a Proxmox node. 
+func (c *Client) ListNodeVMs(ctx context.Context, node string) ([]VMSummary, error) { + pveNode, err := c.api.Node(ctx, node) + if err != nil { + return nil, fmt.Errorf("getting node %q: %w", node, err) + } + + vms, err := pveNode.VirtualMachines(ctx) + if err != nil { + return nil, fmt.Errorf("listing VMs on node %q: %w", node, err) + } + + out := make([]VMSummary, 0, len(vms)) + for _, vm := range vms { + out = append(out, VMSummary{ + VMID: int(vm.VMID), + Name: vm.Name, + Status: vm.Status, + Node: node, + }) + } + return out, nil +} + +// ListStorages returns storage summaries for a Proxmox node. +func (c *Client) ListStorages(ctx context.Context, node string) ([]StorageSummary, error) { + pveNode, err := c.api.Node(ctx, node) + if err != nil { + return nil, fmt.Errorf("getting node %q: %w", node, err) + } + + storages, err := pveNode.Storages(ctx) + if err != nil { + return nil, fmt.Errorf("listing storages on node %q: %w", node, err) + } + + out := make([]StorageSummary, 0, len(storages)) + for _, storage := range storages { + out = append(out, StorageSummary{ + Name: storage.Name, + Node: node, + Type: storage.Type, + Content: storage.Content, + Active: storage.Active != 0, + Enabled: storage.Enabled != 0, + Used: storage.Used, + Total: storage.Total, + Avail: storage.Avail, + }) + } + return out, nil +} + +// ListBackups returns vzdump backup entries from backup-capable storages. 
+func (c *Client) ListBackups(ctx context.Context, node string) ([]BackupEntry, error) { + pveNode, err := c.api.Node(ctx, node) + if err != nil { + return nil, fmt.Errorf("getting node %q: %w", node, err) + } + + storages, err := pveNode.Storages(ctx) + if err != nil { + return nil, fmt.Errorf("listing storages on node %q: %w", node, err) + } + + var backups []BackupEntry + for _, storage := range storages { + if !strings.Contains(storage.Content, "backup") { + continue + } + content, err := storage.GetContent(ctx) + if err != nil { + return nil, fmt.Errorf("listing backup content on %s/%s: %w", node, storage.Name, err) + } + for _, item := range content { + if item.Volid == "" || !isBackupContent(item.Format, item.Volid) { + continue + } + backups = append(backups, BackupEntry{ + VolID: item.Volid, + Node: node, + Storage: storage.Name, + Format: item.Format, + Size: item.Size, + CTime: uint64(item.Ctime), + VMID: item.VMID, + }) + } + } + return backups, nil +} + +// DeleteBackup deletes a backup volume from Proxmox storage. +func (c *Client) DeleteBackup(ctx context.Context, backup BackupEntry) error { + path := fmt.Sprintf("/nodes/%s/storage/%s/content/%s", backup.Node, backup.Storage, url.PathEscape(backup.VolID)) + var result any + if err := c.api.Delete(ctx, path, &result); err != nil { + return fmt.Errorf("deleting backup %q: %w", backup.VolID, err) + } + return nil +} + +func isBackupContent(format, volID string) bool { + switch format { + case "vma.zst", "vma.gz", "vma.lzo", "pbs-vm": + return true + } + return strings.Contains(volID, ":backup/") +} + // Ping verifies the client can connect to the Proxmox API. 
func (c *Client) Ping(ctx context.Context) error { _, err := c.api.Version(ctx) diff --git a/internal/proxmox/client_test.go b/internal/proxmox/client_test.go new file mode 100644 index 0000000..eb77756 --- /dev/null +++ b/internal/proxmox/client_test.go @@ -0,0 +1,24 @@ +package proxmox + +import "testing" + +func TestIsBackupContent(t *testing.T) { + tests := []struct { + name string + format string + volID string + want bool + }{ + {name: "vma zst", format: "vma.zst", want: true}, + {name: "pbs", format: "pbs-vm", want: true}, + {name: "volid backup path", format: "raw", volID: "local:backup/vzdump-qemu-100.vma.zst", want: true}, + {name: "iso", format: "iso", volID: "local:iso/talos.iso", want: false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := isBackupContent(tt.format, tt.volID); got != tt.want { + t.Fatalf("isBackupContent(%q, %q) = %v, want %v", tt.format, tt.volID, got, tt.want) + } + }) + } +} diff --git a/tui/Cargo.lock b/tui/Cargo.lock new file mode 100644 index 0000000..9e473ed --- /dev/null +++ b/tui/Cargo.lock @@ -0,0 +1,1784 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "anyhow" +version = "1.0.102" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" + +[[package]] +name = "atomic" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" +dependencies = [ + "bytemuck", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bitflags" +version = "2.11.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4512299f36f043ab09a583e57bceb5a5aab7a73db1805848e8fef3c9e8c78b3" + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bumpalo" +version = "3.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" + +[[package]] +name = "bytemuck" +version = "1.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" + +[[package]] +name = "castaway" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dec551ab6e7578819132c713a93c022a05d60159dc86e7a7050223577484c55a" +dependencies = [ + "rustversion", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + +[[package]] +name = "compact_str" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb1325a1cece981e8a296ab8f0f9b63ae357bd0784a9faaf548cc7b480707a" +dependencies = [ + "castaway", + "cfg-if", + "itoa", + "rustversion", + "ryu", + "static_assertions", +] + +[[package]] +name = "convert_case" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = 
"cpufeatures" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" +dependencies = [ + "libc", +] + +[[package]] +name = "crossterm" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" +dependencies = [ + "bitflags 2.11.1", + "crossterm_winapi", + "derive_more", + "document-features", + "mio", + "parking_lot", + "rustix", + "signal-hook", + "signal-hook-mio", + "winapi", +] + +[[package]] +name = "crossterm_winapi" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" +dependencies = [ + "winapi", +] + +[[package]] +name = "crypto-common" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" +dependencies = [ + "generic-array", + "typenum", +] + +[[package]] +name = "csscolorparser" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb2a7d3066da2de787b7f032c736763eb7ae5d355f81a68bab2675a96008b0bf" +dependencies = [ + "lab", + "phf", +] + +[[package]] +name = "darling" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" +dependencies = [ + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.117", +] + +[[package]] +name = "darling_macro" +version = "0.23.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" +dependencies = [ + "darling_core", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "deltae" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5729f5117e208430e437df2f4843f5e5952997175992d1414f94c57d61e270b4" + +[[package]] +name = "deranged" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +dependencies = [ + "powerfmt", +] + +[[package]] +name = "derive_more" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "rustc_version", + "syn 2.0.117", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "crypto-common", +] + +[[package]] +name = "document-features" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4b8a88685455ed29a21542a33abd9cb6510b6b129abadabdcef0f4c55bc8f61" +dependencies = [ + "litrs", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "euclid" +version = "0.22.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1a05365e3b1c6d1650318537c7460c6923f1abdd272ad6842baa2b509957a06" +dependencies = [ + "num-traits", +] + +[[package]] +name = "fancy-regex" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b95f7c0680e4142284cf8b22c14a476e87d61b004a3a0861872b32ef7ead40a2" +dependencies = [ + "bit-set", + "regex", +] + +[[package]] +name = "fastrand" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f1f227452a390804cdb637b74a86990f2a7d7ba4b7d5693aac9b4dd6defd8d6" + +[[package]] +name = "filedescriptor" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e40758ed24c9b2eeb76c35fb0aebc66c626084edd827e07e1552279814c6682d" +dependencies = [ + "libc", + "thiserror 1.0.69", + "winapi", +] + +[[package]] +name = "finl_unicode" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9844ddc3a6e533d62bba727eb6c28b5d360921d5175e9ff0f1e621a5c590a4d5" + +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi 5.3.0", + "wasip2", +] + +[[package]] +name = "getrandom" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" +dependencies = [ + "cfg-if", + "libc", + "r-efi 6.0.0", + "wasip2", + "wasip3", +] + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash 0.1.5", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash 0.2.0", +] + +[[package]] +name = "hashbrown" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f467dd6dccf739c208452f8014c75c18bb8301b050ad1cfb27153803edb0f51" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + 
+[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "indexmap" +version = "2.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d466e9454f08e4a911e14806c24e16fba1b4c121d1ea474396f396069cf949d9" +dependencies = [ + "equivalent", + "hashbrown 0.17.0", + "serde", + "serde_core", +] + +[[package]] +name = "indoc" +version = "2.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706" +dependencies = [ + "rustversion", +] + +[[package]] +name = "instability" +version = "0.3.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eb2d60ef19920a3a9193c3e371f726ec1dafc045dac788d0fb3704272458971" +dependencies = [ + "darling", + "indoc", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" + +[[package]] +name = "js-sys" +version = "0.3.95" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2964e92d1d9dc3364cae4d718d93f227e3abb088e747d92e0395bfdedf1c12ca" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "kasuari" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bde5057d6143cc94e861d90f591b9303d6716c6b9602309150bd068853c10899" +dependencies = [ + "hashbrown 0.16.1", + "portable-atomic", + "thiserror 2.0.18", +] + +[[package]] +name = "lab" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf36173d4167ed999940f804952e6b08197cae5ad5d572eb4db150ce8ad5d58f" + +[[package]] +name = "lazy_static" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "libc" +version = "0.2.185" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ff2c0fe9bc6cb6b14a0592c2ff4fa9ceb83eea9db979b0487cd054946a2b8f" + +[[package]] +name = "line-clipping" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f50e8f47623268b5407192d26876c4d7f89d686ca130fdc53bced4814cd29f8" +dependencies = [ + "bitflags 2.11.1", +] + +[[package]] +name = "linux-raw-sys" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" + +[[package]] +name = "litrs" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11d3d7f243d5c5a8b9bb5d6dd2b1602c0cb0b9db1621bafc7ed66e35ff9fe092" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lru" +version = "0.16.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f66e8d5d03f609abc3a39e6f08e4164ebf1447a732906d39eb9b99b7919ef39" +dependencies = [ + "hashbrown 0.16.1", +] + +[[package]] +name = "mac_address" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0aeb26bf5e836cc1c341c8106051b573f1766dfa05aa87f0b98be5e51b02303" +dependencies = [ + "nix", + "winapi", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "memmem" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a64a92489e2744ce060c349162be1c5f33c6969234104dbd99ddb5feb08b8c15" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "mio" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1" +dependencies = [ + "libc", + "log", + "wasi", + "windows-sys", +] + +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.11.1", + "cfg-if", + "cfg_aliases", + "libc", + "memoffset", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "num-conv" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967" + +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", +] + +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + +[[package]] +name = "once_cell" +version = "1.21.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f7c3e4beb33f85d45ae3e3a1792185706c8e16d043238c593331cc7cd313b50" + +[[package]] +name = "ordered-float" +version = "4.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" +dependencies = [ + "num-traits", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = 
[ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "pest" +version = "2.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0848c601009d37dfa3430c4666e147e49cdcf1b92ecd3e63657d8a5f19da662" +dependencies = [ + "memchr", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11f486f1ea21e6c10ed15d5a7c77165d0ee443402f0780849d1768e7d9d6fe77" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8040c4647b13b210a963c1ed407c1ff4fdfa01c31d6d2a098218702e6664f94f" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "pest_meta" +version = "2.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89815c69d36021a140146f26659a81d6c2afa33d216d736dd4be5381a7362220" +dependencies = [ + "pest", + "sha2", +] + +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_macros", + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +dependencies = [ + "phf_shared", + "rand", +] + +[[package]] +name = "phf_macros" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" +dependencies = [ + "phf_generator", + "phf_shared", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "phf_shared" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" +dependencies = [ + "siphasher", +] + +[[package]] +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" 
+ +[[package]] +name = "r-efi" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" + +[[package]] +name = "rand" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca0ecfa931c29007047d1bc58e623ab12e5590e8c7cc53200d5202b69266d8a" +dependencies = [ + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + +[[package]] +name = "ratatui" +version = "0.30.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d1ce67fb8ba4446454d1c8dbaeda0557ff5e94d39d5e5ed7f10a65eb4c8266bc" +dependencies = [ + "instability", + "ratatui-core", + "ratatui-crossterm", + "ratatui-macros", + "ratatui-termwiz", + "ratatui-widgets", +] + +[[package]] +name = "ratatui-core" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ef8dea09a92caaf73bff7adb70b76162e5937524058a7e5bff37869cbbec293" +dependencies = [ + "bitflags 2.11.1", + "compact_str", + "hashbrown 0.16.1", + "indoc", + "itertools", + "kasuari", + "lru", + "strum", + "thiserror 2.0.18", + "unicode-segmentation", + "unicode-truncate", + "unicode-width", +] + +[[package]] +name = "ratatui-crossterm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "577c9b9f652b4c121fb25c6a391dd06406d3b092ba68827e6d2f09550edc54b3" +dependencies = [ + "cfg-if", + "crossterm", + "instability", + "ratatui-core", +] + +[[package]] +name = "ratatui-macros" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7f1342a13e83e4bb9d0b793d0ea762be633f9582048c892ae9041ef39c936f4" +dependencies = [ + "ratatui-core", + "ratatui-widgets", +] + +[[package]] +name = "ratatui-termwiz" +version = 
"0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f76fe0bd0ed4295f0321b1676732e2454024c15a35d01904ddb315afd3d545c" +dependencies = [ + "ratatui-core", + "termwiz", +] + +[[package]] +name = "ratatui-widgets" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7dbfa023cd4e604c2553483820c5fe8aa9d71a42eea5aa77c6e7f35756612db" +dependencies = [ + "bitflags 2.11.1", + "hashbrown 0.16.1", + "indoc", + "instability", + "itertools", + "line-clipping", + "ratatui-core", + "strum", + "time", + "unicode-segmentation", + "unicode-width", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags 2.11.1", +] + +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" +dependencies = [ + "bitflags 2.11.1", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + 
"zmij", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap", + "itoa", + "ryu", + "serde", + "unsafe-libyaml", +] + +[[package]] +name = "sha2" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "signal-hook" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-mio" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b75a19a7a740b25bc7944bdee6172368f988763b744e3d4dfe753f6b4ece40cc" +dependencies = [ + "libc", + "mio", + "signal-hook", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" +dependencies = [ + "errno", + "libc", +] + +[[package]] +name = "siphasher" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strsim" +version = "0.11.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + +[[package]] +name = "strum" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "tempfile" +version = "3.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" +dependencies = [ + "fastrand", + "getrandom 0.4.2", + "once_cell", + "rustix", + "windows-sys", +] + +[[package]] +name = "terminfo" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4ea810f0692f9f51b382fff5893887bb4580f5fa246fde546e0b13e7fcee662" +dependencies = [ + "fnv", + "nom", + "phf", + "phf_codegen", +] + +[[package]] +name = "termios" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "411c5bf740737c7918b8b1fe232dca4dc9f8e754b8ad5e20966814001ed0ac6b" +dependencies = [ + "libc", +] + +[[package]] +name = 
"termwiz" +version = "0.23.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4676b37242ccbd1aabf56edb093a4827dc49086c0ffd764a5705899e0f35f8f7" +dependencies = [ + "anyhow", + "base64", + "bitflags 2.11.1", + "fancy-regex", + "filedescriptor", + "finl_unicode", + "fixedbitset", + "hex", + "lazy_static", + "libc", + "log", + "memmem", + "nix", + "num-derive", + "num-traits", + "ordered-float", + "pest", + "pest_derive", + "phf", + "sha2", + "signal-hook", + "siphasher", + "terminfo", + "termios", + "thiserror 1.0.69", + "ucd-trie", + "unicode-segmentation", + "vtparse", + "wezterm-bidi", + "wezterm-blob-leases", + "wezterm-color-types", + "wezterm-dynamic", + "wezterm-input-types", + "winapi", +] + +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + 
"deranged", + "libc", + "num-conv", + "num_threads", + "powerfmt", + "serde_core", + "time-core", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "typenum" +version = "1.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40ce102ab67701b8526c123c1bab5cbe42d7040ccfd0f64af1a385808d2f43de" + +[[package]] +name = "ucd-trie" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-segmentation" +version = "1.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9629274872b2bfaf8d66f5f15725007f635594914870f65218920345aa11aa8c" + +[[package]] +name = "unicode-truncate" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b380a1238663e5f8a691f9039c73e1cdae598a30e9855f541d29b08b53e9a5" +dependencies = [ + "itertools", + "unicode-segmentation", + "unicode-width", +] + +[[package]] +name = "unicode-width" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "uuid" +version = "1.23.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd74a9687298c6858e9b88ec8935ec45d22e8fd5e6394fa1bd4e99a87789c76" +dependencies = [ + "atomic", + "getrandom 0.4.2", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "vitui" +version = "0.1.0" +dependencies = [ + "anyhow", + "crossterm", + "ratatui", + "serde", + "serde_json", + "serde_yaml", + "tempfile", +] + +[[package]] +name = "vtparse" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d9b2acfb050df409c972a37d3b8e08cdea3bddb0c09db9d53137e504cfabed0" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.3+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20064672db26d7cdc89c7798c48a0fdfac8213434a1186e5ef29fd560ae223d6" +dependencies = [ + "wit-bindgen 0.57.1", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen 0.51.0", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.118" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf938a0bacb0469e83c1e148908bd7d5a6010354cf4fb73279b7447422e3a89" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eeff24f84126c0ec2db7a449f0c2ec963c6a49efe0698c4242929da037ca28ed" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d08065faf983b2b80a79fd87d8254c409281cf7de75fc4b773019824196c904" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.117", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.118" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd04d9e306f1907bd13c6361b5c6bfc7b3b3c095ed3f8a9246390f8dbdee129" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags 2.11.1", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "wezterm-bidi" +version = "0.2.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0a6e355560527dd2d1cf7890652f4f09bb3433b6aadade4c9b5ed76de5f3ec" +dependencies = [ + "log", + "wezterm-dynamic", +] + +[[package]] +name = "wezterm-blob-leases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692daff6d93d94e29e4114544ef6d5c942a7ed998b37abdc19b17136ea428eb7" +dependencies = [ + "getrandom 0.3.4", + "mac_address", + "sha2", + "thiserror 1.0.69", + "uuid", +] + +[[package]] +name = "wezterm-color-types" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7de81ef35c9010270d63772bebef2f2d6d1f2d20a983d27505ac850b8c4b4296" +dependencies = [ + "csscolorparser", + "deltae", + "lazy_static", + "wezterm-dynamic", +] + +[[package]] +name = "wezterm-dynamic" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f2ab60e120fd6eaa68d9567f3226e876684639d22a4219b313ff69ec0ccd5ac" +dependencies = [ + "log", + "ordered-float", + "strsim", + "thiserror 1.0.69", + "wezterm-dynamic-derive", +] + +[[package]] +name = "wezterm-dynamic-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46c0cf2d539c645b448eaffec9ec494b8b19bd5077d9e58cb1ae7efece8d575b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "wezterm-input-types" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7012add459f951456ec9d6c7e6fc340b1ce15d6fc9629f8c42853412c029e57e" +dependencies = [ + "bitflags 1.3.2", + "euclid", + "lazy_static", + "serde", + "wezterm-dynamic", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] 
+name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen" +version = "0.57.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ebf944e87a7c253233ad6766e082e3cd714b5d03812acc24c318f549614536e" + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = 
"0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags 2.11.1", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/tui/Cargo.toml b/tui/Cargo.toml new file mode 100644 index 0000000..e848c19 --- /dev/null +++ b/tui/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "vitui" +version = "0.1.0" +edition = "2024" + +[[bin]] +name = "vitui" +path = "src/main.rs" + +[dependencies] +anyhow = "1" +crossterm = "0.29" +ratatui = "0.30" +serde = { version = "1", features = ["derive"] } +serde_json = "1" +serde_yaml = "0.9" + +[dev-dependencies] +tempfile = "3" diff --git a/tui/build.zig b/tui/build.zig deleted file mode 100644 index 8dc1565..0000000 --- a/tui/build.zig +++ /dev/null @@ -1,55 +0,0 @@ -const std = @import("std"); - -pub fn build(b: *std.Build) void { - const target = b.standardTargetOptions(.{}); - const optimize = 
b.standardOptimizeOption(.{}); - - // Dependencies - const vaxis_dep = b.dependency("vaxis", .{ - .target = target, - .optimize = optimize, - }); - const yaml_dep = b.dependency("zig_yaml", .{ - .target = target, - .optimize = optimize, - }); - - // Executable - const exe = b.addExecutable(.{ - .name = "vitui", - .root_module = b.createModule(.{ - .root_source_file = b.path("src/main.zig"), - .target = target, - .optimize = optimize, - .imports = &.{ - .{ .name = "vaxis", .module = vaxis_dep.module("vaxis") }, - .{ .name = "yaml", .module = yaml_dep.module("yaml") }, - }, - }), - }); - b.installArtifact(exe); - - // Run step - const run_step = b.step("run", "Run vitui"); - const run_cmd = b.addRunArtifact(exe); - run_step.dependOn(&run_cmd.step); - run_cmd.step.dependOn(b.getInstallStep()); - if (b.args) |args| { - run_cmd.addArgs(args); - } - - // Tests - const exe_tests = b.addTest(.{ - .root_module = b.createModule(.{ - .root_source_file = b.path("src/main.zig"), - .target = target, - .optimize = optimize, - .imports = &.{ - .{ .name = "vaxis", .module = vaxis_dep.module("vaxis") }, - .{ .name = "yaml", .module = yaml_dep.module("yaml") }, - }, - }), - }); - const test_step = b.step("test", "Run tests"); - test_step.dependOn(&b.addRunArtifact(exe_tests).step); -} diff --git a/tui/build.zig.zon b/tui/build.zig.zon deleted file mode 100644 index 57d1c24..0000000 --- a/tui/build.zig.zon +++ /dev/null @@ -1,21 +0,0 @@ -.{ - .name = .vitui, - .version = "0.1.0", - .fingerprint = 0xc340ce385d55450f, - .minimum_zig_version = "0.15.2", - .dependencies = .{ - .vaxis = .{ - .url = "git+https://github.com/rockorager/libvaxis#41fff922316dcb8776332ec460e73eaf397d5033", - .hash = "vaxis-0.5.1-BWNV_JJOCQAtdJyLvrYCKbKIhX9q3liQkKMAzujWS4HJ", - }, - .zig_yaml = .{ - .url = "git+https://github.com/kubkon/zig-yaml#a6c2cd8760bf45c49b17a3f6259c4dfe3ded528e", - .hash = "zig_yaml-0.2.0-C1161pmrAgDnipDTh_4v4RQD27XN5GNaVlzzvlXf1jfW", - }, - }, - .paths = .{ - "build.zig", - 
"build.zig.zon", - "src", - }, -} diff --git a/tui/src/api/http_client.zig b/tui/src/api/http_client.zig deleted file mode 100644 index 2504f8b..0000000 --- a/tui/src/api/http_client.zig +++ /dev/null @@ -1,120 +0,0 @@ -const std = @import("std"); -const config = @import("../config.zig"); -const Allocator = std.mem.Allocator; - -/// HTTP client that uses curl subprocess for Proxmox API requests. -/// Handles PVE API token auth and TLS certificate skipping for self-signed certs. -pub const HttpClient = struct { - allocator: Allocator, - endpoint: []const u8, - token_id: []const u8, - token_secret: []const u8, - tls_verify: bool, - - pub fn init(allocator: Allocator, pve: config.ProxmoxCluster) HttpClient { - return .{ - .allocator = allocator, - .endpoint = pve.endpoint, - .token_id = pve.token_id, - .token_secret = pve.token_secret, - .tls_verify = pve.tls_verify, - }; - } - - /// Perform a GET request. Caller owns the returned memory. - pub fn get(self: HttpClient, path: []const u8) ![]const u8 { - return self.request("GET", path); - } - - /// Perform a DELETE request. Caller owns the returned memory. 
- pub fn delete(self: HttpClient, path: []const u8) ![]const u8 { - return self.request("DELETE", path); - } - - fn request(self: HttpClient, method: []const u8, path: []const u8) ![]const u8 { - const url = try std.fmt.allocPrint(self.allocator, "{s}{s}", .{ self.endpoint, path }); - defer self.allocator.free(url); - - const auth = try std.fmt.allocPrint(self.allocator, "Authorization: PVEAPIToken={s}={s}", .{ self.token_id, self.token_secret }); - defer self.allocator.free(auth); - - var argv_list: std.ArrayListUnmanaged([]const u8) = .empty; - defer argv_list.deinit(self.allocator); - - try argv_list.appendSlice(self.allocator, &.{ "curl", "-s", "-f", "--max-time", "10" }); - if (!std.mem.eql(u8, method, "GET")) { - try argv_list.appendSlice(self.allocator, &.{ "-X", method }); - } - try argv_list.appendSlice(self.allocator, &.{ "-H", auth }); - if (!self.tls_verify) { - try argv_list.append(self.allocator, "-k"); - } - try argv_list.append(self.allocator, url); - - const result = std.process.Child.run(.{ - .allocator = self.allocator, - .argv = argv_list.items, - .max_output_bytes = 1024 * 1024, - }) catch { - return error.HttpRequestFailed; - }; - defer self.allocator.free(result.stderr); - - const term = result.term; - if (term == .Exited and term.Exited == 0) { - return result.stdout; - } - - self.allocator.free(result.stdout); - return error.HttpRequestFailed; - } - - pub fn deinit(self: *HttpClient) void { - _ = self; - } -}; - -/// Parse a JSON response body and extract the "data" field. -/// Returns the parsed JSON Value. Caller must call `parsed.deinit()`. -pub fn parseJsonResponse(allocator: Allocator, body: []const u8) !std.json.Parsed(std.json.Value) { - return std.json.parseFromSlice(std.json.Value, allocator, body, .{ - .ignore_unknown_fields = true, - .allocate = .alloc_always, - }); -} - -/// Extract a string field from a JSON object, returning a default if missing. 
-pub fn jsonStr(obj: std.json.ObjectMap, key: []const u8, default: []const u8) []const u8 { - const val = obj.get(key) orelse return default; - return switch (val) { - .string => |s| s, - else => default, - }; -} - -/// Extract an integer field from a JSON object, returning a default if missing. -pub fn jsonInt(obj: std.json.ObjectMap, key: []const u8, default: i64) i64 { - const val = obj.get(key) orelse return default; - return switch (val) { - .integer => |i| i, - .float => |f| @intFromFloat(f), - .string => |s| std.fmt.parseInt(i64, s, 10) catch default, - else => default, - }; -} - -/// Extract a float field from a JSON object, returning a default if missing. -pub fn jsonFloat(obj: std.json.ObjectMap, key: []const u8, default: f64) f64 { - const val = obj.get(key) orelse return default; - return switch (val) { - .float => |f| f, - .integer => |i| @floatFromInt(i), - else => default, - }; -} - -test "jsonStr returns default for missing key" { - var map = std.json.ObjectMap.init(std.testing.allocator); - defer map.deinit(); - try std.testing.expectEqualStrings("fallback", jsonStr(map, "missing", "fallback")); -} diff --git a/tui/src/api/kubernetes.zig b/tui/src/api/kubernetes.zig deleted file mode 100644 index 5f39778..0000000 --- a/tui/src/api/kubernetes.zig +++ /dev/null @@ -1,249 +0,0 @@ -const std = @import("std"); -const config = @import("../config.zig"); -const http = @import("http_client.zig"); -const Allocator = std.mem.Allocator; - -pub const K8sBackupEntry = struct { - name: []const u8, - namespace: []const u8, - source_type: []const u8, // "VolSync" or "Velero" - status: []const u8, - schedule: []const u8, - last_run: []const u8, -}; - -pub const DetectedProviders = struct { - volsync: bool = false, - velero: bool = false, -}; - -pub const KubeClient = struct { - allocator: Allocator, - kubeconfig: []const u8, - - pub fn init(allocator: Allocator, kubeconfig: []const u8) KubeClient { - return .{ - .allocator = allocator, - .kubeconfig = kubeconfig, - 
}; - } - - /// Detect which backup providers (VolSync, Velero) are installed by checking CRDs. - pub fn detectProviders(self: *KubeClient) DetectedProviders { - const output = self.runKubectl(&.{ - "get", "crd", "--no-headers", "-o", "custom-columns=NAME:.metadata.name", - }) orelse return .{}; - defer self.allocator.free(output); - - var result = DetectedProviders{}; - var lines = std.mem.splitScalar(u8, output, '\n'); - while (lines.next()) |line| { - const trimmed = std.mem.trim(u8, line, " \t\r"); - if (trimmed.len == 0) continue; - if (std.mem.indexOf(u8, trimmed, "volsync") != null) result.volsync = true; - if (std.mem.indexOf(u8, trimmed, "velero") != null) result.velero = true; - } - return result; - } - - /// Fetch VolSync ReplicationSources across all namespaces. - pub fn getVolsyncSources(self: *KubeClient) []K8sBackupEntry { - const output = self.runKubectl(&.{ - "get", "replicationsources.volsync.backube", "-A", "-o", "json", - }) orelse return &.{}; - defer self.allocator.free(output); - - return self.parseK8sBackups(output, "VolSync"); - } - - /// Fetch Velero Backups across all namespaces. 
- pub fn getVeleroBackups(self: *KubeClient) []K8sBackupEntry { - const output = self.runKubectl(&.{ - "get", "backups.velero.io", "-A", "-o", "json", - }) orelse return &.{}; - defer self.allocator.free(output); - - return self.parseK8sBackups(output, "Velero"); - } - - fn parseK8sBackups(self: *KubeClient, output: []const u8, source_type: []const u8) []K8sBackupEntry { - var parsed = std.json.parseFromSlice(std.json.Value, self.allocator, output, .{ - .ignore_unknown_fields = true, - .allocate = .alloc_always, - }) catch return &.{}; - defer parsed.deinit(); - - const root = switch (parsed.value) { - .object => |obj| obj, - else => return &.{}, - }; - - const items = switch (root.get("items") orelse return &.{}) { - .array => |arr| arr.items, - else => return &.{}, - }; - - var results: std.ArrayListUnmanaged(K8sBackupEntry) = .empty; - for (items) |item| { - const obj = switch (item) { - .object => |o| o, - else => continue, - }; - - const metadata = switch (obj.get("metadata") orelse continue) { - .object => |o| o, - else => continue, - }; - - const name = self.allocator.dupe(u8, http.jsonStr(metadata, "name", "unknown")) catch continue; - const namespace = self.allocator.dupe(u8, http.jsonStr(metadata, "namespace", "default")) catch continue; - - // Extract status and schedule based on source type - var status: []const u8 = undefined; - var schedule: []const u8 = undefined; - var last_run: []const u8 = undefined; - - if (std.mem.eql(u8, source_type, "VolSync")) { - status = self.parseVolsyncStatus(obj); - schedule = self.parseVolsyncSchedule(obj); - last_run = self.parseVolsyncLastRun(obj); - } else { - status = self.parseVeleroStatus(obj); - schedule = self.parseVeleroSchedule(obj); - last_run = self.parseVeleroLastRun(obj); - } - - results.append(self.allocator, .{ - .name = name, - .namespace = namespace, - .source_type = self.allocator.dupe(u8, source_type) catch continue, - .status = status, - .schedule = schedule, - .last_run = last_run, - }) catch 
continue; - } - - return results.toOwnedSlice(self.allocator) catch &.{}; - } - - fn parseVolsyncStatus(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { - const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "unknown") catch "unknown") { - .object => |o| o, - else => return self.allocator.dupe(u8, "unknown") catch "unknown", - }; - - // Check conditions array for Synchronizing condition - const conditions = switch (status_obj.get("conditions") orelse return self.allocator.dupe(u8, "unknown") catch "unknown") { - .array => |arr| arr.items, - else => return self.allocator.dupe(u8, "unknown") catch "unknown", - }; - - for (conditions) |cond| { - const cond_obj = switch (cond) { - .object => |o| o, - else => continue, - }; - const cond_type = http.jsonStr(cond_obj, "type", ""); - if (std.mem.eql(u8, cond_type, "Synchronizing")) { - const cond_status = http.jsonStr(cond_obj, "status", "Unknown"); - if (std.mem.eql(u8, cond_status, "True")) { - return self.allocator.dupe(u8, "Syncing") catch "Syncing"; - } - return self.allocator.dupe(u8, "Idle") catch "Idle"; - } - } - - return self.allocator.dupe(u8, "unknown") catch "unknown"; - } - - fn parseVolsyncSchedule(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { - const spec = switch (obj.get("spec") orelse return self.allocator.dupe(u8, "-") catch "-") { - .object => |o| o, - else => return self.allocator.dupe(u8, "-") catch "-", - }; - const trigger = switch (spec.get("trigger") orelse return self.allocator.dupe(u8, "-") catch "-") { - .object => |o| o, - else => return self.allocator.dupe(u8, "-") catch "-", - }; - return self.allocator.dupe(u8, http.jsonStr(trigger, "schedule", "-")) catch "-"; - } - - fn parseVolsyncLastRun(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { - const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "-") catch "-") { - .object => |o| o, - else => return self.allocator.dupe(u8, "-") catch "-", - }; - return 
self.allocator.dupe(u8, http.jsonStr(status_obj, "lastSyncTime", "-")) catch "-"; - } - - fn parseVeleroStatus(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { - const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "unknown") catch "unknown") { - .object => |o| o, - else => return self.allocator.dupe(u8, "unknown") catch "unknown", - }; - return self.allocator.dupe(u8, http.jsonStr(status_obj, "phase", "unknown")) catch "unknown"; - } - - fn parseVeleroSchedule(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { - const spec = switch (obj.get("spec") orelse return self.allocator.dupe(u8, "-") catch "-") { - .object => |o| o, - else => return self.allocator.dupe(u8, "-") catch "-", - }; - return self.allocator.dupe(u8, http.jsonStr(spec, "scheduleName", "-")) catch "-"; - } - - fn parseVeleroLastRun(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { - const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "-") catch "-") { - .object => |o| o, - else => return self.allocator.dupe(u8, "-") catch "-", - }; - return self.allocator.dupe(u8, http.jsonStr(status_obj, "completionTimestamp", "-")) catch "-"; - } - - /// Run a kubectl command with standard flags and return stdout. - /// Returns null on any failure. 
- fn runKubectl(self: *KubeClient, extra_args: []const []const u8) ?[]const u8 { - var argv: std.ArrayListUnmanaged([]const u8) = .empty; - defer argv.deinit(self.allocator); - - argv.append(self.allocator, "kubectl") catch return null; - argv.appendSlice(self.allocator, extra_args) catch return null; - argv.appendSlice(self.allocator, &.{ - "--kubeconfig", self.kubeconfig, - }) catch return null; - - const result = std.process.Child.run(.{ - .allocator = self.allocator, - .argv = argv.items, - .max_output_bytes = 512 * 1024, - }) catch return null; - defer self.allocator.free(result.stderr); - - const term = result.term; - if (term == .Exited and term.Exited == 0) { - return result.stdout; - } - - self.allocator.free(result.stdout); - return null; - } - - pub fn deinit(self: *KubeClient) void { - _ = self; - } -}; - -/// Derive the kubeconfig path from a talos config path. -/// Given "~/talos/apollo/talosconfig", returns "~/talos/apollo/kubeconfig". -pub fn deriveKubeconfig(allocator: Allocator, talos_config_path: []const u8) ?[]const u8 { - // Find the last path separator - const dir_end = std.mem.lastIndexOfScalar(u8, talos_config_path, '/') orelse return null; - return std.fmt.allocPrint(allocator, "{s}/kubeconfig", .{talos_config_path[0..dir_end]}) catch null; -} - -test "deriveKubeconfig" { - const alloc = std.testing.allocator; - const result = deriveKubeconfig(alloc, "~/talos/apollo/talosconfig") orelse unreachable; - defer alloc.free(result); - try std.testing.expectEqualStrings("~/talos/apollo/kubeconfig", result); -} diff --git a/tui/src/api/metrics.zig b/tui/src/api/metrics.zig deleted file mode 100644 index 70cf272..0000000 --- a/tui/src/api/metrics.zig +++ /dev/null @@ -1,284 +0,0 @@ -const std = @import("std"); -const config = @import("../config.zig"); -const http = @import("http_client.zig"); -const Allocator = std.mem.Allocator; - -pub const PodMetrics = struct { - pod: []const u8, - namespace: []const u8, - cpu_cores: f64, // fractional cores - 
memory_bytes: f64, - net_rx_bytes_sec: f64, - net_tx_bytes_sec: f64, -}; - -pub const NodeMetrics = struct { - instance: []const u8, - cpu_usage: f64, // 0.0 - 1.0 - mem_used: f64, // bytes - mem_total: f64, // bytes -}; - -pub const MetricsClient = struct { - allocator: Allocator, - endpoint: []const u8, // e.g. "http://prometheus.monitoring.svc:9090" - available: bool = false, - - pub fn init(allocator: Allocator, kubeconfig: []const u8) MetricsClient { - // Autodetect metrics endpoint by querying known service names - const candidates = [_]struct { ns: []const u8, svc: []const u8, port: []const u8 }{ - .{ .ns = "monitoring", .svc = "vmsingle-victoria-metrics-victoria-metrics-single-server", .port = "8428" }, - .{ .ns = "monitoring", .svc = "vmselect", .port = "8481" }, - .{ .ns = "monitoring", .svc = "prometheus-server", .port = "9090" }, - .{ .ns = "monitoring", .svc = "prometheus-operated", .port = "9090" }, - .{ .ns = "observability", .svc = "prometheus-server", .port = "9090" }, - .{ .ns = "observability", .svc = "prometheus-operated", .port = "9090" }, - }; - - for (candidates) |c| { - const endpoint = detectEndpoint(allocator, kubeconfig, c.ns, c.svc, c.port); - if (endpoint) |ep| { - if (!probeEndpoint(allocator, ep)) { - allocator.free(ep); - continue; - } - return .{ - .allocator = allocator, - .endpoint = ep, - .available = true, - }; - } - } - - return .{ - .allocator = allocator, - .endpoint = "", - .available = false, - }; - } - - fn detectEndpoint(allocator: Allocator, kubeconfig: []const u8, ns: []const u8, svc: []const u8, port: []const u8) ?[]const u8 { - // Use kubectl to check if the service exists - var argv: std.ArrayListUnmanaged([]const u8) = .empty; - defer argv.deinit(allocator); - argv.appendSlice(allocator, &.{ - "kubectl", "get", "svc", svc, "-n", ns, - "--kubeconfig", kubeconfig, - "--no-headers", "-o", "name", - }) catch return null; - - const result = std.process.Child.run(.{ - .allocator = allocator, - .argv = argv.items, - 
.max_output_bytes = 4096, - }) catch return null; - defer allocator.free(result.stderr); - defer allocator.free(result.stdout); - - const term = result.term; - if (term == .Exited and term.Exited == 0 and result.stdout.len > 0) { - return std.fmt.allocPrint(allocator, "http://{s}.{s}.svc:{s}", .{ svc, ns, port }) catch null; - } - return null; - } - - fn probeEndpoint(allocator: Allocator, endpoint: []const u8) bool { - const url = std.fmt.allocPrint(allocator, "{s}/api/v1/query?query=up", .{endpoint}) catch return false; - defer allocator.free(url); - - const result = std.process.Child.run(.{ - .allocator = allocator, - .argv = &.{ "curl", "-s", "-f", "--max-time", "5", url }, - .max_output_bytes = 16 * 1024, - }) catch return false; - defer allocator.free(result.stderr); - defer allocator.free(result.stdout); - - return result.term == .Exited and result.term.Exited == 0; - } - - /// Query pod CPU usage via PromQL. - pub fn getPodCpu(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL( - "sum(rate(container_cpu_usage_seconds_total{container!=\"\",pod!=\"\"}[5m])) by (pod, namespace)", - ); - } - - /// Query pod memory usage via PromQL. - pub fn getPodMemory(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL( - "sum(container_memory_working_set_bytes{container!=\"\",pod!=\"\"}) by (pod, namespace)", - ); - } - - /// Query pod network rx via PromQL. - pub fn getPodNetRx(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL( - "sum(rate(container_network_receive_bytes_total{pod!=\"\"}[5m])) by (pod, namespace)", - ); - } - - /// Query pod network tx via PromQL. 
- pub fn getPodNetTx(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL( - "sum(rate(container_network_transmit_bytes_total{pod!=\"\"}[5m])) by (pod, namespace)", - ); - } - - /// Query node CPU usage via PromQL. - pub fn getNodeCpu(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL( - "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) by (instance)", - ); - } - - /// Query node memory usage via PromQL. - pub fn getNodeMemUsed(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL( - "node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes", - ); - } - - /// Query node total memory via PromQL. - pub fn getNodeMemTotal(self: *MetricsClient) []PodMetricValue { - if (!self.available) return &.{}; - return self.queryPromQL("node_memory_MemTotal_bytes"); - } - - pub const PodMetricValue = struct { - labels: std.json.ObjectMap, - value: f64, - }; - - fn queryPromQL(self: *MetricsClient, query: []const u8) []PodMetricValue { - const alloc = self.allocator; - - // URL-encode the query - var encoded: std.ArrayListUnmanaged(u8) = .empty; - defer encoded.deinit(alloc); - for (query) |c| { - switch (c) { - ' ' => encoded.appendSlice(alloc, "%20") catch return &.{}, - '"' => encoded.appendSlice(alloc, "%22") catch return &.{}, - '{' => encoded.appendSlice(alloc, "%7B") catch return &.{}, - '}' => encoded.appendSlice(alloc, "%7D") catch return &.{}, - '!' 
=> encoded.appendSlice(alloc, "%21") catch return &.{}, - '[' => encoded.appendSlice(alloc, "%5B") catch return &.{}, - ']' => encoded.appendSlice(alloc, "%5D") catch return &.{}, - '=' => encoded.appendSlice(alloc, "%3D") catch return &.{}, - else => encoded.append(alloc, c) catch return &.{}, - } - } - - const url = std.fmt.allocPrint(alloc, "{s}/api/v1/query?query={s}", .{ - self.endpoint, - encoded.items, - }) catch return &.{}; - defer alloc.free(url); - - // Use curl to query (unauthenticated, in-cluster) - var argv: std.ArrayListUnmanaged([]const u8) = .empty; - defer argv.deinit(alloc); - argv.appendSlice(alloc, &.{ - "curl", "-s", "-f", "--max-time", "5", url, - }) catch return &.{}; - - const result = std.process.Child.run(.{ - .allocator = alloc, - .argv = argv.items, - .max_output_bytes = 1024 * 1024, - }) catch return &.{}; - defer alloc.free(result.stderr); - - const term = result.term; - if (!(term == .Exited and term.Exited == 0)) { - alloc.free(result.stdout); - return &.{}; - } - defer alloc.free(result.stdout); - - return self.parsePromResponse(result.stdout); - } - - fn parsePromResponse(self: *MetricsClient, body: []const u8) []PodMetricValue { - const alloc = self.allocator; - var parsed = std.json.parseFromSlice(std.json.Value, alloc, body, .{ - .ignore_unknown_fields = true, - .allocate = .alloc_always, - }) catch return &.{}; - defer parsed.deinit(); - - const root = switch (parsed.value) { - .object => |obj| obj, - else => return &.{}, - }; - - const data = switch (root.get("data") orelse return &.{}) { - .object => |obj| obj, - else => return &.{}, - }; - - const results_arr = switch (data.get("result") orelse return &.{}) { - .array => |arr| arr.items, - else => return &.{}, - }; - - var out: std.ArrayListUnmanaged(PodMetricValue) = .empty; - for (results_arr) |item| { - const obj = switch (item) { - .object => |o| o, - else => continue, - }; - - // metric labels - const metric = switch (obj.get("metric") orelse continue) { - .object => 
|o| o, - else => continue, - }; - - // value is [timestamp, "value_string"] - const value_arr = switch (obj.get("value") orelse continue) { - .array => |arr| arr.items, - else => continue, - }; - if (value_arr.len < 2) continue; - - const val_str = switch (value_arr[1]) { - .string => |s| s, - else => continue, - }; - const val = std.fmt.parseFloat(f64, val_str) catch continue; - - // Clone metric labels so they survive parsed.deinit() - var cloned_labels = std.json.ObjectMap.init(alloc); - var it = metric.iterator(); - while (it.next()) |entry| { - const key = alloc.dupe(u8, entry.key_ptr.*) catch continue; - const label_val = switch (entry.value_ptr.*) { - .string => |s| std.json.Value{ .string = alloc.dupe(u8, s) catch continue }, - else => continue, - }; - cloned_labels.put(key, label_val) catch continue; - } - - out.append(alloc, .{ - .labels = cloned_labels, - .value = val, - }) catch continue; - } - - return out.toOwnedSlice(alloc) catch &.{}; - } - - pub fn deinit(self: *MetricsClient) void { - if (self.available and self.endpoint.len > 0) { - self.allocator.free(self.endpoint); - } - } -}; diff --git a/tui/src/api/proxmox.zig b/tui/src/api/proxmox.zig deleted file mode 100644 index deccde2..0000000 --- a/tui/src/api/proxmox.zig +++ /dev/null @@ -1,253 +0,0 @@ -const std = @import("std"); -const config = @import("../config.zig"); -const http = @import("http_client.zig"); -const Allocator = std.mem.Allocator; - -pub const VmStatus = struct { - vmid: i64, - name: []const u8, - status: []const u8, - node: []const u8, - maxdisk: i64, -}; - -pub const StoragePool = struct { - name: []const u8, - node: []const u8, - pool_type: []const u8, - status: []const u8, - disk: i64, - maxdisk: i64, -}; - -pub const NodeStatus = struct { - node: []const u8, - status: []const u8, - cpu: f64, - mem: i64, - maxmem: i64, - uptime: i64, -}; - -pub const BackupEntry = struct { - volid: []const u8, - node: []const u8, - storage: []const u8, - size: i64, - ctime: i64, - vmid: i64, 
- format: []const u8, -}; - -pub const ProxmoxClient = struct { - client: http.HttpClient, - allocator: Allocator, - - pub fn init(allocator: Allocator, pve: config.ProxmoxCluster) ProxmoxClient { - return .{ - .client = http.HttpClient.init(allocator, pve), - .allocator = allocator, - }; - } - - /// Fetch all VM resources across the PVE cluster. - pub fn getClusterResources(self: *ProxmoxClient) ![]VmStatus { - const body = self.client.get("/api2/json/cluster/resources?type=vm") catch { - return &.{}; - }; - defer self.allocator.free(body); - - var parsed = http.parseJsonResponse(self.allocator, body) catch { - return &.{}; - }; - defer parsed.deinit(); - - const data_val = switch (parsed.value) { - .object => |obj| obj.get("data") orelse return &.{}, - else => return &.{}, - }; - const items = switch (data_val) { - .array => |arr| arr.items, - else => return &.{}, - }; - - var results: std.ArrayListUnmanaged(VmStatus) = .empty; - for (items) |item| { - const obj = switch (item) { - .object => |o| o, - else => continue, - }; - - // Only include QEMU VMs (not LXC containers) - const res_type = http.jsonStr(obj, "type", ""); - if (!std.mem.eql(u8, res_type, "qemu")) continue; - - const name = try self.allocator.dupe(u8, http.jsonStr(obj, "name", "unknown")); - const status = try self.allocator.dupe(u8, http.jsonStr(obj, "status", "unknown")); - const node = try self.allocator.dupe(u8, http.jsonStr(obj, "node", "unknown")); - - try results.append(self.allocator, .{ - .vmid = http.jsonInt(obj, "vmid", 0), - .name = name, - .status = status, - .node = node, - .maxdisk = http.jsonInt(obj, "maxdisk", 0), - }); - } - - return results.toOwnedSlice(self.allocator); - } - - /// Fetch status for a specific PVE node. 
- pub fn getNodeStatus(self: *ProxmoxClient, node: []const u8) !?NodeStatus { - const path = try std.fmt.allocPrint(self.allocator, "/api2/json/nodes/{s}/status", .{node}); - defer self.allocator.free(path); - - const body = self.client.get(path) catch return null; - defer self.allocator.free(body); - - var parsed = http.parseJsonResponse(self.allocator, body) catch return null; - defer parsed.deinit(); - - const data_val = switch (parsed.value) { - .object => |obj| obj.get("data") orelse return null, - else => return null, - }; - const obj = switch (data_val) { - .object => |o| o, - else => return null, - }; - - return .{ - .node = try self.allocator.dupe(u8, node), - .status = try self.allocator.dupe(u8, http.jsonStr(obj, "status", "unknown")), - .cpu = http.jsonFloat(obj, "cpu", 0), - .mem = http.jsonInt(obj, "mem", 0), - .maxmem = http.jsonInt(obj, "maxmem", 0), - .uptime = http.jsonInt(obj, "uptime", 0), - }; - } - - /// Fetch all storage pools across the PVE cluster. - pub fn getStoragePools(self: *ProxmoxClient) ![]StoragePool { - const body = self.client.get("/api2/json/cluster/resources?type=storage") catch { - return &.{}; - }; - defer self.allocator.free(body); - - var parsed = http.parseJsonResponse(self.allocator, body) catch { - return &.{}; - }; - defer parsed.deinit(); - - const data_val = switch (parsed.value) { - .object => |obj| obj.get("data") orelse return &.{}, - else => return &.{}, - }; - const items = switch (data_val) { - .array => |arr| arr.items, - else => return &.{}, - }; - - var results: std.ArrayListUnmanaged(StoragePool) = .empty; - for (items) |item| { - const obj = switch (item) { - .object => |o| o, - else => continue, - }; - - const name = try self.allocator.dupe(u8, http.jsonStr(obj, "storage", "unknown")); - const node = try self.allocator.dupe(u8, http.jsonStr(obj, "node", "unknown")); - const pool_type = try self.allocator.dupe(u8, http.jsonStr(obj, "plugintype", http.jsonStr(obj, "type", "unknown"))); - const status = try 
self.allocator.dupe(u8, http.jsonStr(obj, "status", "unknown")); - - try results.append(self.allocator, .{ - .name = name, - .node = node, - .pool_type = pool_type, - .status = status, - .disk = http.jsonInt(obj, "disk", 0), - .maxdisk = http.jsonInt(obj, "maxdisk", 0), - }); - } - - return results.toOwnedSlice(self.allocator); - } - - /// List vzdump backups from a specific storage pool on a node. - pub fn listBackups(self: *ProxmoxClient, node: []const u8, storage: []const u8) ![]BackupEntry { - const path = try std.fmt.allocPrint(self.allocator, "/api2/json/nodes/{s}/storage/{s}/content?content=backup", .{ node, storage }); - defer self.allocator.free(path); - - const body = self.client.get(path) catch return &.{}; - defer self.allocator.free(body); - - var parsed = http.parseJsonResponse(self.allocator, body) catch return &.{}; - defer parsed.deinit(); - - const data_val = switch (parsed.value) { - .object => |obj| obj.get("data") orelse return &.{}, - else => return &.{}, - }; - const items = switch (data_val) { - .array => |arr| arr.items, - else => return &.{}, - }; - - var results: std.ArrayListUnmanaged(BackupEntry) = .empty; - for (items) |item| { - const obj = switch (item) { - .object => |o| o, - else => continue, - }; - - const volid = try self.allocator.dupe(u8, http.jsonStr(obj, "volid", "")); - const format = try self.allocator.dupe(u8, http.jsonStr(obj, "format", "unknown")); - - try results.append(self.allocator, .{ - .volid = volid, - .node = try self.allocator.dupe(u8, node), - .storage = try self.allocator.dupe(u8, storage), - .size = http.jsonInt(obj, "size", 0), - .ctime = http.jsonInt(obj, "ctime", 0), - .vmid = http.jsonInt(obj, "vmid", 0), - .format = format, - }); - } - - return results.toOwnedSlice(self.allocator); - } - - /// Delete a backup by volume ID. - pub fn deleteBackup(self: *ProxmoxClient, node: []const u8, storage: []const u8, volid: []const u8) !void { - // Percent-encode the volid as a single path segment. 
- var encoded: std.ArrayListUnmanaged(u8) = .empty; - defer encoded.deinit(self.allocator); - for (volid) |c| { - if (isUnreserved(c)) { - try encoded.append(self.allocator, c); - } else { - try std.fmt.format(encoded.writer(self.allocator), "%{X:0>2}", .{c}); - } - } - - const path = try std.fmt.allocPrint(self.allocator, "/api2/json/nodes/{s}/storage/{s}/content/{s}", .{ - node, storage, encoded.items, - }); - defer self.allocator.free(path); - - const body = try self.client.delete(path); - self.allocator.free(body); - } - - pub fn deinit(self: *ProxmoxClient) void { - self.client.deinit(); - } -}; - -fn isUnreserved(c: u8) bool { - return switch (c) { - 'A'...'Z', 'a'...'z', '0'...'9', '-', '.', '_', '~' => true, - else => false, - }; -} diff --git a/tui/src/api/talos.zig b/tui/src/api/talos.zig deleted file mode 100644 index 3086e38..0000000 --- a/tui/src/api/talos.zig +++ /dev/null @@ -1,170 +0,0 @@ -const std = @import("std"); -const config = @import("../config.zig"); -const http = @import("http_client.zig"); -const Allocator = std.mem.Allocator; - -pub const TalosVersion = struct { - node: []const u8, - talos_version: []const u8, - kubernetes_version: []const u8, -}; - -pub const EtcdMember = struct { - hostname: []const u8, - id: u64, - is_learner: bool, -}; - -pub const TalosClient = struct { - allocator: Allocator, - config_path: []const u8, - context: []const u8, - - pub fn init(allocator: Allocator, talos_cfg: config.TalosConfig) TalosClient { - return .{ - .allocator = allocator, - .config_path = talos_cfg.config_path, - .context = talos_cfg.context, - }; - } - - /// Get Talos and Kubernetes version for a specific node. - /// Returns null if the node is unreachable. - pub fn getVersion(self: *TalosClient, node_ip: []const u8) ?TalosVersion { - const output = self.runTalosctl(&.{ - "version", "--nodes", node_ip, "--short", - }) orelse return null; - defer self.allocator.free(output); - - // Parse the JSON output. 
talosctl version -o json outputs messages array. - var parsed = std.json.parseFromSlice(std.json.Value, self.allocator, output, .{ - .ignore_unknown_fields = true, - .allocate = .alloc_always, - }) catch return null; - defer parsed.deinit(); - - // talosctl version -o json structure: - // {"messages":[{"metadata":{"hostname":"..."},"version":{"tag":"v1.9.x","...":"..."}}]} - const root = switch (parsed.value) { - .object => |obj| obj, - else => return null, - }; - - const messages = switch (root.get("messages") orelse return null) { - .array => |arr| arr.items, - else => return null, - }; - if (messages.len == 0) return null; - - const msg = switch (messages[0]) { - .object => |obj| obj, - else => return null, - }; - - // Extract version info - const version_obj = switch (msg.get("version") orelse return null) { - .object => |obj| obj, - else => return null, - }; - - const talos_ver = http.jsonStr(version_obj, "tag", "unknown"); - // Kubernetes version is typically in a separate field or needs a different query - // For now extract what's available - const k8s_ver = http.jsonStr(version_obj, "kubernetes_version", "-"); - - return .{ - .node = self.allocator.dupe(u8, node_ip) catch return null, - .talos_version = self.allocator.dupe(u8, talos_ver) catch return null, - .kubernetes_version = self.allocator.dupe(u8, k8s_ver) catch return null, - }; - } - - /// Get etcd cluster membership info. - /// Returns empty slice if unreachable. 
- pub fn getEtcdMembers(self: *TalosClient) []EtcdMember { - const output = self.runTalosctl(&.{"etcd", "members"}) orelse return &.{}; - defer self.allocator.free(output); - - var parsed = std.json.parseFromSlice(std.json.Value, self.allocator, output, .{ - .ignore_unknown_fields = true, - .allocate = .alloc_always, - }) catch return &.{}; - defer parsed.deinit(); - - const root = switch (parsed.value) { - .object => |obj| obj, - else => return &.{}, - }; - - const messages = switch (root.get("messages") orelse return &.{}) { - .array => |arr| arr.items, - else => return &.{}, - }; - if (messages.len == 0) return &.{}; - - const msg = switch (messages[0]) { - .object => |obj| obj, - else => return &.{}, - }; - - const members = switch (msg.get("members") orelse return &.{}) { - .array => |arr| arr.items, - else => return &.{}, - }; - - var results: std.ArrayListUnmanaged(EtcdMember) = .empty; - for (members) |item| { - const obj = switch (item) { - .object => |o| o, - else => continue, - }; - results.append(self.allocator, .{ - .hostname = self.allocator.dupe(u8, http.jsonStr(obj, "hostname", "unknown")) catch continue, - .id = @intCast(http.jsonInt(obj, "id", 0)), - .is_learner = blk: { - const val = obj.get("is_learner") orelse break :blk false; - break :blk switch (val) { - .bool => |b| b, - else => false, - }; - }, - }) catch continue; - } - - return results.toOwnedSlice(self.allocator) catch &.{}; - } - - /// Run a talosctl command with standard flags and return stdout. - /// Returns null on any failure. 
- fn runTalosctl(self: *TalosClient, extra_args: []const []const u8) ?[]const u8 { - var argv: std.ArrayListUnmanaged([]const u8) = .empty; - defer argv.deinit(self.allocator); - - argv.append(self.allocator, "talosctl") catch return null; - argv.appendSlice(self.allocator, extra_args) catch return null; - argv.appendSlice(self.allocator, &.{ - "--talosconfig", self.config_path, - "--context", self.context, - "-o", "json", - }) catch return null; - - const result = std.process.Child.run(.{ - .allocator = self.allocator, - .argv = argv.items, - .max_output_bytes = 512 * 1024, - }) catch return null; - defer self.allocator.free(result.stderr); - - const term = result.term; - if (term == .Exited and term.Exited == 0) { - return result.stdout; - } - - self.allocator.free(result.stdout); - return null; - } - - pub fn deinit(self: *TalosClient) void { - _ = self; - } -}; diff --git a/tui/src/app.rs b/tui/src/app.rs new file mode 100644 index 0000000..f6c0855 --- /dev/null +++ b/tui/src/app.rs @@ -0,0 +1,333 @@ +use crate::config::Config; +use crate::models::Snapshot; +use crate::poller::{PollerCommand, PollerHandle}; +use crate::views::{backups, cluster, performance, storage}; +use crossterm::event::{self, Event, KeyCode, KeyEventKind}; +use ratatui::{ + DefaultTerminal, Frame, + layout::{Constraint, Direction, Layout, Rect}, + style::{Color, Modifier, Style}, + text::{Line, Span}, + widgets::{Block, Borders, Clear, Paragraph, Tabs, Wrap}, +}; +use std::time::Duration; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ActiveView { + Cluster, + Storage, + Backups, + Performance, +} + +impl ActiveView { + fn index(self) -> usize { + match self { + Self::Cluster => 0, + Self::Storage => 1, + Self::Backups => 2, + Self::Performance => 3, + } + } + + fn next(self) -> Self { + match self { + Self::Cluster => Self::Storage, + Self::Storage => Self::Backups, + Self::Backups => Self::Performance, + Self::Performance => Self::Cluster, + } + } + + fn prev(self) -> Self { + 
match self { + Self::Cluster => Self::Performance, + Self::Storage => Self::Cluster, + Self::Backups => Self::Storage, + Self::Performance => Self::Backups, + } + } +} + +pub struct App { + config: Config, + snapshot: Snapshot, + active_view: ActiveView, + show_help: bool, + should_quit: bool, + cluster_state: cluster::ClusterViewState, + storage_state: storage::StorageViewState, + backups_state: backups::BackupsViewState, + performance_state: performance::PerformanceViewState, + poller: PollerHandle, +} + +impl App { + pub fn new(config: Config) -> Self { + let poller = PollerHandle::spawn(config.clone()); + Self { + config, + snapshot: Snapshot { + loading: true, + ..Snapshot::default() + }, + active_view: ActiveView::Cluster, + show_help: false, + should_quit: false, + cluster_state: cluster::ClusterViewState::default(), + storage_state: storage::StorageViewState::default(), + backups_state: backups::BackupsViewState::default(), + performance_state: performance::PerformanceViewState::default(), + poller, + } + } + + pub fn run(mut self, terminal: &mut DefaultTerminal) -> anyhow::Result<()> { + while !self.should_quit { + while let Ok(snapshot) = self.poller.snapshots.try_recv() { + self.snapshot = snapshot; + } + + terminal.draw(|frame| self.draw(frame))?; + + if event::poll(Duration::from_millis(100))? + && let Event::Key(key) = event::read()? 
+ && key.kind == KeyEventKind::Press + { + self.handle_key(key); + } + } + Ok(()) + } + + fn handle_key(&mut self, key: event::KeyEvent) { + if self.show_help { + match key.code { + KeyCode::Esc | KeyCode::Char('?') => self.show_help = false, + _ => {} + } + return; + } + + match key.code { + KeyCode::Char('q') => { + self.should_quit = true; + return; + } + KeyCode::Char('?') => { + self.show_help = true; + return; + } + KeyCode::Char('r') => { + let _ = self.poller.commands.send(PollerCommand::RefreshNow); + return; + } + KeyCode::Char('1') => { + self.active_view = ActiveView::Cluster; + return; + } + KeyCode::Char('2') => { + self.active_view = ActiveView::Storage; + return; + } + KeyCode::Char('3') => { + self.active_view = ActiveView::Backups; + return; + } + KeyCode::Char('4') => { + self.active_view = ActiveView::Performance; + return; + } + KeyCode::Tab => { + self.active_view = self.active_view.next(); + return; + } + KeyCode::BackTab => { + self.active_view = self.active_view.prev(); + return; + } + _ => {} + } + + match self.active_view { + ActiveView::Cluster => self + .cluster_state + .handle_key(key, self.snapshot.cluster_rows.len()), + ActiveView::Storage => self.storage_state.handle_key( + key, + self.snapshot.storage_pools.len(), + self.snapshot.vm_disks.len(), + ), + ActiveView::Backups => { + if let Some(action) = self.backups_state.handle_key(key, &self.snapshot) { + let _ = self + .poller + .commands + .send(PollerCommand::DeleteBackup(action)); + } + } + ActiveView::Performance => self.performance_state.handle_key( + key, + performance::visible_pod_count(&self.snapshot, self.performance_state.ns_index), + performance::namespace_count(&self.snapshot), + ), + } + } + + fn draw(&mut self, frame: &mut Frame) { + let area = frame.area(); + let outer = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Length(1), + Constraint::Min(0), + Constraint::Length(1), + ]) + .split(area); + + self.draw_tabs(frame, outer[0]); 
+ if area.width < 80 || area.height < 24 { + frame.render_widget( + Paragraph::new("Terminal too small (min 80x24)") + .block(Block::default().borders(Borders::ALL).title("vitui")), + outer[1], + ); + } else if self.snapshot.loading { + frame.render_widget( + Paragraph::new("Loading data…") + .block(Block::default().borders(Borders::ALL).title("vitui")), + outer[1], + ); + } else { + match self.active_view { + ActiveView::Cluster => { + cluster::render(frame, outer[1], &mut self.cluster_state, &self.snapshot) + } + ActiveView::Storage => storage::render( + frame, + outer[1], + &mut self.storage_state, + &self.snapshot, + self.config.tui.storage.warn_threshold, + self.config.tui.storage.crit_threshold, + ), + ActiveView::Backups => { + backups::render(frame, outer[1], &mut self.backups_state, &self.snapshot) + } + ActiveView::Performance => performance::render( + frame, + outer[1], + &mut self.performance_state, + &self.snapshot, + ), + } + } + self.draw_status(frame, outer[2]); + + if self.show_help { + self.draw_help(frame, area); + } + } + + fn draw_tabs(&self, frame: &mut Frame, area: Rect) { + let titles = ["1:Cluster", "2:Storage", "3:Backups", "4:Perf"] + .into_iter() + .map(Line::from) + .collect::>(); + let tabs = Tabs::new(titles) + .select(self.active_view.index()) + .style(Style::default().fg(Color::White).bg(Color::DarkGray)) + .highlight_style( + Style::default() + .fg(Color::Black) + .bg(Color::Blue) + .add_modifier(Modifier::BOLD), + ); + frame.render_widget(tabs, area); + } + + fn draw_status(&self, frame: &mut Frame, area: Rect) { + let refresh = self + .snapshot + .last_refresh_label + .clone() + .unwrap_or_else(|| "never".to_string()); + let mut spans = vec![ + Span::raw(format!(" refresh={} ", refresh)), + Span::styled( + "r", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" refresh "), + Span::styled( + "?", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" 
help "), + Span::styled( + "q", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" quit"), + ]; + if let Some(error) = &self.snapshot.last_error { + spans.push(Span::raw(" | ")); + spans.push(Span::styled( + truncate_for_status(error), + Style::default().fg(Color::Yellow), + )); + } + frame.render_widget( + Paragraph::new(Line::from(spans)).block(Block::default().borders(Borders::ALL)), + area, + ); + } + + fn draw_help(&self, frame: &mut Frame, area: Rect) { + let popup = centered_rect(area, 72, 60); + frame.render_widget(Clear, popup); + let text = "Global keys\n\n1-4 / Tab / Shift-Tab switch views\nr refresh\n? help\nq quit\n\nCluster\n j/k, arrows, g/G move selection\n\nStorage\n h/l or left/right switch between pools/disks\n j/k, arrows, g/G move selection\n\nBackups\n / filter\n d delete selected PVE backup\n y/n confirm/cancel delete\n\nPerformance\n s / S cycle sort / reverse sort\n n cycle namespace filter\n j/k, arrows, g/G move selection"; + frame.render_widget( + Paragraph::new(text) + .wrap(Wrap { trim: true }) + .block(Block::default().borders(Borders::ALL).title("Help")), + popup, + ); + } +} + +fn centered_rect(area: Rect, width_percent: u16, height_percent: u16) -> Rect { + let vertical = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Percentage((100 - height_percent) / 2), + Constraint::Percentage(height_percent), + Constraint::Percentage((100 - height_percent) / 2), + ]) + .split(area); + Layout::default() + .direction(Direction::Horizontal) + .constraints([ + Constraint::Percentage((100 - width_percent) / 2), + Constraint::Percentage(width_percent), + Constraint::Percentage((100 - width_percent) / 2), + ]) + .split(vertical[1])[1] +} + +fn truncate_for_status(value: &str) -> String { + let max = 120; + if value.chars().count() <= max { + value.to_string() + } else { + let mut out = value.chars().take(max - 1).collect::(); + out.push('…'); + out + } +} diff --git 
a/tui/src/app.zig b/tui/src/app.zig deleted file mode 100644 index 3d312ff..0000000 --- a/tui/src/app.zig +++ /dev/null @@ -1,534 +0,0 @@ -const std = @import("std"); -const vaxis = @import("vaxis"); -const config = @import("config.zig"); -const poll = @import("poll.zig"); -const ClusterView = @import("views/cluster.zig").ClusterView; -const StorageView = @import("views/storage.zig").StorageView; -const backups_view = @import("views/backups.zig"); -const BackupView = backups_view.BackupView; -const DeleteAction = backups_view.DeleteAction; -const PerformanceView = @import("views/performance.zig").PerformanceView; -const proxmox_api = @import("api/proxmox.zig"); - -pub const Event = union(enum) { - key_press: vaxis.Key, - key_release: vaxis.Key, - mouse: vaxis.Mouse, - mouse_leave, - focus_in, - focus_out, - paste_start, - paste_end, - paste: []const u8, - color_report: vaxis.Color.Report, - color_scheme: vaxis.Color.Scheme, - winsize: vaxis.Winsize, - cap_kitty_keyboard, - cap_kitty_graphics, - cap_rgb, - cap_sgr_pixels, - cap_unicode, - cap_da1, - cap_color_scheme_updates, - cap_multi_cursor, - // Custom events - data_refresh, -}; - -pub const View = enum(u8) { - cluster = 0, - storage = 1, - backups = 2, - performance = 3, - - pub fn label(self: View) []const u8 { - return switch (self) { - .cluster => " 1:Cluster ", - .storage => " 2:Storage ", - .backups => " 3:Backups ", - .performance => " 4:Perf ", - }; - } -}; - -const min_width: u16 = 80; -const min_height: u16 = 24; - -pub const App = struct { - vx: vaxis.Vaxis, - tty: vaxis.Tty, - loop: vaxis.Loop(Event), - cfg: config.Config, - active_view: View = .cluster, - show_help: bool = false, - should_quit: bool = false, - alloc: std.mem.Allocator, - - // Cluster view + polling - cluster_view: ClusterView, - cluster_state: *poll.ClusterState, - // Storage view - storage_view: StorageView, - storage_state: *poll.StorageState, - // Backup view - backup_view: BackupView, - backup_state: *poll.BackupState, - // 
Performance view - perf_view: PerformanceView, - perf_state: *poll.PerfState, - // Poller (shared) - poller: *poll.Poller, - - tty_buf: [4096]u8 = undefined, - - pub fn init(alloc: std.mem.Allocator, cfg: config.Config) !App { - const state = try alloc.create(poll.ClusterState); - state.* = poll.ClusterState.init(alloc); - - const storage_state = try alloc.create(poll.StorageState); - storage_state.* = poll.StorageState.init(alloc); - - const backup_state = try alloc.create(poll.BackupState); - backup_state.* = poll.BackupState.init(alloc); - - const perf_state = try alloc.create(poll.PerfState); - perf_state.* = poll.PerfState.init(alloc); - - const poller = try alloc.create(poll.Poller); - // cfg pointer set in run() after App is at its final address - poller.* = poll.Poller.init(alloc, state, storage_state, backup_state, perf_state, undefined, cfg.tui_settings.refresh_interval_ms); - - var app: App = .{ - .vx = try vaxis.init(alloc, .{}), - .tty = undefined, - .loop = undefined, - .cfg = cfg, - .alloc = alloc, - .cluster_view = ClusterView.init(), - .cluster_state = state, - .storage_view = StorageView.init(cfg.tui_settings.warn_threshold, cfg.tui_settings.crit_threshold), - .storage_state = storage_state, - .backup_view = BackupView.init(alloc, cfg.tui_settings.stale_days), - .backup_state = backup_state, - .perf_view = PerformanceView.init(), - .perf_state = perf_state, - .poller = poller, - }; - app.tty = try vaxis.Tty.init(&app.tty_buf); - // `App` is returned by value, so pointer-bearing runtime fields must be - // wired after the caller has the app at its final address. - return app; - } - - pub fn restoreTerminal(self: *App, alloc: std.mem.Allocator) void { - // Signal poller to stop (non-blocking) so it can begin winding down - self.poller.should_stop.store(true, .release); - - // `vaxis.Loop.stop()` wakes the reader by writing a device-status - // query, which can hang shutdown if the terminal never answers it. 
- // Mark the loop as quitting, restore the screen, then close the TTY. - // The normal quit path exits the process immediately after this, so we - // intentionally do not wait for background threads here. - self.loop.should_quit = true; - self.vx.deinit(alloc, self.tty.writer()); - self.tty.deinit(); - } - - pub fn deinit(self: *App, alloc: std.mem.Allocator) void { - self.restoreTerminal(alloc); - - // Now wait for the poller thread to actually finish - if (self.poller.thread) |t| { - t.join(); - self.poller.thread = null; - } - - self.cluster_state.deinit(); - self.storage_state.deinit(); - self.backup_state.deinit(); - self.perf_state.deinit(); - alloc.destroy(self.cluster_state); - alloc.destroy(self.storage_state); - alloc.destroy(self.backup_state); - alloc.destroy(self.perf_state); - alloc.destroy(self.poller); - } - - pub fn run(self: *App, alloc: std.mem.Allocator) !void { - // Now that self is at its final address, wire up runtime pointers. - self.poller.cfg = &self.cfg; - self.loop = .{ .tty = &self.tty, .vaxis = &self.vx }; - self.poller.setRefreshNotifier(self, postRefreshEvent); - - try self.loop.init(); - try self.loop.start(); - - // Start background polling - try self.poller.start(); - - try self.vx.enterAltScreen(self.tty.writer()); - try self.vx.queryTerminal(self.tty.writer(), 1_000_000_000); - - while (!self.should_quit) { - const event = self.loop.nextEvent(); - try self.handleEvent(alloc, event); - if (self.should_quit) break; - try self.draw(); - try self.vx.render(self.tty.writer()); - } - } - - fn postRefreshEvent(context: *anyopaque) void { - const self: *App = @ptrCast(@alignCast(context)); - _ = self.loop.tryPostEvent(.data_refresh); - } - - fn handleEvent(self: *App, alloc: std.mem.Allocator, event: Event) !void { - switch (event) { - .key_press => |key| self.handleKey(key), - .winsize => |ws| try self.vx.resize(alloc, self.tty.writer(), ws), - .data_refresh => {}, // Just triggers redraw - else => {}, - } - } - - fn handleKey(self: 
*App, key: vaxis.Key) void { - // Help overlay dismissal - if (self.show_help) { - if (key.matches('?', .{}) or key.matches(vaxis.Key.escape, .{})) { - self.show_help = false; - } - return; - } - - // Global keys - if (key.matches('q', .{}) or key.matches('q', .{ .ctrl = true })) { - self.should_quit = true; - return; - } - if (key.matches('?', .{})) { - self.show_help = true; - return; - } - if (key.matches('r', .{})) { - self.poller.triggerRefresh(); - return; - } - - // View switching: 1-4 - if (key.matches('1', .{})) { - self.active_view = .cluster; - } else if (key.matches('2', .{})) { - self.active_view = .storage; - } else if (key.matches('3', .{})) { - self.active_view = .backups; - } else if (key.matches('4', .{})) { - self.active_view = .performance; - } else if (key.matches(vaxis.Key.tab, .{})) { - self.cycleView(); - } else if (key.matches(vaxis.Key.tab, .{ .shift = true })) { - self.cycleViewBack(); - } else { - // Delegate to active view - switch (self.active_view) { - .cluster => self.cluster_view.handleKey(key), - .storage => self.storage_view.handleKey(key), - .backups => self.backup_view.handleKey(key), - .performance => self.perf_view.handleKey(key), - } - } - } - - fn cycleView(self: *App) void { - const cur = @intFromEnum(self.active_view); - self.active_view = @enumFromInt((cur + 1) % 4); - } - - fn cycleViewBack(self: *App) void { - const cur = @intFromEnum(self.active_view); - self.active_view = @enumFromInt((cur + 3) % 4); - } - - fn draw(self: *App) !void { - const win = self.vx.window(); - win.clear(); - - if (win.width < min_width or win.height < min_height) { - self.drawMinSizeMessage(win); - return; - } - - // Top bar (row 0) - const top_bar = win.child(.{ .height = 1 }); - self.drawTopBar(top_bar); - - // Status bar (last row) - const status_bar = win.child(.{ - .y_off = @intCast(win.height -| 1), - .height = 1, - }); - self.drawStatusBar(status_bar); - - // Content area - const content = win.child(.{ - .y_off = 1, - .height = 
win.height -| 2, - }); - self.drawContent(content); - - // Help overlay on top - if (self.show_help) { - self.drawHelpOverlay(win); - } - } - - fn drawMinSizeMessage(self: *App, win: vaxis.Window) void { - _ = self; - const msg = "Terminal too small (min 80x24)"; - const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; - const row: u16 = win.height / 2; - _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 1 } } }}, .{ - .col_offset = col, - .row_offset = row, - }); - } - - fn drawTopBar(self: *App, win: vaxis.Window) void { - win.fill(.{ .style = .{ .bg = .{ .index = 8 } } }); - - var col: u16 = 0; - const views = [_]View{ .cluster, .storage, .backups, .performance }; - for (views) |view| { - const lbl = view.label(); - const is_active = (view == self.active_view); - const style: vaxis.Style = if (is_active) - .{ .fg = .{ .index = 0 }, .bg = .{ .index = 4 }, .bold = true } - else - .{ .fg = .{ .index = 7 }, .bg = .{ .index = 8 } }; - - _ = win.print(&.{.{ .text = lbl, .style = style }}, .{ - .col_offset = col, - .wrap = .none, - }); - col += @intCast(lbl.len); - } - - const title = " vitui "; - if (win.width > title.len + col) { - const title_col: u16 = win.width - @as(u16, @intCast(title.len)); - _ = win.print(&.{.{ .text = title, .style = .{ - .fg = .{ .index = 6 }, - .bg = .{ .index = 8 }, - .bold = true, - } }}, .{ - .col_offset = title_col, - .wrap = .none, - }); - } - } - - fn drawStatusBar(self: *App, win: vaxis.Window) void { - win.fill(.{ .style = .{ .bg = .{ .index = 8 } } }); - - const bar_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bg = .{ .index = 8 } }; - - // Left: keybinding hints - const hint = " q:quit ?:help 1-4:views r:refresh j/k:nav "; - _ = win.print(&.{.{ .text = hint, .style = bar_style }}, .{ .wrap = .none }); - - // Right: refresh status - var buf: [64]u8 = undefined; - const status_text = blk: { - if (self.cluster_state.isLoading()) { - break :blk "Loading..."; - } - const last = 
self.cluster_state.getLastRefresh(); - if (last == 0) break :blk ""; - const now = std.time.timestamp(); - const ago = now - last; - if (ago < 0) break :blk ""; - break :blk std.fmt.bufPrint(&buf, " {d}s ago ", .{ago}) catch ""; - }; - if (status_text.len > 0 and win.width > status_text.len + hint.len) { - const status_col: u16 = win.width - @as(u16, @intCast(status_text.len)); - _ = win.print(&.{.{ .text = status_text, .style = bar_style }}, .{ - .col_offset = status_col, - .wrap = .none, - }); - } - } - - fn drawContent(self: *App, win: vaxis.Window) void { - switch (self.active_view) { - .cluster => { - self.cluster_state.lock(); - defer self.cluster_state.unlock(); - - if (self.cluster_state.is_loading and self.cluster_state.rows.len == 0) { - self.drawPlaceholder(win, "Loading cluster data..."); - } else { - self.cluster_view.draw(self.alloc, win, self.cluster_state.rows); - } - }, - .storage => { - self.storage_state.lock(); - defer self.storage_state.unlock(); - - if (self.storage_state.is_loading and self.storage_state.pools.len == 0) { - self.drawPlaceholder(win, "Loading storage data..."); - } else { - self.storage_view.draw(self.alloc, win, self.storage_state.pools, self.storage_state.vm_disks); - } - }, - .backups => { - var action: ?DeleteAction = null; - self.backup_state.lock(); - { - defer self.backup_state.unlock(); - - if (self.backup_state.is_loading and - self.backup_state.backups.len == 0 and - self.backup_state.k8s_backups.len == 0) - { - self.drawPlaceholder(win, "Loading backup data..."); - } else { - self.backup_view.draw(win, self.backup_state.backups, self.backup_state.k8s_backups); - - // Copy action data while the backing rows are still locked. 
- action = self.backup_view.consumeDeleteAction(); - } - } - - if (action) |owned_action| { - defer self.alloc.free(owned_action.proxmox_cluster); - defer self.alloc.free(owned_action.node); - defer self.alloc.free(owned_action.storage); - defer self.alloc.free(owned_action.volid); - self.executeDelete(owned_action); - } - }, - .performance => { - self.perf_state.lock(); - defer self.perf_state.unlock(); - - if (self.perf_state.is_loading and self.perf_state.hosts.len == 0) { - self.drawPlaceholder(win, "Loading performance data..."); - } else { - self.perf_view.draw( - self.alloc, - win, - self.perf_state.hosts, - self.perf_state.pods, - self.perf_state.metrics_available, - ); - } - }, - } - } - - fn executeDelete(self: *App, action: DeleteAction) void { - for (self.cfg.proxmox.clusters) |pc| { - if (!std.mem.eql(u8, pc.name, action.proxmox_cluster)) continue; - var client = proxmox_api.ProxmoxClient.init(self.alloc, pc); - defer client.deinit(); - client.deleteBackup(action.node, action.storage, action.volid) catch return; - // Trigger refresh to show updated list - self.poller.triggerRefresh(); - return; - } - } - - fn drawPlaceholder(self: *App, win: vaxis.Window, label: []const u8) void { - _ = self; - const col: u16 = if (win.width > label.len) (win.width - @as(u16, @intCast(label.len))) / 2 else 0; - const row: u16 = win.height / 2; - _ = win.print(&.{.{ .text = label, .style = .{ - .fg = .{ .index = 6 }, - .bold = true, - } }}, .{ - .col_offset = col, - .row_offset = row, - .wrap = .none, - }); - } - - fn drawHelpOverlay(self: *App, win: vaxis.Window) void { - const box_w: u16 = 48; - const box_h: u16 = 22; - const x: i17 = @intCast(if (win.width > box_w) (win.width - box_w) / 2 else 0); - const y: i17 = @intCast(if (win.height > box_h) (win.height - box_h) / 2 else 0); - - const help_win = win.child(.{ - .x_off = x, - .y_off = y, - .width = box_w, - .height = box_h, - .border = .{ .where = .all, .style = .{ .fg = .{ .index = 4 } } }, - }); - - 
help_win.fill(.{ .style = .{ .bg = .{ .index = 0 } } }); - - const title_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 0 }, .bold = true }; - const text_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bg = .{ .index = 0 } }; - const section_style: vaxis.Style = .{ .fg = .{ .index = 5 }, .bg = .{ .index = 0 }, .bold = true }; - - var row: u16 = 0; - const w = help_win; - - _ = w.print(&.{.{ .text = " Keybindings", .style = title_style }}, .{ .row_offset = row, .wrap = .none }); - row += 1; - - const global = [_][]const u8{ - " q Quit", - " ? Toggle help", - " 1-4 Switch view", - " Tab/S-Tab Next/Prev view", - " j/k Navigate down/up", - " g/G Top/Bottom", - " r Refresh all data", - " Esc Close overlay", - }; - for (global) |line| { - row += 1; - if (row >= w.height) break; - _ = w.print(&.{.{ .text = line, .style = text_style }}, .{ .row_offset = row, .wrap = .none }); - } - - // View-specific hints - row += 1; - if (row < w.height) { - const view_title = switch (self.active_view) { - .cluster => " Cluster View", - .storage => " Storage View", - .backups => " Backups View", - .performance => " Performance View", - }; - _ = w.print(&.{.{ .text = view_title, .style = section_style }}, .{ .row_offset = row, .wrap = .none }); - row += 1; - } - - const view_lines: []const []const u8 = switch (self.active_view) { - .cluster => &.{ - " (no extra keys)", - }, - .storage => &.{ - " Tab Switch pools/disks", - }, - .backups => &.{ - " d Delete selected backup", - " / Search/filter", - " Esc Clear filter", - }, - .performance => &.{ - " s Cycle sort column", - " S Reverse sort direction", - " n Cycle namespace filter", - }, - }; - for (view_lines) |line| { - if (row >= w.height) break; - _ = w.print(&.{.{ .text = line, .style = text_style }}, .{ .row_offset = row, .wrap = .none }); - row += 1; - } - } -}; diff --git a/tui/src/config.rs b/tui/src/config.rs new file mode 100644 index 0000000..671c730 --- /dev/null +++ b/tui/src/config.rs @@ -0,0 +1,519 @@ +use 
anyhow::{Context, Result, anyhow, bail}; +use serde::Deserialize; +use std::{ + collections::HashSet, + env, fs, + path::{Path, PathBuf}, + time::Duration, +}; + +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + pub version: String, + pub proxmox: ProxmoxConfig, + pub talos: TalosConfig, + pub clusters: Vec, + #[serde(default)] + pub tui: TuiConfig, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ProxmoxConfig { + pub clusters: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ProxmoxCluster { + pub name: String, + pub endpoint: String, + pub token_id: String, + pub token_secret: String, + #[serde(default = "default_tls_verify")] + pub tls_verify: bool, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TalosConfig { + pub config_path: String, + #[serde(default)] + pub context: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[allow(dead_code)] +pub struct ClusterConfig { + pub name: String, + pub proxmox_cluster: String, + pub endpoint: String, + #[serde(default)] + pub config_source: Option, + pub nodes: Vec, + #[serde(default)] + pub validation: ValidationConfig, + #[serde(default)] + pub upgrade: UpgradeConfig, +} + +#[derive(Debug, Clone, Deserialize)] +#[allow(dead_code)] +pub struct ConfigSource { + pub r#type: String, + pub path: String, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct NodeConfig { + pub name: String, + pub role: String, + pub proxmox_vmid: i64, + pub proxmox_node: String, + pub ip: String, +} + +#[derive(Debug, Clone, Default, Deserialize)] +#[allow(dead_code)] +pub struct ValidationConfig { + #[serde(default)] + pub rules: std::collections::BTreeMap, +} + +#[derive(Debug, Clone, Default, Deserialize)] +#[allow(dead_code)] +pub struct RuleConfig { + #[serde(default)] + pub enabled: Option, + #[serde(default)] + pub allowed: Vec, +} + +#[derive(Debug, Clone, Deserialize)] +#[allow(dead_code)] +pub struct UpgradeConfig { + #[serde(default = "default_etcd_backup_before")] + pub etcd_backup_before: 
bool, + #[serde( + default = "default_health_check_timeout", + deserialize_with = "deserialize_duration" + )] + pub health_check_timeout: Duration, + #[serde( + default = "default_pause_between_nodes", + deserialize_with = "deserialize_duration" + )] + pub pause_between_nodes: Duration, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct TuiConfig { + #[serde(default)] + pub storage: StorageTuiConfig, + #[serde(default)] + pub backups: BackupsTuiConfig, + #[serde( + default = "default_refresh_interval", + deserialize_with = "deserialize_duration" + )] + pub refresh_interval: Duration, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct StorageTuiConfig { + #[serde(default = "default_warn_threshold")] + pub warn_threshold: u8, + #[serde(default = "default_crit_threshold")] + pub crit_threshold: u8, +} + +#[derive(Debug, Clone, Deserialize)] +pub struct BackupsTuiConfig { + #[serde(default = "default_stale_days")] + pub stale_days: u32, +} + +impl Default for UpgradeConfig { + fn default() -> Self { + Self { + etcd_backup_before: default_etcd_backup_before(), + health_check_timeout: default_health_check_timeout(), + pause_between_nodes: default_pause_between_nodes(), + } + } +} + +impl Default for TuiConfig { + fn default() -> Self { + Self { + storage: StorageTuiConfig::default(), + backups: BackupsTuiConfig::default(), + refresh_interval: default_refresh_interval(), + } + } +} + +impl Default for StorageTuiConfig { + fn default() -> Self { + Self { + warn_threshold: default_warn_threshold(), + crit_threshold: default_crit_threshold(), + } + } +} + +impl Default for BackupsTuiConfig { + fn default() -> Self { + Self { + stale_days: default_stale_days(), + } + } +} + +pub fn parse_args() -> Result> { + let mut args = env::args().skip(1); + while let Some(arg) = args.next() { + match arg.as_str() { + "-c" | "--config" => { + let Some(path) = args.next() else { + bail!("--config requires a path argument"); + }; + return Ok(Some(PathBuf::from(path))); + } + "-h" | 
"--help" => { + print_help(); + return Ok(None); + } + _ => {} + } + } + Ok(Some(discover_config()?)) +} + +pub fn print_help() { + println!("vitui - TUI for pvt cluster management\n"); + println!("Usage: vitui [options]\n"); + println!("Options:"); + println!(" -c, --config Path to pvt.yaml config file"); + println!(" -h, --help Show this help message\n"); + println!("Discovery order:"); + println!(" $PVT_CONFIG, ./pvt.yaml, ~/.config/pvt/config.yaml, ~/.config/pvt/pvt.yaml"); +} + +pub fn load_from_path(path: &Path) -> Result { + let raw = fs::read_to_string(path) + .with_context(|| format!("failed to read config file {}", path.display()))?; + let expanded = expand_env_vars(&raw)?; + let mut cfg: Config = serde_yaml::from_str(&expanded).context("failed to parse YAML config")?; + normalize_paths(&mut cfg); + validate(&cfg)?; + Ok(cfg) +} + +pub fn discover_config() -> Result { + if let Ok(path) = env::var("PVT_CONFIG") { + let path = PathBuf::from(path); + if path.exists() { + return Ok(path); + } + } + + let local = PathBuf::from("pvt.yaml"); + if local.exists() { + return local + .canonicalize() + .or_else(|_| Ok::(local.clone())) + .context("failed to resolve local pvt.yaml path"); + } + + let Some(home) = home_dir() else { + bail!( + "no pvt config file found (searched: $PVT_CONFIG, ./pvt.yaml, ~/.config/pvt/config.yaml, ~/.config/pvt/pvt.yaml)" + ); + }; + for candidate in [ + home.join(".config/pvt/config.yaml"), + home.join(".config/pvt/pvt.yaml"), + ] { + if candidate.exists() { + return Ok(candidate); + } + } + + bail!( + "no pvt config file found (searched: $PVT_CONFIG, ./pvt.yaml, ~/.config/pvt/config.yaml, ~/.config/pvt/pvt.yaml)" + ) +} + +fn normalize_paths(cfg: &mut Config) { + cfg.talos.config_path = expand_tilde(&cfg.talos.config_path); + for cluster in &mut cfg.clusters { + if let Some(source) = &mut cluster.config_source { + source.path = expand_tilde(&source.path); + } + } +} + +fn validate(cfg: &Config) -> Result<()> { + if 
cfg.version.trim().is_empty() { + bail!("config: version is required"); + } + if cfg.version != "1" { + bail!( + "config: unsupported version {:?} (supported: \"1\")", + cfg.version + ); + } + if cfg.proxmox.clusters.is_empty() { + bail!("config: at least one proxmox cluster must be defined"); + } + for (index, cluster) in cfg.proxmox.clusters.iter().enumerate() { + if cluster.name.trim().is_empty() { + bail!("config: proxmox.clusters[{index}].name is required"); + } + if cluster.endpoint.trim().is_empty() { + bail!("config: proxmox.clusters[{index}].endpoint is required"); + } + } + + if cfg.clusters.is_empty() { + bail!("config: at least one cluster must be defined"); + } + let pve_names = cfg + .proxmox + .clusters + .iter() + .map(|cluster| cluster.name.as_str()) + .collect::>(); + for (index, cluster) in cfg.clusters.iter().enumerate() { + if cluster.name.trim().is_empty() { + bail!("config: clusters[{index}].name is required"); + } + if cluster.proxmox_cluster.trim().is_empty() { + bail!("config: clusters[{index}].proxmox_cluster is required"); + } + if cluster.endpoint.trim().is_empty() { + bail!("config: clusters[{index}].endpoint is required"); + } + if cluster.nodes.is_empty() { + bail!("config: clusters[{index}].nodes must not be empty"); + } + if !pve_names.contains(cluster.proxmox_cluster.as_str()) { + bail!( + "config: clusters[{index}].proxmox_cluster {:?} does not match any defined proxmox cluster", + cluster.proxmox_cluster + ); + } + for (node_index, node) in cluster.nodes.iter().enumerate() { + if node.name.trim().is_empty() { + bail!("config: clusters[{index}].nodes[{node_index}].name is required"); + } + if !matches!(node.role.as_str(), "controlplane" | "worker") { + bail!( + "config: clusters[{index}].nodes[{node_index}].role must be \"controlplane\" or \"worker\", got {:?}", + node.role + ); + } + if node.proxmox_vmid == 0 { + bail!("config: clusters[{index}].nodes[{node_index}].proxmox_vmid is required"); + } + if 
node.proxmox_node.trim().is_empty() { + bail!("config: clusters[{index}].nodes[{node_index}].proxmox_node is required"); + } + if node.ip.trim().is_empty() { + bail!("config: clusters[{index}].nodes[{node_index}].ip is required"); + } + } + } + + Ok(()) +} + +pub fn expand_env_vars(input: &str) -> Result { + let mut out = String::with_capacity(input.len()); + let chars = input.as_bytes(); + let mut index = 0; + while index < chars.len() { + if chars[index] == b'$' { + if chars.get(index + 1) == Some(&b'{') { + let start = index + 2; + let Some(end) = input[start..].find('}') else { + return Err(anyhow!("unterminated environment variable reference")); + }; + let end = start + end; + let name = &input[start..end]; + let value = env::var(name).unwrap_or_default(); + out.push_str(&value); + index = end + 1; + continue; + } + + let start = index + 1; + let mut end = start; + while end < chars.len() + && matches!(chars[end], b'A'..=b'Z' | b'a'..=b'z' | b'0'..=b'9' | b'_') + { + end += 1; + } + if end > start { + let name = &input[start..end]; + let value = env::var(name).unwrap_or_default(); + out.push_str(&value); + index = end; + continue; + } + } + out.push(chars[index] as char); + index += 1; + } + Ok(out) +} + +pub fn parse_duration(input: &str) -> Result { + if input.is_empty() { + return Ok(default_refresh_interval()); + } + let (value, unit) = input.split_at(input.len() - 1); + let amount = value + .parse::() + .with_context(|| format!("invalid duration value: {input}"))?; + match unit { + "s" => Ok(Duration::from_secs(amount)), + "m" => Ok(Duration::from_secs(amount * 60)), + "h" => Ok(Duration::from_secs(amount * 60 * 60)), + _ => bail!("unsupported duration unit in {input}"), + } +} + +fn deserialize_duration<'de, D>(deserializer: D) -> std::result::Result +where + D: serde::Deserializer<'de>, +{ + let value = String::deserialize(deserializer)?; + parse_duration(&value).map_err(serde::de::Error::custom) +} + +fn expand_tilde(value: &str) -> String { + if let 
Some(stripped) = value.strip_prefix('~') + && let Some(home) = home_dir() + { + return format!("{}{}", home.display(), stripped); + } + value.to_string() +} + +fn home_dir() -> Option { + env::var_os("HOME").map(PathBuf::from) +} + +fn default_tls_verify() -> bool { + true +} +fn default_etcd_backup_before() -> bool { + true +} +fn default_health_check_timeout() -> Duration { + Duration::from_secs(300) +} +fn default_pause_between_nodes() -> Duration { + Duration::from_secs(30) +} +fn default_warn_threshold() -> u8 { + 10 +} +fn default_crit_threshold() -> u8 { + 5 +} +fn default_stale_days() -> u32 { + 30 +} +fn default_refresh_interval() -> Duration { + Duration::from_secs(30) +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + const VALID_CONFIG: &str = r#" +version: "1" +proxmox: + clusters: + - name: homelab + endpoint: https://proxmox.local:8006 + token_id: pvt@pam!automation + token_secret: ${PVT_TOKEN} + tls_verify: false +talos: + config_path: ~/.talos/config + context: prod +clusters: + - name: prod + proxmox_cluster: homelab + endpoint: https://192.168.1.100:6443 + config_source: + type: directory + path: ~/talos/prod + nodes: + - name: cp-1 + role: controlplane + proxmox_vmid: 100 + proxmox_node: pve1 + ip: 192.168.1.100 +"#; + + #[test] + fn expands_env_vars_to_empty_like_go_loader() { + let parsed = expand_env_vars("token: ${MISSING_VAR}").unwrap(); + assert_eq!(parsed, "token: "); + let parsed = expand_env_vars("token: $MISSING_VAR").unwrap(); + assert_eq!(parsed, "token: "); + } + + #[test] + fn parses_duration_values() { + assert_eq!(parse_duration("30s").unwrap(), Duration::from_secs(30)); + assert_eq!(parse_duration("5m").unwrap(), Duration::from_secs(300)); + assert!(parse_duration("oops").is_err()); + } + + #[test] + fn loads_and_normalizes_paths() { + unsafe { + env::set_var("PVT_TOKEN", "secret"); + } + let temp = TempDir::new().unwrap(); + let mut path = temp.path().to_path_buf(); + path.push("config.yaml"); + 
fs::write(&path, VALID_CONFIG).unwrap(); + let cfg = load_from_path(&path).unwrap(); + assert_eq!(cfg.version, "1"); + assert!(cfg.talos.config_path.contains('/')); + assert_eq!(cfg.proxmox.clusters[0].token_secret, "secret"); + } + + #[test] + fn rejects_invalid_role() { + let invalid = VALID_CONFIG.replace("controlplane", "master"); + let temp = TempDir::new().unwrap(); + let mut path = temp.path().to_path_buf(); + path.push("config.yaml"); + fs::write(&path, invalid).unwrap(); + let error = load_from_path(&path).unwrap_err().to_string(); + assert!(error.contains("controlplane")); + } + + #[test] + fn discovers_legacy_home_fallback() { + let temp = TempDir::new().unwrap(); + let home = temp.path(); + let legacy = home.join(".config/pvt/pvt.yaml"); + fs::create_dir_all(legacy.parent().unwrap()).unwrap(); + fs::write(&legacy, "version: \"1\"\n").unwrap(); + unsafe { + env::remove_var("PVT_CONFIG"); + env::set_var("HOME", home); + } + std::env::set_current_dir(temp.path()).unwrap(); + let found = discover_config().unwrap(); + assert_eq!(found, legacy); + } +} diff --git a/tui/src/config.zig b/tui/src/config.zig deleted file mode 100644 index e47b91d..0000000 --- a/tui/src/config.zig +++ /dev/null @@ -1,320 +0,0 @@ -const std = @import("std"); -const yaml = @import("yaml"); -const Allocator = std.mem.Allocator; - -const Value = yaml.Yaml.Value; -const Map = yaml.Yaml.Map; - -var discover_path_buf: [std.fs.max_path_bytes]u8 = undefined; - -// ── Config types ───────────────────────────────────────────────────── - -pub const Config = struct { - version: []const u8, - proxmox: ProxmoxConfig, - talos: TalosConfig, - clusters: []const ClusterConfig, - tui_settings: TuiSettings, -}; - -pub const ProxmoxConfig = struct { - clusters: []const ProxmoxCluster, -}; - -pub const ProxmoxCluster = struct { - name: []const u8, - endpoint: []const u8, - token_id: []const u8, - token_secret: []const u8, - tls_verify: bool, -}; - -pub const TalosConfig = struct { - config_path: []const 
u8, - context: []const u8, -}; - -pub const ClusterConfig = struct { - name: []const u8, - proxmox_cluster: []const u8, - endpoint: []const u8, - nodes: []const NodeConfig, -}; - -pub const NodeConfig = struct { - name: []const u8, - role: []const u8, - proxmox_vmid: i64, - proxmox_node: []const u8, - ip: []const u8, -}; - -pub const TuiSettings = struct { - warn_threshold: u8 = 10, - crit_threshold: u8 = 5, - stale_days: u32 = 30, - refresh_interval_ms: u64 = 30_000, -}; - -// ── Parsing helpers ────────────────────────────────────────────────── - -fn getStr(map: Map, key: []const u8) ![]const u8 { - const val = map.get(key) orelse return error.ConfigParseFailed; - return val.asScalar() orelse return error.ConfigParseFailed; -} - -fn getStrOr(map: Map, key: []const u8, default: []const u8) []const u8 { - const val = map.get(key) orelse return default; - return val.asScalar() orelse default; -} - -fn getBool(map: Map, key: []const u8, default: bool) bool { - const val = map.get(key) orelse return default; - if (val == .boolean) return val.boolean; - const s = val.asScalar() orelse return default; - if (std.mem.eql(u8, s, "true") or std.mem.eql(u8, s, "yes")) return true; - if (std.mem.eql(u8, s, "false") or std.mem.eql(u8, s, "no")) return false; - return default; -} - -fn getInt(map: Map, key: []const u8, default: i64) i64 { - const val = map.get(key) orelse return default; - const s = val.asScalar() orelse return default; - return std.fmt.parseInt(i64, s, 10) catch default; -} - -fn getList(map: Map, key: []const u8) ?[]Value { - const val = map.get(key) orelse return null; - return val.asList(); -} - -fn getMap(map: Map, key: []const u8) ?Map { - const val = map.get(key) orelse return null; - return val.asMap(); -} - -// ── Config loading ─────────────────────────────────────────────────── - -pub fn load(alloc: Allocator, path: []const u8) !Config { - const raw = std.fs.cwd().readFileAlloc(alloc, path, 1024 * 1024) catch |err| { - std.log.err("failed to read 
config file '{s}': {}", .{ path, err }); - return error.ConfigReadFailed; - }; - defer alloc.free(raw); - - const expanded = expandEnvVars(alloc, raw) catch |err| { - std.log.err("failed to expand environment variables: {}", .{err}); - return err; - }; - defer alloc.free(expanded); - - var y: yaml.Yaml = .{ .source = expanded }; - y.load(alloc) catch |err| { - if (err == error.ParseFailure) { - std.log.err("invalid YAML in config file", .{}); - } - return error.ConfigParseFailed; - }; - defer y.deinit(alloc); - - if (y.docs.items.len == 0) { - std.log.err("empty config file", .{}); - return error.ConfigParseFailed; - } - - const root_map = y.docs.items[0].asMap() orelse { - std.log.err("config root must be a mapping", .{}); - return error.ConfigParseFailed; - }; - - return parseConfig(alloc, root_map); -} - -/// Dupe a string from the YAML tree so it outlives y.deinit(). -/// Also expands leading ~ to $HOME. -fn dupeStr(alloc: Allocator, map: Map, key: []const u8) ![]const u8 { - const raw = try getStr(map, key); - if (raw.len > 0 and raw[0] == '~') { - const home = std.posix.getenv("HOME") orelse return alloc.dupe(u8, raw); - return std.fmt.allocPrint(alloc, "{s}{s}", .{ home, raw[1..] }); - } - return alloc.dupe(u8, raw); -} - -fn parseConfig(alloc: Allocator, root: Map) !Config { - const version = try getStr(root, "version"); - if (!std.mem.eql(u8, version, "1")) { - std.log.err("unsupported config version: {s}", .{version}); - return error.UnsupportedVersion; - } - - // Parse proxmox section - const pve_map = getMap(root, "proxmox") orelse { - std.log.err("missing 'proxmox' section", .{}); - return error.ConfigParseFailed; - }; - const pve_clusters_list = getList(pve_map, "clusters") orelse { - std.log.err("missing 'proxmox.clusters'", .{}); - return error.ConfigParseFailed; - }; - var pve_clusters = try alloc.alloc(ProxmoxCluster, pve_clusters_list.len); - for (pve_clusters_list, 0..) 
|item, i| { - const m = item.asMap() orelse return error.ConfigParseFailed; - pve_clusters[i] = .{ - .name = try dupeStr(alloc, m, "name"), - .endpoint = try dupeStr(alloc, m, "endpoint"), - .token_id = try dupeStr(alloc, m, "token_id"), - .token_secret = try dupeStr(alloc, m, "token_secret"), - .tls_verify = getBool(m, "tls_verify", true), - }; - } - - // Parse talos section - const talos_map = getMap(root, "talos") orelse { - std.log.err("missing 'talos' section", .{}); - return error.ConfigParseFailed; - }; - const talos = TalosConfig{ - .config_path = try dupeStr(alloc, talos_map, "config_path"), - .context = try dupeStr(alloc, talos_map, "context"), - }; - - // Parse clusters section - const clusters_list = getList(root, "clusters") orelse { - std.log.err("missing 'clusters' section", .{}); - return error.ConfigParseFailed; - }; - var clusters = try alloc.alloc(ClusterConfig, clusters_list.len); - for (clusters_list, 0..) |item, i| { - const m = item.asMap() orelse return error.ConfigParseFailed; - const nodes_list = getList(m, "nodes") orelse return error.ConfigParseFailed; - var nodes = try alloc.alloc(NodeConfig, nodes_list.len); - for (nodes_list, 0..) 
|n, j| { - const nm = n.asMap() orelse return error.ConfigParseFailed; - nodes[j] = .{ - .name = try dupeStr(alloc, nm, "name"), - .role = try dupeStr(alloc, nm, "role"), - .proxmox_vmid = getInt(nm, "proxmox_vmid", 0), - .proxmox_node = try dupeStr(alloc, nm, "proxmox_node"), - .ip = try dupeStr(alloc, nm, "ip"), - }; - } - clusters[i] = .{ - .name = try dupeStr(alloc, m, "name"), - .proxmox_cluster = try dupeStr(alloc, m, "proxmox_cluster"), - .endpoint = try dupeStr(alloc, m, "endpoint"), - .nodes = nodes, - }; - } - - // Parse optional tui section - var tui_settings = TuiSettings{}; - if (getMap(root, "tui")) |tui_map| { - if (getMap(tui_map, "storage")) |storage| { - const w = getInt(storage, "warn_threshold", 10); - const c = getInt(storage, "crit_threshold", 5); - tui_settings.warn_threshold = @intCast(@max(0, @min(100, w))); - tui_settings.crit_threshold = @intCast(@max(0, @min(100, c))); - } - if (getMap(tui_map, "backups")) |backups| { - const d = getInt(backups, "stale_days", 30); - tui_settings.stale_days = @intCast(@max(0, d)); - } - const ri = getStrOr(tui_map, "refresh_interval", "30s"); - tui_settings.refresh_interval_ms = parseDurationMs(ri); - } - - return .{ - .version = try alloc.dupe(u8, version), - .proxmox = .{ .clusters = pve_clusters }, - .talos = talos, - .clusters = clusters, - .tui_settings = tui_settings, - }; -} - -// ── Utility functions ──────────────────────────────────────────────── - -/// Parse a duration string like "5m", "30s", "1h" into milliseconds. -pub fn parseDurationMs(s: []const u8) u64 { - if (s.len == 0) return 30_000; - const suffix = s[s.len - 1]; - const num_str = s[0 .. s.len - 1]; - const num = std.fmt.parseInt(u64, num_str, 10) catch return 30_000; - return switch (suffix) { - 's' => num * 1_000, - 'm' => num * 60_000, - 'h' => num * 3_600_000, - else => 30_000, - }; -} - -/// Expand `${VAR}` references in a string using environment variables. 
-pub fn expandEnvVars(alloc: Allocator, input: []const u8) ![]const u8 { - var result: std.ArrayListUnmanaged(u8) = .empty; - errdefer result.deinit(alloc); - - var i: usize = 0; - while (i < input.len) { - if (i + 1 < input.len and input[i] == '$' and input[i + 1] == '{') { - const start = i + 2; - const end = std.mem.indexOfScalarPos(u8, input, start, '}') orelse { - return error.UnterminatedEnvVar; - }; - const var_name = input[start..end]; - const val = std.posix.getenv(var_name) orelse { - std.log.err("environment variable not set: {s}", .{var_name}); - return error.EnvVarNotSet; - }; - try result.appendSlice(alloc, val); - i = end + 1; - } else { - try result.append(alloc, input[i]); - i += 1; - } - } - return result.toOwnedSlice(alloc); -} - -/// Discover the config file path using standard search order. -pub fn discover() ![]const u8 { - if (std.posix.getenv("PVT_CONFIG")) |p| { - std.fs.cwd().access(p, .{}) catch return error.ConfigNotFound; - return p; - } - std.fs.cwd().access("pvt.yaml", .{}) catch { - const home = std.posix.getenv("HOME") orelse return error.ConfigNotFound; - const fallback = std.fmt.bufPrint(&discover_path_buf, "{s}/.config/pvt/config.yaml", .{home}) catch { - return error.ConfigNotFound; - }; - std.fs.cwd().access(fallback, .{}) catch return error.ConfigNotFound; - return fallback; - }; - return "pvt.yaml"; -} - -// ── Tests ──────────────────────────────────────────────────────────── - -test "parseDurationMs" { - const expect = std.testing.expect; - try expect(parseDurationMs("30s") == 30_000); - try expect(parseDurationMs("5m") == 300_000); - try expect(parseDurationMs("1h") == 3_600_000); - try expect(parseDurationMs("") == 30_000); - try expect(parseDurationMs("bad") == 30_000); -} - -test "expandEnvVars basic" { - const alloc = std.testing.allocator; - const result = try expandEnvVars(alloc, "hello world"); - defer alloc.free(result); - try std.testing.expectEqualStrings("hello world", result); -} - -test "TuiSettings defaults" 
{
-    const s = TuiSettings{};
-    try std.testing.expect(s.warn_threshold == 10);
-    try std.testing.expect(s.crit_threshold == 5);
-    try std.testing.expect(s.stale_days == 30);
-    try std.testing.expect(s.refresh_interval_ms == 30_000);
-}
diff --git a/tui/src/integrations/command.rs b/tui/src/integrations/command.rs
new file mode 100644
index 0000000..1c13268
--- /dev/null
+++ b/tui/src/integrations/command.rs
@@ -0,0 +1,97 @@
+use anyhow::{Context, Result, bail};
+use std::{
+    env,
+    path::PathBuf,
+    process::{Command, Stdio},
+};
+
+pub fn run(argv: &[String], max_output_bytes: usize) -> Result<String> {
+    run_with_input(argv, None, max_output_bytes)
+}
+
+pub fn run_with_input(
+    argv: &[String],
+    input: Option<&str>,
+    max_output_bytes: usize,
+) -> Result<String> {
+    if argv.is_empty() {
+        bail!("empty command argv");
+    }
+    let mut command = Command::new(&argv[0]);
+    command.args(&argv[1..]);
+    if input.is_some() {
+        command.stdin(Stdio::piped());
+    } else {
+        command.stdin(Stdio::null());
+    }
+    command.stdout(Stdio::piped());
+    command.stderr(Stdio::piped());
+    let mut child = command
+        .spawn()
+        .with_context(|| format!("failed to run {}", argv[0]))?;
+    if let Some(input) = input
+        && let Some(mut stdin) = child.stdin.take()
+    {
+        use std::io::Write as _;
+        stdin
+            .write_all(input.as_bytes())
+            .with_context(|| format!("failed to write stdin for {}", argv[0]))?;
+    }
+    let output = child
+        .wait_with_output()
+        .with_context(|| format!("failed to wait for {}", argv[0]))?;
+    if !output.status.success() {
+        bail!(
+            "command failed: {}",
+            String::from_utf8_lossy(&output.stderr).trim()
+        );
+    }
+    let mut stdout = output.stdout;
+    if stdout.len() > max_output_bytes {
+        stdout.truncate(max_output_bytes);
+    }
+    Ok(String::from_utf8_lossy(&stdout).into_owned())
+}
+
+pub fn resolve_binary(name: &str, env_override: &str) -> Result<String> {
+    if let Some(path) = env::var_os(env_override) {
+        let path = PathBuf::from(path);
+        if path.is_file() {
+            return
Ok(path.to_string_lossy().into_owned());
+        }
+    }
+
+    for directory in ["/usr/bin", "/bin", "/usr/local/bin", "/opt/homebrew/bin"] {
+        let candidate = PathBuf::from(directory).join(name);
+        if candidate.is_file() {
+            return Ok(candidate.to_string_lossy().into_owned());
+        }
+    }
+
+    bail!("unable to resolve binary for {name}")
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn captures_stdout_without_inheriting_terminal_io() {
+        let shell = resolve_binary("sh", "PVT_SH_BIN").unwrap_or_else(|_| "/bin/sh".to_string());
+        let argv = vec![
+            shell,
+            "-c".to_string(),
+            "printf 'visible'; printf 'hidden' 1>&2".to_string(),
+        ];
+        let output = run(&argv, 1024).unwrap();
+        assert_eq!(output, "visible");
+    }
+
+    #[test]
+    fn supports_stdin_for_curl_config_style_commands() {
+        let shell = resolve_binary("sh", "PVT_SH_BIN").unwrap_or_else(|_| "/bin/sh".to_string());
+        let argv = vec![shell, "-c".to_string(), "cat".to_string()];
+        let output = run_with_input(&argv, Some("hello"), 1024).unwrap();
+        assert_eq!(output, "hello");
+    }
+}
diff --git a/tui/src/integrations/kubernetes.rs b/tui/src/integrations/kubernetes.rs
new file mode 100644
index 0000000..898096a
--- /dev/null
+++ b/tui/src/integrations/kubernetes.rs
@@ -0,0 +1,231 @@
+use crate::integrations::command;
+use anyhow::Result;
+use serde_json::Value;
+use std::{env, path::PathBuf};
+
+#[derive(Debug, Clone, Default)]
+pub struct K8sBackupEntry {
+    pub name: String,
+    pub namespace: String,
+    pub source_type: String,
+    pub status: String,
+    pub schedule: String,
+    pub last_run: String,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct DetectedProviders {
+    pub volsync: bool,
+    pub velero: bool,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct ClusterNode {
+    pub name: String,
+    pub internal_ip: Option<String>,
+    pub role: String,
+}
+
+pub fn detect_providers() -> Result<DetectedProviders> {
+    let output = kubectl(&[
+        "get",
+        "crd",
+        "--no-headers",
+        "-o",
+        "custom-columns=NAME:.metadata.name",
+    ])?;
+    let mut providers =
DetectedProviders::default();
+    for line in output.lines() {
+        let line = line.trim();
+        if line.contains("volsync") {
+            providers.volsync = true;
+        }
+        if line.contains("velero") {
+            providers.velero = true;
+        }
+    }
+    Ok(providers)
+}
+
+pub fn get_volsync_sources() -> Result<Vec<K8sBackupEntry>> {
+    parse_backups(
+        &kubectl(&[
+            "get",
+            "replicationsources.volsync.backube",
+            "-A",
+            "-o",
+            "json",
+        ])?,
+        "VolSync",
+    )
+}
+
+pub fn get_velero_backups() -> Result<Vec<K8sBackupEntry>> {
+    parse_backups(
+        &kubectl(&["get", "backups.velero.io", "-A", "-o", "json"])?,
+        "Velero",
+    )
+}
+
+pub fn get_cluster_nodes() -> Result<Vec<ClusterNode>> {
+    let parsed: Value = serde_json::from_str(&kubectl(&["get", "nodes", "-o", "json"])?)?;
+    let items = parsed
+        .get("items")
+        .and_then(Value::as_array)
+        .cloned()
+        .unwrap_or_default();
+    Ok(items
+        .into_iter()
+        .map(|item| {
+            let metadata = item.get("metadata").cloned().unwrap_or(Value::Null);
+            let status = item.get("status").cloned().unwrap_or(Value::Null);
+            let labels = metadata
+                .get("labels")
+                .and_then(Value::as_object)
+                .cloned()
+                .unwrap_or_default();
+            let internal_ip =
+                status
+                    .get("addresses")
+                    .and_then(Value::as_array)
+                    .and_then(|addresses| {
+                        addresses.iter().find_map(|address| {
+                            (address.get("type").and_then(Value::as_str) == Some("InternalIP"))
+                                .then(|| {
+                                    address
+                                        .get("address")
+                                        .and_then(Value::as_str)
+                                        .map(ToOwned::to_owned)
+                                })
+                                .flatten()
+                        })
+                    });
+            let role = if labels.contains_key("node-role.kubernetes.io/control-plane")
+                || labels.contains_key("node-role.kubernetes.io/master")
+            {
+                "controlplane".to_string()
+            } else {
+                "worker".to_string()
+            };
+
+            ClusterNode {
+                name: metadata
+                    .get("name")
+                    .and_then(Value::as_str)
+                    .unwrap_or("unknown")
+                    .to_string(),
+                internal_ip,
+                role,
+            }
+        })
+        .collect())
+}
+
+fn parse_backups(body: &str, source_type: &str) -> Result<Vec<K8sBackupEntry>> {
+    let parsed: Value = serde_json::from_str(body)?;
+    let items = parsed
+        .get("items")
+        .and_then(Value::as_array)
+        .cloned()
.unwrap_or_default();
+    Ok(items
+        .into_iter()
+        .map(|item| {
+            let metadata = item.get("metadata").cloned().unwrap_or(Value::Null);
+            let status = item.get("status").cloned().unwrap_or(Value::Null);
+            let spec = item.get("spec").cloned().unwrap_or(Value::Null);
+            K8sBackupEntry {
+                name: metadata
+                    .get("name")
+                    .and_then(Value::as_str)
+                    .unwrap_or("unknown")
+                    .to_string(),
+                namespace: metadata
+                    .get("namespace")
+                    .and_then(Value::as_str)
+                    .unwrap_or("default")
+                    .to_string(),
+                source_type: source_type.to_string(),
+                status: if source_type == "VolSync" {
+                    parse_volsync_status(&status)
+                } else {
+                    status
+                        .get("phase")
+                        .and_then(Value::as_str)
+                        .unwrap_or("unknown")
+                        .to_string()
+                },
+                schedule: if source_type == "VolSync" {
+                    spec.get("trigger")
+                        .and_then(|value| value.get("schedule"))
+                        .and_then(Value::as_str)
+                        .unwrap_or("-")
+                        .to_string()
+                } else {
+                    spec.get("scheduleName")
+                        .and_then(Value::as_str)
+                        .unwrap_or("-")
+                        .to_string()
+                },
+                last_run: if source_type == "VolSync" {
+                    status
+                        .get("lastSyncTime")
+                        .and_then(Value::as_str)
+                        .unwrap_or("-")
+                        .to_string()
+                } else {
+                    status
+                        .get("completionTimestamp")
+                        .and_then(Value::as_str)
+                        .unwrap_or("-")
+                        .to_string()
+                },
+            }
+        })
+        .collect())
+}
+
+fn parse_volsync_status(status: &Value) -> String {
+    let conditions = status
+        .get("conditions")
+        .and_then(Value::as_array)
+        .cloned()
+        .unwrap_or_default();
+    for condition in conditions {
+        if condition.get("type").and_then(Value::as_str) == Some("Synchronizing") {
+            return if condition.get("status").and_then(Value::as_str) == Some("True") {
+                "Syncing".to_string()
+            } else {
+                "Idle".to_string()
+            };
+        }
+    }
+    "unknown".to_string()
+}
+
+fn kubectl(extra: &[&str]) -> Result<String> {
+    let kubectl = command::resolve_binary("kubectl", "PVT_KUBECTL_BIN")?;
+    let mut argv = vec![kubectl];
+    argv.extend(extra.iter().map(|value| value.to_string()));
+    if let Some(kubeconfig) = discover_kubeconfig() {
argv.push("--kubeconfig".to_string());
+        argv.push(kubeconfig);
+    }
+    command::run(&argv, 512 * 1024)
+}
+
+pub(crate) fn discover_kubeconfig() -> Option<String> {
+    if let Some(path) = env::var_os("KUBECONFIG") {
+        let path = PathBuf::from(path);
+        if path.is_file() {
+            return Some(path.to_string_lossy().into_owned());
+        }
+    }
+
+    let home = env::var_os("HOME")?;
+    let candidate = PathBuf::from(home).join(".kube/config");
+    if candidate.is_file() {
+        return Some(candidate.to_string_lossy().into_owned());
+    }
+    None
+}
diff --git a/tui/src/integrations/metrics.rs b/tui/src/integrations/metrics.rs
new file mode 100644
index 0000000..cb9b4cb
--- /dev/null
+++ b/tui/src/integrations/metrics.rs
@@ -0,0 +1,92 @@
+use crate::integrations::command;
+use anyhow::Result;
+use serde_json::Value;
+
+#[derive(Debug, Clone, Default)]
+pub struct MetricSample {
+    pub labels: serde_json::Map<String, Value>,
+    pub value: f64,
+}
+
+#[derive(Debug, Clone)]
+pub struct MetricsClient {
+    endpoint: String,
+}
+
+impl MetricsClient {
+    pub fn detect() -> Result<Option<Self>> {
+        let kubectl = command::resolve_binary("kubectl", "PVT_KUBECTL_BIN")?;
+        let candidates = [
+            (
+                "monitoring",
+                "vmsingle-victoria-metrics-victoria-metrics-single-server",
+                "8428",
+            ),
+            ("monitoring", "vmselect", "8481"),
+            ("monitoring", "prometheus-server", "9090"),
+            ("monitoring", "prometheus-operated", "9090"),
+            ("observability", "prometheus-server", "9090"),
+            ("observability", "prometheus-operated", "9090"),
+        ];
+        for (namespace, service, port) in candidates {
+            let mut argv = vec![
+                kubectl.clone(),
+                "get".to_string(),
+                "svc".to_string(),
+                service.to_string(),
+                "-n".to_string(),
+                namespace.to_string(),
+                "--no-headers".to_string(),
+                "-o".to_string(),
+                "name".to_string(),
+            ];
+            if let Some(kubeconfig) = super::kubernetes::discover_kubeconfig() {
+                argv.push("--kubeconfig".to_string());
+                argv.push(kubeconfig);
+            }
+            if command::run(&argv, 4096).is_ok() {
+                return Ok(Some(Self {
+                    endpoint:
format!("http://{service}.{namespace}.svc:{port}"),
+                }));
+            }
+        }
+        Ok(None)
+    }
+
+    pub fn query(&self, query: &str) -> Result<Vec<MetricSample>> {
+        let curl = command::resolve_binary("curl", "PVT_CURL_BIN")?;
+        let argv = vec![
+            curl,
+            "-s".to_string(),
+            "-f".to_string(),
+            "--max-time".to_string(),
+            "5".to_string(),
+            "--get".to_string(),
+            "--data-urlencode".to_string(),
+            format!("query={query}"),
+            format!("{}/api/v1/query", self.endpoint),
+        ];
+        let body = command::run(&argv, 1024 * 1024)?;
+        let parsed: Value = serde_json::from_str(&body)?;
+        let results = parsed
+            .get("data")
+            .and_then(|value| value.get("result"))
+            .and_then(Value::as_array)
+            .cloned()
+            .unwrap_or_default();
+        Ok(results
+            .into_iter()
+            .filter_map(|item| {
+                let labels = item.get("metric")?.as_object()?.clone();
+                let value = item
+                    .get("value")?
+                    .as_array()?
+                    .get(1)?
+                    .as_str()?
+                    .parse::<f64>()
+                    .ok()?;
+                Some(MetricSample { labels, value })
+            })
+            .collect())
+    }
+}
diff --git a/tui/src/integrations/mod.rs b/tui/src/integrations/mod.rs
new file mode 100644
index 0000000..f6af51c
--- /dev/null
+++ b/tui/src/integrations/mod.rs
@@ -0,0 +1,5 @@
+pub mod command;
+pub mod kubernetes;
+pub mod metrics;
+pub mod proxmox;
+pub mod talos;
diff --git a/tui/src/integrations/proxmox.rs b/tui/src/integrations/proxmox.rs
new file mode 100644
index 0000000..32beb45
--- /dev/null
+++ b/tui/src/integrations/proxmox.rs
@@ -0,0 +1,198 @@
+use crate::config::ProxmoxCluster;
+use crate::integrations::command;
+use anyhow::{Context, Result};
+use serde_json::Value;
+
+#[derive(Debug, Clone, Default)]
+pub struct VmStatus {
+    pub vmid: i64,
+    pub name: String,
+    pub status: String,
+    pub node: String,
+    pub maxdisk: i64,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct StoragePool {
+    pub name: String,
+    pub node: String,
+    pub pool_type: String,
+    pub status: String,
+    pub disk: i64,
+    pub maxdisk: i64,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct NodeStatus {
+    pub node: String,
+    pub cpu: f64,
pub mem: i64,
+    pub maxmem: i64,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct BackupEntry {
+    pub volid: String,
+    pub node: String,
+    pub storage: String,
+    pub size: i64,
+    pub ctime: i64,
+    pub vmid: i64,
+}
+
+pub fn get_cluster_resources(cluster: &ProxmoxCluster) -> Result<Vec<VmStatus>> {
+    let body = request(cluster, "GET", "/api2/json/cluster/resources?type=vm")?;
+    let items = data_array(&body)?;
+    Ok(items
+        .into_iter()
+        .filter(|item| item.get("type").and_then(Value::as_str) == Some("qemu"))
+        .map(|item| VmStatus {
+            vmid: json_i64(&item, "vmid"),
+            name: json_string(&item, "name", "unknown"),
+            status: json_string(&item, "status", "unknown"),
+            node: json_string(&item, "node", "unknown"),
+            maxdisk: json_i64(&item, "maxdisk"),
+        })
+        .collect())
+}
+
+pub fn get_storage_pools(cluster: &ProxmoxCluster) -> Result<Vec<StoragePool>> {
+    let body = request(cluster, "GET", "/api2/json/cluster/resources?type=storage")?;
+    let items = data_array(&body)?;
+    Ok(items
+        .into_iter()
+        .map(|item| StoragePool {
+            name: json_string(&item, "storage", "unknown"),
+            node: json_string(&item, "node", "unknown"),
+            pool_type: item
+                .get("plugintype")
+                .and_then(Value::as_str)
+                .or_else(|| item.get("type").and_then(Value::as_str))
+                .unwrap_or("unknown")
+                .to_string(),
+            status: json_string(&item, "status", "unknown"),
+            disk: json_i64(&item, "disk"),
+            maxdisk: json_i64(&item, "maxdisk"),
+        })
+        .collect())
+}
+
+pub fn get_node_status(cluster: &ProxmoxCluster, node: &str) -> Result<NodeStatus> {
+    let body = request(cluster, "GET", &format!("/api2/json/nodes/{node}/status"))?;
+    let data = data_object(&body)?;
+    Ok(NodeStatus {
+        node: node.to_string(),
+        cpu: json_f64(&data, "cpu"),
+        mem: json_i64(&data, "mem"),
+        maxmem: json_i64(&data, "maxmem"),
+    })
+}
+
+pub fn list_backups(
+    cluster: &ProxmoxCluster,
+    node: &str,
+    storage: &str,
+) -> Result<Vec<BackupEntry>> {
+    let body = request(
+        cluster,
+        "GET",
+        &format!("/api2/json/nodes/{node}/storage/{storage}/content?content=backup"),
+    )?;
+    let items =
data_array(&body)?;
+    Ok(items
+        .into_iter()
+        .map(|item| BackupEntry {
+            volid: json_string(&item, "volid", ""),
+            node: node.to_string(),
+            storage: storage.to_string(),
+            size: json_i64(&item, "size"),
+            ctime: json_i64(&item, "ctime"),
+            vmid: json_i64(&item, "vmid"),
+        })
+        .collect())
+}
+
+pub fn delete_backup(
+    cluster: &ProxmoxCluster,
+    node: &str,
+    storage: &str,
+    volid: &str,
+) -> Result<()> {
+    let encoded = volid.replace(':', "%3A").replace('/', "%2F");
+    let _ = request(
+        cluster,
+        "DELETE",
+        &format!("/api2/json/nodes/{node}/storage/{storage}/content/{encoded}"),
+    )?;
+    Ok(())
+}
+
+fn request(cluster: &ProxmoxCluster, method: &str, path: &str) -> Result<String> {
+    let curl = command::resolve_binary("curl", "PVT_CURL_BIN")?;
+    let url = format!("{}{}", cluster.endpoint, path);
+    let mut argv = vec![
+        curl,
+        "--silent".to_string(),
+        "--show-error".to_string(),
+        "--fail".to_string(),
+        "--max-time".to_string(),
+        "10".to_string(),
+        "--config".to_string(),
+        "-".to_string(),
+    ];
+    if method != "GET" {
+        argv.push("-X".to_string());
+        argv.push(method.to_string());
+    }
+    if !cluster.tls_verify {
+        argv.push("-k".to_string());
+    }
+    let curl_config = format!(
+        "url = \"{}\"\nheader = \"Authorization: PVEAPIToken={}={}\"\n",
+        escape_curl_config(&url),
+        escape_curl_config(&cluster.token_id),
+        escape_curl_config(&cluster.token_secret),
+    );
+    command::run_with_input(&argv, Some(&curl_config), 1024 * 1024)
+        .context("proxmox request failed")
+}
+
+fn escape_curl_config(value: &str) -> String {
+    value.replace('\\', "\\\\").replace('"', "\\\"")
+}
+
+fn data_array(body: &str) -> Result<Vec<Value>> {
+    let parsed: Value = serde_json::from_str(body).context("invalid proxmox JSON")?;
+    Ok(parsed
+        .get("data")
+        .and_then(Value::as_array)
+        .cloned()
+        .unwrap_or_default())
+}
+
+fn data_object(body: &str) -> Result<Value> {
+    let parsed: Value = serde_json::from_str(body).context("invalid proxmox JSON")?;
+    Ok(parsed.get("data").cloned().unwrap_or(Value::Null))
+}
+
+fn json_string(value: &Value, key: &str, default: &str) -> String {
+    value
+        .get(key)
+        .and_then(Value::as_str)
+        .unwrap_or(default)
+        .to_string()
+}
+
+fn json_i64(value: &Value, key: &str) -> i64 {
+    value
+        .get(key)
+        .and_then(|value| value.as_i64().or_else(|| value.as_f64().map(|v| v as i64)))
+        .unwrap_or_default()
+}
+
+fn json_f64(value: &Value, key: &str) -> f64 {
+    value
+        .get(key)
+        .and_then(|value| value.as_f64().or_else(|| value.as_i64().map(|v| v as f64)))
+        .unwrap_or_default()
+}
diff --git a/tui/src/integrations/talos.rs b/tui/src/integrations/talos.rs
new file mode 100644
index 0000000..238cd04
--- /dev/null
+++ b/tui/src/integrations/talos.rs
@@ -0,0 +1,95 @@
+use crate::config::TalosConfig;
+use crate::integrations::command;
+use anyhow::Result;
+use serde_json::Value;
+
+#[derive(Debug, Clone, Default)]
+pub struct TalosVersion {
+    pub talos_version: String,
+    pub kubernetes_version: String,
+}
+
+#[derive(Debug, Clone, Default)]
+pub struct EtcdMember {
+    pub hostname: String,
+    pub is_learner: bool,
+}
+
+pub fn get_version(talos: &TalosConfig, node_ip: &str) -> Result<TalosVersion> {
+    let argv = talosctl_args(
+        talos,
+        vec![
+            "version".to_string(),
+            "--nodes".to_string(),
+            node_ip.to_string(),
+            "--short".to_string(),
+        ],
+    )?;
+    let output = command::run(&argv, 512 * 1024)?;
+    let parsed: Value = serde_json::from_str(&output)?;
+    let message = parsed
+        .get("messages")
+        .and_then(Value::as_array)
+        .and_then(|items| items.first())
+        .and_then(Value::as_object)
+        .ok_or_else(|| anyhow::anyhow!("missing talos version payload"))?;
+    let version = message
+        .get("version")
+        .and_then(Value::as_object)
+        .ok_or_else(|| anyhow::anyhow!("missing version object"))?;
+    Ok(TalosVersion {
+        talos_version: version
+            .get("tag")
+            .and_then(Value::as_str)
+            .unwrap_or("unknown")
+            .to_string(),
+        kubernetes_version: version
+            .get("kubernetes_version")
+            .and_then(Value::as_str)
+            .unwrap_or("-")
+            .to_string(),
+    })
+}
+
+pub fn
get_etcd_members(talos: &TalosConfig) -> Result<Vec<EtcdMember>> {
+    let argv = talosctl_args(talos, vec!["etcd".to_string(), "members".to_string()])?;
+    let output = command::run(&argv, 512 * 1024)?;
+    let parsed: Value = serde_json::from_str(&output)?;
+    let members = parsed
+        .get("messages")
+        .and_then(Value::as_array)
+        .and_then(|items| items.first())
+        .and_then(|item| item.get("members"))
+        .and_then(Value::as_array)
+        .cloned()
+        .unwrap_or_default();
+    Ok(members
+        .into_iter()
+        .map(|item| EtcdMember {
+            hostname: item
+                .get("hostname")
+                .and_then(Value::as_str)
+                .unwrap_or("unknown")
+                .to_string(),
+            is_learner: item
+                .get("is_learner")
+                .and_then(Value::as_bool)
+                .unwrap_or(false),
+        })
+        .collect())
+}
+
+fn talosctl_args(talos: &TalosConfig, extra: Vec<String>) -> Result<Vec<String>> {
+    let binary = command::resolve_binary("talosctl", "PVT_TALOSCTL_BIN")?;
+    let mut argv = vec![binary];
+    argv.extend(extra);
+    argv.push("--talosconfig".to_string());
+    argv.push(talos.config_path.clone());
+    if !talos.context.is_empty() {
+        argv.push("--context".to_string());
+        argv.push(talos.context.clone());
+    }
+    argv.push("-o".to_string());
+    argv.push("json".to_string());
+    Ok(argv)
+}
diff --git a/tui/src/main.rs b/tui/src/main.rs
new file mode 100644
index 0000000..75170f4
--- /dev/null
+++ b/tui/src/main.rs
@@ -0,0 +1,22 @@
+mod app;
+mod config;
+mod integrations;
+mod models;
+mod poller;
+mod util;
+mod views;
+
+use anyhow::Result;
+use app::App;
+use config::{load_from_path, parse_args};
+fn main() -> Result<()> {
+    let Some(config_path) = parse_args()?
else { + return Ok(()); + }; + let config = load_from_path(&config_path)?; + + let mut terminal = ratatui::init(); + let result = App::new(config).run(&mut terminal); + ratatui::restore(); + result +} diff --git a/tui/src/main.zig b/tui/src/main.zig deleted file mode 100644 index ab40e19..0000000 --- a/tui/src/main.zig +++ /dev/null @@ -1,73 +0,0 @@ -const std = @import("std"); -const config = @import("config.zig"); -const App = @import("app.zig").App; - -pub fn main() !void { - var gpa_impl: std.heap.GeneralPurposeAllocator(.{}) = .init; - defer _ = gpa_impl.deinit(); - const alloc = gpa_impl.allocator(); - - const config_path = parseArgs() catch |err| { - if (err == error.HelpRequested) return; - return err; - }; - - const cfg = config.load(alloc, config_path) catch |err| { - std.log.err("configuration error: {}", .{err}); - std.process.exit(1); - }; - - var app = App.init(alloc, cfg) catch |err| { - std.log.err("failed to initialize TUI: {}", .{err}); - std.process.exit(1); - }; - - app.run(alloc) catch |err| { - app.restoreTerminal(alloc); - std.log.err("runtime error: {}", .{err}); - std.process.exit(1); - }; - - app.restoreTerminal(alloc); - std.process.exit(0); -} - -fn parseArgs() ![]const u8 { - var args = std.process.args(); - _ = args.skip(); // program name - - while (args.next()) |arg| { - if (std.mem.eql(u8, arg, "--config") or std.mem.eql(u8, arg, "-c")) { - return args.next() orelse { - std.log.err("--config requires a path argument", .{}); - return error.MissingArgument; - }; - } - if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) { - _ = std.posix.write(std.posix.STDOUT_FILENO, - \\vitui - TUI for pvt cluster management - \\ - \\Usage: vitui [options] - \\ - \\Options: - \\ -c, --config Path to pvt.yaml config file - \\ -h, --help Show this help message - \\ - \\If --config is not specified, vitui searches: - \\ $PVT_CONFIG, ./pvt.yaml, ~/.config/pvt/config.yaml - \\ - ) catch {}; - return error.HelpRequested; - } - } - - // No 
--config flag: try to discover
-    return config.discover() catch {
-        std.log.err("no config file found (use --config or set $PVT_CONFIG)", .{});
-        return error.MissingArgument;
-    };
-}
-
-test {
-    _ = config;
-}
diff --git a/tui/src/models.rs b/tui/src/models.rs
new file mode 100644
index 0000000..14b5303
--- /dev/null
+++ b/tui/src/models.rs
@@ -0,0 +1,102 @@
+#[derive(Debug, Clone, Default)]
+pub struct Snapshot {
+    pub cluster_rows: Vec<ClusterRow>,
+    pub storage_pools: Vec<StoragePoolRow>,
+    pub vm_disks: Vec<VmDiskRow>,
+    pub backups: Vec<BackupRow>,
+    pub k8s_backups: Vec<K8sBackupRow>,
+    pub hosts: Vec<HostRow>,
+    pub pods: Vec<PodMetricRow>,
+    pub metrics_available: bool,
+    pub loading: bool,
+    pub last_refresh_label: Option<String>,
+    pub last_error: Option<String>,
+}
+
+#[derive(Debug, Clone)]
+pub struct ClusterRow {
+    pub name: String,
+    pub role: String,
+    pub ip: String,
+    pub pve_node: String,
+    pub vmid: String,
+    pub talos_version: String,
+    pub kubernetes_version: String,
+    pub etcd: String,
+    pub health: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct StoragePoolRow {
+    pub name: String,
+    pub node: String,
+    pub pool_type: String,
+    pub used_str: String,
+    pub total_str: String,
+    pub status: String,
+    pub usage_pct: f64,
+}
+
+#[derive(Debug, Clone)]
+pub struct VmDiskRow {
+    pub vm_name: String,
+    pub vmid: String,
+    pub node: String,
+    pub size_str: String,
+    pub size_bytes: i64,
+}
+
+#[derive(Debug, Clone)]
+pub struct BackupRow {
+    pub proxmox_cluster: String,
+    pub volid: String,
+    pub node: String,
+    pub storage: String,
+    pub vm_name: String,
+    pub vmid: String,
+    pub size_str: String,
+    pub date_str: String,
+    pub age_days: u32,
+    pub is_stale: bool,
+}
+
+#[derive(Debug, Clone)]
+pub struct K8sBackupRow {
+    pub name: String,
+    pub namespace: String,
+    pub source_type: String,
+    pub status: String,
+    pub schedule: String,
+    pub last_run: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct HostRow {
+    pub name: String,
+    pub cpu_pct: f64,
+    pub mem_used_str: String,
+    pub mem_total_str: String,
+    pub mem_pct: f64,
+}
+
+#[derive(Debug,
Clone)] +pub struct PodMetricRow { + pub pod: String, + pub namespace: String, + pub cpu_str: String, + pub mem_str: String, + pub net_rx_str: String, + pub net_tx_str: String, + pub cpu_cores: f64, + pub mem_bytes: f64, + pub net_rx_bytes_sec: f64, + pub net_tx_bytes_sec: f64, +} + +#[derive(Debug, Clone)] +pub struct DeleteAction { + pub proxmox_cluster: String, + pub node: String, + pub storage: String, + pub volid: String, +} diff --git a/tui/src/poll.zig b/tui/src/poll.zig deleted file mode 100644 index cbdf1c7..0000000 --- a/tui/src/poll.zig +++ /dev/null @@ -1,969 +0,0 @@ -const std = @import("std"); -const config = @import("config.zig"); -const proxmox = @import("api/proxmox.zig"); -const talos = @import("api/talos.zig"); -const kubernetes = @import("api/kubernetes.zig"); -const metrics_api = @import("api/metrics.zig"); -const Allocator = std.mem.Allocator; - -/// A single row in the cluster view table. -/// All fields are display-ready strings. -pub const NodeRow = struct { - name: []const u8, - role: []const u8, - ip: []const u8, - pve_node: []const u8, - vmid: []const u8, - talos_ver: []const u8, - k8s_ver: []const u8, - etcd: []const u8, - health: []const u8, -}; - -/// A single row in the storage pools table. -pub const StoragePoolRow = struct { - name: []const u8, - node: []const u8, - pool_type: []const u8, - used_str: []const u8, - total_str: []const u8, - status: []const u8, - usage_pct: f64, -}; - -/// A single row in the VM disks table. -pub const VmDiskRow = struct { - vm_name: []const u8, - vmid: []const u8, - pool: []const u8, - size_str: []const u8, - size_bytes: i64, -}; - -/// Format bytes into a human-readable string (e.g., "42.1 GiB"). -pub fn formatBytes(alloc: Allocator, bytes: i64) []const u8 { - const fb: f64 = @floatFromInt(@max(bytes, 0)); - if (fb >= 1024.0 * 1024.0 * 1024.0 * 1024.0) { - return std.fmt.allocPrint(alloc, "{d:.1} TiB", .{fb / (1024.0 * 1024.0 * 1024.0 * 1024.0)}) catch "? 
TiB"; - } else if (fb >= 1024.0 * 1024.0 * 1024.0) { - return std.fmt.allocPrint(alloc, "{d:.1} GiB", .{fb / (1024.0 * 1024.0 * 1024.0)}) catch "? GiB"; - } else if (fb >= 1024.0 * 1024.0) { - return std.fmt.allocPrint(alloc, "{d:.1} MiB", .{fb / (1024.0 * 1024.0)}) catch "? MiB"; - } else { - return std.fmt.allocPrint(alloc, "{d:.0} KiB", .{fb / 1024.0}) catch "? KiB"; - } -} - -/// Thread-safe shared state for storage view data. -pub const StorageState = struct { - mutex: std.Thread.Mutex = .{}, - pools: []StoragePoolRow = &.{}, - vm_disks: []VmDiskRow = &.{}, - is_loading: bool = true, - last_refresh_epoch: i64 = 0, - allocator: Allocator, - - pub fn init(allocator: Allocator) StorageState { - return .{ .allocator = allocator }; - } - - pub fn swapData(self: *StorageState, new_pools: []StoragePoolRow, new_disks: []VmDiskRow) void { - self.mutex.lock(); - defer self.mutex.unlock(); - self.freeDataInternal(); - self.pools = new_pools; - self.vm_disks = new_disks; - self.is_loading = false; - self.last_refresh_epoch = std.time.timestamp(); - } - - pub fn lock(self: *StorageState) void { - self.mutex.lock(); - } - - pub fn unlock(self: *StorageState) void { - self.mutex.unlock(); - } - - pub fn isLoading(self: *StorageState) bool { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.is_loading; - } - - fn freeDataInternal(self: *StorageState) void { - for (self.pools) |row| { - self.allocator.free(row.name); - self.allocator.free(row.node); - self.allocator.free(row.pool_type); - self.allocator.free(row.used_str); - self.allocator.free(row.total_str); - self.allocator.free(row.status); - } - if (self.pools.len > 0) self.allocator.free(self.pools); - for (self.vm_disks) |row| { - self.allocator.free(row.vm_name); - self.allocator.free(row.vmid); - self.allocator.free(row.pool); - self.allocator.free(row.size_str); - } - if (self.vm_disks.len > 0) self.allocator.free(self.vm_disks); - } - - pub fn deinit(self: *StorageState) void { - 
self.freeDataInternal(); - } -}; - -/// A single row in the backups table. -pub const BackupRow = struct { - proxmox_cluster: []const u8, - volid: []const u8, - node: []const u8, - storage: []const u8, - vm_name: []const u8, - vmid: []const u8, - size_str: []const u8, - date_str: []const u8, - age_days: u32, - is_stale: bool, -}; - -/// A single K8s backup row (VolSync/Velero). -pub const K8sBackupRow = struct { - name: []const u8, - namespace: []const u8, - source_type: []const u8, - status: []const u8, - schedule: []const u8, - last_run: []const u8, -}; - -/// Thread-safe shared state for backup view data. -pub const BackupState = struct { - mutex: std.Thread.Mutex = .{}, - backups: []BackupRow = &.{}, - k8s_backups: []K8sBackupRow = &.{}, - is_loading: bool = true, - last_refresh_epoch: i64 = 0, - allocator: Allocator, - - pub fn init(allocator: Allocator) BackupState { - return .{ .allocator = allocator }; - } - - pub fn swapData(self: *BackupState, new_backups: []BackupRow, new_k8s: []K8sBackupRow) void { - self.mutex.lock(); - defer self.mutex.unlock(); - self.freeDataInternal(); - self.backups = new_backups; - self.k8s_backups = new_k8s; - self.is_loading = false; - self.last_refresh_epoch = std.time.timestamp(); - } - - pub fn lock(self: *BackupState) void { - self.mutex.lock(); - } - - pub fn unlock(self: *BackupState) void { - self.mutex.unlock(); - } - - pub fn isLoading(self: *BackupState) bool { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.is_loading; - } - - fn freeDataInternal(self: *BackupState) void { - for (self.backups) |row| { - self.allocator.free(row.proxmox_cluster); - self.allocator.free(row.volid); - self.allocator.free(row.node); - self.allocator.free(row.storage); - self.allocator.free(row.vm_name); - self.allocator.free(row.vmid); - self.allocator.free(row.size_str); - self.allocator.free(row.date_str); - } - if (self.backups.len > 0) self.allocator.free(self.backups); - for (self.k8s_backups) |row| { - 
self.allocator.free(row.name); - self.allocator.free(row.namespace); - self.allocator.free(row.source_type); - self.allocator.free(row.status); - self.allocator.free(row.schedule); - self.allocator.free(row.last_run); - } - if (self.k8s_backups.len > 0) self.allocator.free(self.k8s_backups); - } - - pub fn deinit(self: *BackupState) void { - self.freeDataInternal(); - } -}; - -/// A single row in the host overview (PVE node metrics). -pub const HostRow = struct { - name: []const u8, - cpu_pct: f64, // 0-100 - mem_used_str: []const u8, - mem_total_str: []const u8, - mem_pct: f64, // 0-100 -}; - -/// A single row in the pod metrics table. -pub const PodMetricRow = struct { - pod: []const u8, - namespace: []const u8, - cpu_str: []const u8, // e.g. "0.125" - mem_str: []const u8, // e.g. "128.5 MiB" - net_rx_str: []const u8, // e.g. "1.2 KiB/s" - net_tx_str: []const u8, // e.g. "0.5 KiB/s" - cpu_cores: f64, // for sorting - mem_bytes: f64, // for sorting - net_rx_bytes_sec: f64, // for sorting - net_tx_bytes_sec: f64, // for sorting -}; - -/// Thread-safe shared state for performance view data. 
-pub const PerfState = struct { - mutex: std.Thread.Mutex = .{}, - hosts: []HostRow = &.{}, - pods: []PodMetricRow = &.{}, - metrics_available: bool = false, - is_loading: bool = true, - last_refresh_epoch: i64 = 0, - allocator: Allocator, - - pub fn init(allocator: Allocator) PerfState { - return .{ .allocator = allocator }; - } - - pub fn swapData(self: *PerfState, new_hosts: []HostRow, new_pods: []PodMetricRow, available: bool) void { - self.mutex.lock(); - defer self.mutex.unlock(); - self.freeDataInternal(); - self.hosts = new_hosts; - self.pods = new_pods; - self.metrics_available = available; - self.is_loading = false; - self.last_refresh_epoch = std.time.timestamp(); - } - - pub fn lock(self: *PerfState) void { - self.mutex.lock(); - } - - pub fn unlock(self: *PerfState) void { - self.mutex.unlock(); - } - - pub fn isMetricsAvailable(self: *PerfState) bool { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.metrics_available; - } - - pub fn isLoading(self: *PerfState) bool { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.is_loading; - } - - fn freeDataInternal(self: *PerfState) void { - for (self.hosts) |row| { - self.allocator.free(row.name); - self.allocator.free(row.mem_used_str); - self.allocator.free(row.mem_total_str); - } - if (self.hosts.len > 0) self.allocator.free(self.hosts); - for (self.pods) |row| { - self.allocator.free(row.pod); - self.allocator.free(row.namespace); - self.allocator.free(row.cpu_str); - self.allocator.free(row.mem_str); - self.allocator.free(row.net_rx_str); - self.allocator.free(row.net_tx_str); - } - if (self.pods.len > 0) self.allocator.free(self.pods); - } - - pub fn deinit(self: *PerfState) void { - self.freeDataInternal(); - } -}; - -/// Format a rate in bytes/sec into a human-readable string. 
-pub fn formatRate(alloc: Allocator, bytes_per_sec: f64) []const u8 { - if (bytes_per_sec >= 1024.0 * 1024.0) { - return std.fmt.allocPrint(alloc, "{d:.1} MiB/s", .{bytes_per_sec / (1024.0 * 1024.0)}) catch "? MiB/s"; - } else if (bytes_per_sec >= 1024.0) { - return std.fmt.allocPrint(alloc, "{d:.1} KiB/s", .{bytes_per_sec / 1024.0}) catch "? KiB/s"; - } else { - return std.fmt.allocPrint(alloc, "{d:.0} B/s", .{bytes_per_sec}) catch "? B/s"; - } -} - -/// Format an epoch timestamp into "YYYY-MM-DD HH:MM". -pub fn formatEpoch(alloc: Allocator, epoch: i64) []const u8 { - const es = std.time.epoch.EpochSeconds{ .secs = @intCast(@max(0, epoch)) }; - const day = es.getEpochDay(); - const yd = day.calculateYearDay(); - const md = yd.calculateMonthDay(); - const ds = es.getDaySeconds(); - return std.fmt.allocPrint(alloc, "{d:0>4}-{d:0>2}-{d:0>2} {d:0>2}:{d:0>2}", .{ - yd.year, - md.month.numeric(), - md.day_index + 1, - ds.getHoursIntoDay(), - ds.getMinutesIntoHour(), - }) catch "unknown"; -} - -/// Thread-safe shared state for cluster view data. -pub const ClusterState = struct { - mutex: std.Thread.Mutex = .{}, - rows: []NodeRow = &.{}, - is_loading: bool = true, - error_msg: ?[]const u8 = null, - last_refresh_epoch: i64 = 0, - allocator: Allocator, - - pub fn init(allocator: Allocator) ClusterState { - return .{ .allocator = allocator }; - } - - /// Replace current rows with new data. Frees old rows under mutex. 
- pub fn swapRows(self: *ClusterState, new_rows: []NodeRow) void { - self.mutex.lock(); - defer self.mutex.unlock(); - - self.freeRowsInternal(); - self.rows = new_rows; - self.is_loading = false; - self.last_refresh_epoch = std.time.timestamp(); - } - - pub fn lock(self: *ClusterState) void { - self.mutex.lock(); - } - - pub fn unlock(self: *ClusterState) void { - self.mutex.unlock(); - } - - pub fn getLastRefresh(self: *ClusterState) i64 { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.last_refresh_epoch; - } - - pub fn isLoading(self: *ClusterState) bool { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.is_loading; - } - - fn freeRowsInternal(self: *ClusterState) void { - for (self.rows) |row| { - self.allocator.free(row.name); - self.allocator.free(row.role); - self.allocator.free(row.ip); - self.allocator.free(row.pve_node); - self.allocator.free(row.vmid); - self.allocator.free(row.talos_ver); - self.allocator.free(row.k8s_ver); - self.allocator.free(row.etcd); - self.allocator.free(row.health); - } - if (self.rows.len > 0) { - self.allocator.free(self.rows); - } - } - - pub fn deinit(self: *ClusterState) void { - self.freeRowsInternal(); - } -}; - -/// Background poller that fetches data from Proxmox and Talos APIs. 
-pub const Poller = struct { - state: *ClusterState, - storage_state: *StorageState, - backup_state: *BackupState, - perf_state: *PerfState, - cfg: *const config.Config, - interval_ns: u64, - should_stop: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), - force_refresh: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), - thread: ?std.Thread = null, - refresh_context: ?*anyopaque = null, - refresh_callback: ?*const fn (*anyopaque) void = null, - allocator: Allocator, - - pub fn init( - allocator: Allocator, - state: *ClusterState, - storage_state: *StorageState, - backup_state: *BackupState, - perf_state: *PerfState, - cfg: *const config.Config, - interval_ms: u64, - ) Poller { - return .{ - .state = state, - .storage_state = storage_state, - .backup_state = backup_state, - .perf_state = perf_state, - .cfg = cfg, - .interval_ns = interval_ms * std.time.ns_per_ms, - .allocator = allocator, - }; - } - - pub fn start(self: *Poller) !void { - self.thread = try std.Thread.spawn(.{}, pollLoop, .{self}); - } - - pub fn stop(self: *Poller) void { - self.should_stop.store(true, .release); - if (self.thread) |t| { - t.join(); - self.thread = null; - } - } - - pub fn triggerRefresh(self: *Poller) void { - self.force_refresh.store(true, .release); - } - - pub fn setRefreshNotifier( - self: *Poller, - context: *anyopaque, - callback: *const fn (*anyopaque) void, - ) void { - self.refresh_context = context; - self.refresh_callback = callback; - } - - fn pollLoop(self: *Poller) void { - while (!self.should_stop.load(.acquire)) { - self.fetchAll(); - - // Sleep in small increments to allow responsive stopping/force-refresh - var slept: u64 = 0; - const step = 500 * std.time.ns_per_ms; // 500ms increments - while (slept < self.interval_ns) { - if (self.should_stop.load(.acquire)) return; - if (self.force_refresh.load(.acquire)) { - self.force_refresh.store(false, .release); - break; - } - std.Thread.sleep(step); - slept += step; - } - } - } - - fn 
fetchAll(self: *Poller) void { - const alloc = self.allocator; - - // Collect all configured nodes - var rows_list: std.ArrayListUnmanaged(NodeRow) = .empty; - - for (self.cfg.clusters) |cluster| { - // Find the matching PVE cluster config - var pve_cluster: ?config.ProxmoxCluster = null; - for (self.cfg.proxmox.clusters) |pc| { - if (std.mem.eql(u8, pc.name, cluster.proxmox_cluster)) { - pve_cluster = pc; - break; - } - } - - // Fetch PVE VM statuses - var pve_vms: []proxmox.VmStatus = &.{}; - if (pve_cluster) |pc| { - var pve_client = proxmox.ProxmoxClient.init(alloc, pc); - defer pve_client.deinit(); - pve_vms = pve_client.getClusterResources() catch &.{}; - } - defer { - for (pve_vms) |vm| { - alloc.free(vm.name); - alloc.free(vm.status); - alloc.free(vm.node); - } - if (pve_vms.len > 0) alloc.free(pve_vms); - } - - // Fetch Talos etcd members - var talos_client = talos.TalosClient.init(alloc, self.cfg.talos); - defer talos_client.deinit(); - const etcd_members = talos_client.getEtcdMembers(); - defer { - for (etcd_members) |m| alloc.free(m.hostname); - if (etcd_members.len > 0) alloc.free(etcd_members); - } - - // Build a row for each configured node - for (cluster.nodes) |node| { - // Match PVE VM status by VMID - var vm_status: []const u8 = "unknown"; - for (pve_vms) |vm| { - if (vm.vmid == node.proxmox_vmid) { - vm_status = vm.status; - break; - } - } - - // Match etcd membership by hostname - var etcd_role: []const u8 = "-"; - for (etcd_members) |m| { - if (std.mem.eql(u8, m.hostname, node.name)) { - etcd_role = if (m.is_learner) "learner" else "member"; - break; - } - } - - // Fetch Talos version for this node - var talos_ver: []const u8 = "-"; - var k8s_ver: []const u8 = "-"; - var version_result: ?talos.TalosVersion = null; - if (talos_client.getVersion(node.ip)) |ver| { - version_result = ver; - talos_ver = ver.talos_version; - k8s_ver = ver.kubernetes_version; - } - defer if (version_result) |ver| { - alloc.free(ver.node); - 
alloc.free(ver.talos_version); - alloc.free(ver.kubernetes_version); - }; - - // Determine health - const health: []const u8 = if (std.mem.eql(u8, vm_status, "running")) - (if (!std.mem.eql(u8, talos_ver, "-")) "healthy" else "degraded") - else if (std.mem.eql(u8, vm_status, "stopped")) - "stopped" - else - "unknown"; - - const vmid_str = std.fmt.allocPrint(alloc, "{d}", .{node.proxmox_vmid}) catch continue; - - rows_list.append(alloc, .{ - .name = alloc.dupe(u8, node.name) catch continue, - .role = alloc.dupe(u8, node.role) catch continue, - .ip = alloc.dupe(u8, node.ip) catch continue, - .pve_node = alloc.dupe(u8, node.proxmox_node) catch continue, - .vmid = vmid_str, - .talos_ver = alloc.dupe(u8, talos_ver) catch continue, - .k8s_ver = alloc.dupe(u8, k8s_ver) catch continue, - .etcd = alloc.dupe(u8, etcd_role) catch continue, - .health = alloc.dupe(u8, health) catch continue, - }) catch continue; - } - } - - const new_rows = rows_list.toOwnedSlice(alloc) catch return; - self.state.swapRows(new_rows); - - // Fetch storage data - self.fetchStorage(); - - // Fetch backup data - self.fetchBackups(); - - // Fetch performance data - self.fetchPerformance(); - - if (self.refresh_callback) |callback| { - if (self.refresh_context) |context| { - callback(context); - } - } - } - - fn fetchStorage(self: *Poller) void { - const alloc = self.allocator; - var pools_list: std.ArrayListUnmanaged(StoragePoolRow) = .empty; - var disks_list: std.ArrayListUnmanaged(VmDiskRow) = .empty; - - for (self.cfg.proxmox.clusters) |pc| { - var pve_client = proxmox.ProxmoxClient.init(alloc, pc); - defer pve_client.deinit(); - - // Fetch storage pools - const storage_pools = pve_client.getStoragePools() catch &.{}; - defer { - for (storage_pools) |sp| { - alloc.free(sp.name); - alloc.free(sp.node); - alloc.free(sp.pool_type); - alloc.free(sp.status); - } - if (storage_pools.len > 0) alloc.free(storage_pools); - } - - for (storage_pools) |sp| { - const pct: f64 = if (sp.maxdisk > 0) - @as(f64, 
@floatFromInt(sp.disk)) / @as(f64, @floatFromInt(sp.maxdisk)) * 100.0 - else - 0.0; - - pools_list.append(alloc, .{ - .name = alloc.dupe(u8, sp.name) catch continue, - .node = alloc.dupe(u8, sp.node) catch continue, - .pool_type = alloc.dupe(u8, sp.pool_type) catch continue, - .used_str = formatBytes(alloc, sp.disk), - .total_str = formatBytes(alloc, sp.maxdisk), - .status = alloc.dupe(u8, sp.status) catch continue, - .usage_pct = pct, - }) catch continue; - } - - // Fetch VMs for disk info - const vms = pve_client.getClusterResources() catch &.{}; - defer { - for (vms) |vm| { - alloc.free(vm.name); - alloc.free(vm.status); - alloc.free(vm.node); - } - if (vms.len > 0) alloc.free(vms); - } - - for (vms) |vm| { - disks_list.append(alloc, .{ - .vm_name = alloc.dupe(u8, vm.name) catch continue, - .vmid = std.fmt.allocPrint(alloc, "{d}", .{vm.vmid}) catch continue, - .pool = alloc.dupe(u8, vm.node) catch continue, - .size_str = formatBytes(alloc, vm.maxdisk), - .size_bytes = vm.maxdisk, - }) catch continue; - } - } - - const new_pools = pools_list.toOwnedSlice(alloc) catch return; - const new_disks = disks_list.toOwnedSlice(alloc) catch return; - self.storage_state.swapData(new_pools, new_disks); - } - - fn fetchBackups(self: *Poller) void { - const alloc = self.allocator; - var backups_list: std.ArrayListUnmanaged(BackupRow) = .empty; - - for (self.cfg.proxmox.clusters) |pc| { - var pve_client = proxmox.ProxmoxClient.init(alloc, pc); - defer pve_client.deinit(); - - // Get storage pools to know where to look for backups - const pools = pve_client.getStoragePools() catch &.{}; - defer { - for (pools) |sp| { - alloc.free(sp.name); - alloc.free(sp.node); - alloc.free(sp.pool_type); - alloc.free(sp.status); - } - if (pools.len > 0) alloc.free(pools); - } - - // Get VMs for name lookup - const vms = pve_client.getClusterResources() catch &.{}; - defer { - for (vms) |vm| { - alloc.free(vm.name); - alloc.free(vm.status); - alloc.free(vm.node); - } - if (vms.len > 0) 
alloc.free(vms); - } - - // For each storage pool, list backups - for (pools) |sp| { - const entries = pve_client.listBackups(sp.node, sp.name) catch &.{}; - defer { - for (entries) |e| { - alloc.free(e.volid); - alloc.free(e.node); - alloc.free(e.storage); - alloc.free(e.format); - } - if (entries.len > 0) alloc.free(entries); - } - - for (entries) |entry| { - // Find VM name by VMID - var vm_name: []const u8 = "unknown"; - for (vms) |vm| { - if (vm.vmid == entry.vmid) { - vm_name = vm.name; - break; - } - } - - // Compute age - const now = std.time.timestamp(); - const age_secs = now - entry.ctime; - const age_days: u32 = @intCast(@max(0, @divTrunc(age_secs, 86400))); - const is_stale = age_days > self.cfg.tui_settings.stale_days; - - backups_list.append(alloc, .{ - .proxmox_cluster = alloc.dupe(u8, pc.name) catch continue, - .volid = alloc.dupe(u8, entry.volid) catch continue, - .node = alloc.dupe(u8, entry.node) catch continue, - .storage = alloc.dupe(u8, entry.storage) catch continue, - .vm_name = alloc.dupe(u8, vm_name) catch continue, - .vmid = std.fmt.allocPrint(alloc, "{d}", .{entry.vmid}) catch continue, - .size_str = formatBytes(alloc, entry.size), - .date_str = formatEpoch(alloc, entry.ctime), - .age_days = age_days, - .is_stale = is_stale, - }) catch continue; - } - } - } - - const new_backups = backups_list.toOwnedSlice(alloc) catch return; - const new_k8s = self.fetchK8sBackups(); - self.backup_state.swapData(new_backups, new_k8s); - } - - fn fetchK8sBackups(self: *Poller) []K8sBackupRow { - const alloc = self.allocator; - const kubeconfig = kubernetes.deriveKubeconfig(alloc, self.cfg.talos.config_path) orelse return &.{}; - defer alloc.free(kubeconfig); - - var client = kubernetes.KubeClient.init(alloc, kubeconfig); - defer client.deinit(); - - const providers = client.detectProviders(); - var k8s_list: std.ArrayListUnmanaged(K8sBackupRow) = .empty; - - if (providers.volsync) { - const entries = client.getVolsyncSources(); - defer { - for (entries) 
|e| { - alloc.free(e.name); - alloc.free(e.namespace); - alloc.free(e.source_type); - alloc.free(e.status); - alloc.free(e.schedule); - alloc.free(e.last_run); - } - if (entries.len > 0) alloc.free(entries); - } - for (entries) |e| { - k8s_list.append(alloc, .{ - .name = alloc.dupe(u8, e.name) catch continue, - .namespace = alloc.dupe(u8, e.namespace) catch continue, - .source_type = alloc.dupe(u8, e.source_type) catch continue, - .status = alloc.dupe(u8, e.status) catch continue, - .schedule = alloc.dupe(u8, e.schedule) catch continue, - .last_run = alloc.dupe(u8, e.last_run) catch continue, - }) catch continue; - } - } - - if (providers.velero) { - const entries = client.getVeleroBackups(); - defer { - for (entries) |e| { - alloc.free(e.name); - alloc.free(e.namespace); - alloc.free(e.source_type); - alloc.free(e.status); - alloc.free(e.schedule); - alloc.free(e.last_run); - } - if (entries.len > 0) alloc.free(entries); - } - for (entries) |e| { - k8s_list.append(alloc, .{ - .name = alloc.dupe(u8, e.name) catch continue, - .namespace = alloc.dupe(u8, e.namespace) catch continue, - .source_type = alloc.dupe(u8, e.source_type) catch continue, - .status = alloc.dupe(u8, e.status) catch continue, - .schedule = alloc.dupe(u8, e.schedule) catch continue, - .last_run = alloc.dupe(u8, e.last_run) catch continue, - }) catch continue; - } - } - - return k8s_list.toOwnedSlice(alloc) catch &.{}; - } - - fn fetchPerformance(self: *Poller) void { - const alloc = self.allocator; - - // Host metrics from PVE API - var hosts_list: std.ArrayListUnmanaged(HostRow) = .empty; - for (self.cfg.proxmox.clusters) |pc| { - var pve_client = proxmox.ProxmoxClient.init(alloc, pc); - defer pve_client.deinit(); - - // Get distinct node names from cluster resources - const vms = pve_client.getClusterResources() catch &.{}; - defer { - for (vms) |vm| { - alloc.free(vm.name); - alloc.free(vm.status); - alloc.free(vm.node); - } - if (vms.len > 0) alloc.free(vms); - } - - // Collect unique node 
names - var seen_nodes: std.ArrayListUnmanaged([]const u8) = .empty; - defer { - for (seen_nodes.items) |n| alloc.free(n); - seen_nodes.deinit(alloc); - } - - for (vms) |vm| { - var found = false; - for (seen_nodes.items) |n| { - if (std.mem.eql(u8, n, vm.node)) { - found = true; - break; - } - } - if (!found) { - seen_nodes.append(alloc, alloc.dupe(u8, vm.node) catch continue) catch continue; - } - } - - for (seen_nodes.items) |node_name| { - const ns = pve_client.getNodeStatus(node_name) catch continue orelse continue; - const mem_pct: f64 = if (ns.maxmem > 0) - @as(f64, @floatFromInt(ns.mem)) / @as(f64, @floatFromInt(ns.maxmem)) * 100.0 - else - 0.0; - - hosts_list.append(alloc, .{ - .name = alloc.dupe(u8, ns.node) catch continue, - .cpu_pct = ns.cpu * 100.0, - .mem_used_str = formatBytes(alloc, ns.mem), - .mem_total_str = formatBytes(alloc, ns.maxmem), - .mem_pct = mem_pct, - }) catch continue; - - alloc.free(ns.node); - alloc.free(ns.status); - } - } - - // Pod metrics from Prometheus/VictoriaMetrics - var pods_list: std.ArrayListUnmanaged(PodMetricRow) = .empty; - var metrics_available = false; - - const kubeconfig = kubernetes.deriveKubeconfig(alloc, self.cfg.talos.config_path); - if (kubeconfig) |kc| { - defer alloc.free(kc); - - var mc = metrics_api.MetricsClient.init(alloc, kc); - defer mc.deinit(); - - if (mc.available) { - metrics_available = true; - - const cpu_data = mc.getPodCpu(); - defer self.freeMetricValues(cpu_data); - - const mem_data = mc.getPodMemory(); - defer self.freeMetricValues(mem_data); - - const rx_data = mc.getPodNetRx(); - defer self.freeMetricValues(rx_data); - - const tx_data = mc.getPodNetTx(); - defer self.freeMetricValues(tx_data); - - // Build pod map from CPU data (most common metric) - for (cpu_data) |cpu| { - const pod_name = getLabelStr(cpu.labels, "pod"); - const ns_name = getLabelStr(cpu.labels, "namespace"); - - // Find matching memory - var mem_val: f64 = 0; - for (mem_data) |m| { - if (std.mem.eql(u8, 
getLabelStr(m.labels, "pod"), pod_name) and - std.mem.eql(u8, getLabelStr(m.labels, "namespace"), ns_name)) - { - mem_val = m.value; - break; - } - } - - // Find matching network - var rx_val: f64 = 0; - var tx_val: f64 = 0; - for (rx_data) |r| { - if (std.mem.eql(u8, getLabelStr(r.labels, "pod"), pod_name) and - std.mem.eql(u8, getLabelStr(r.labels, "namespace"), ns_name)) - { - rx_val = r.value; - break; - } - } - for (tx_data) |t| { - if (std.mem.eql(u8, getLabelStr(t.labels, "pod"), pod_name) and - std.mem.eql(u8, getLabelStr(t.labels, "namespace"), ns_name)) - { - tx_val = t.value; - break; - } - } - - pods_list.append(alloc, .{ - .pod = alloc.dupe(u8, pod_name) catch continue, - .namespace = alloc.dupe(u8, ns_name) catch continue, - .cpu_str = std.fmt.allocPrint(alloc, "{d:.3}", .{cpu.value}) catch continue, - .mem_str = formatBytes(alloc, @intFromFloat(@max(0, mem_val))), - .net_rx_str = formatRate(alloc, rx_val), - .net_tx_str = formatRate(alloc, tx_val), - .cpu_cores = cpu.value, - .mem_bytes = mem_val, - .net_rx_bytes_sec = rx_val, - .net_tx_bytes_sec = tx_val, - }) catch continue; - } - } - } - - const new_hosts = hosts_list.toOwnedSlice(alloc) catch return; - const new_pods = pods_list.toOwnedSlice(alloc) catch return; - self.perf_state.swapData(new_hosts, new_pods, metrics_available); - } - - fn freeMetricValues(self: *Poller, values: []metrics_api.MetricsClient.PodMetricValue) void { - for (values) |v| { - var it = v.labels.iterator(); - while (it.next()) |entry| { - self.allocator.free(entry.key_ptr.*); - switch (entry.value_ptr.*) { - .string => |s| self.allocator.free(s), - else => {}, - } - } - var labels_copy = v.labels; - labels_copy.deinit(); - } - if (values.len > 0) self.allocator.free(values); - } - - pub fn deinit(self: *Poller) void { - self.stop(); - } -}; - -fn getLabelStr(labels: std.json.ObjectMap, key: []const u8) []const u8 { - const val = labels.get(key) orelse return ""; - return switch (val) { - .string => |s| s, - else => "", - 
}; -} diff --git a/tui/src/poller.rs b/tui/src/poller.rs new file mode 100644 index 0000000..7d57527 --- /dev/null +++ b/tui/src/poller.rs @@ -0,0 +1,479 @@ +use crate::config::Config; +use crate::integrations::{kubernetes, metrics::MetricsClient, proxmox, talos}; +use crate::models::{ + BackupRow, ClusterRow, DeleteAction, HostRow, K8sBackupRow, PodMetricRow, Snapshot, + StoragePoolRow, VmDiskRow, +}; +use crate::util::{age_days, format_bytes, format_epoch, format_rate, format_system_time}; +use std::cmp::Reverse; +use std::collections::{BTreeSet, HashMap}; +use std::sync::mpsc::{self, Receiver, RecvTimeoutError, Sender}; +use std::thread::{self, JoinHandle}; +use std::time::SystemTime; + +pub enum PollerCommand { + RefreshNow, + DeleteBackup(DeleteAction), + Shutdown, +} + +pub struct PollerHandle { + pub snapshots: Receiver, + pub commands: Sender, + join: Option>, +} + +impl PollerHandle { + pub fn spawn(config: Config) -> Self { + let (snapshot_tx, snapshot_rx) = mpsc::channel(); + let (command_tx, command_rx) = mpsc::channel(); + let join = thread::spawn(move || run_loop(config, snapshot_tx, command_rx)); + Self { + snapshots: snapshot_rx, + commands: command_tx, + join: Some(join), + } + } +} + +impl Drop for PollerHandle { + fn drop(&mut self) { + let _ = self.commands.send(PollerCommand::Shutdown); + if let Some(join) = self.join.take() { + let _ = join.join(); + } + } +} + +fn run_loop(config: Config, snapshots: Sender, commands: Receiver) { + let _ = snapshots.send(refresh_snapshot(&config, None)); + let interval = config.tui.refresh_interval; + loop { + match commands.recv_timeout(interval) { + Ok(PollerCommand::RefreshNow) => { + let _ = snapshots.send(refresh_snapshot(&config, None)); + } + Ok(PollerCommand::DeleteBackup(action)) => { + let outcome = delete_backup(&config, &action) + .err() + .map(|err| err.to_string()); + let _ = snapshots.send(refresh_snapshot(&config, outcome)); + } + Ok(PollerCommand::Shutdown) => break, + 
Err(RecvTimeoutError::Timeout) => { + let _ = snapshots.send(refresh_snapshot(&config, None)); + } + Err(RecvTimeoutError::Disconnected) => break, + } + } +} + +fn delete_backup(config: &Config, action: &DeleteAction) -> anyhow::Result<()> { + let Some(cluster) = config + .proxmox + .clusters + .iter() + .find(|cluster| cluster.name == action.proxmox_cluster) + else { + anyhow::bail!("unknown proxmox cluster {}", action.proxmox_cluster); + }; + proxmox::delete_backup(cluster, &action.node, &action.storage, &action.volid) +} + +pub fn refresh_snapshot(config: &Config, command_error: Option) -> Snapshot { + let mut errors = Vec::new(); + if let Some(error) = command_error { + errors.push(error); + } + + let mut snapshot = Snapshot { + loading: false, + ..Snapshot::default() + }; + let mut discovered_node_ips: HashMap = HashMap::new(); + let mut discovered_node_roles: HashMap = HashMap::new(); + + let mut vm_resources_by_cluster: HashMap> = HashMap::new(); + let mut storage_by_cluster: HashMap> = HashMap::new(); + let mut node_status_by_cluster: HashMap> = + HashMap::new(); + + for cluster in &config.proxmox.clusters { + match proxmox::get_cluster_resources(cluster) { + Ok(resources) => { + vm_resources_by_cluster.insert(cluster.name.clone(), resources); + } + Err(err) => errors.push(format!("{} VM resources: {err}", cluster.name)), + } + match proxmox::get_storage_pools(cluster) { + Ok(storage) => { + storage_by_cluster.insert(cluster.name.clone(), storage); + } + Err(err) => errors.push(format!("{} storage pools: {err}", cluster.name)), + } + + let mut node_statuses = HashMap::new(); + let unique_nodes = config + .clusters + .iter() + .filter(|managed| managed.proxmox_cluster == cluster.name) + .flat_map(|managed| managed.nodes.iter().map(|node| node.proxmox_node.clone())) + .collect::>(); + for node in unique_nodes { + if let Ok(status) = proxmox::get_node_status(cluster, &node) { + node_statuses.insert(node, status); + } + } + 
node_status_by_cluster.insert(cluster.name.clone(), node_statuses); + } + + match kubernetes::get_cluster_nodes() { + Ok(nodes) => { + for node in nodes { + if let Some(ip) = node.internal_ip { + discovered_node_ips.insert(node.name.clone(), ip); + } + discovered_node_roles.insert(node.name, node.role); + } + } + Err(err) => errors.push(format!("cluster nodes: {err}")), + } + + let etcd_members = talos::get_etcd_members(&config.talos).unwrap_or_else(|err| { + errors.push(format!("etcd members: {err}")); + Vec::new() + }); + let etcd_map = etcd_members + .into_iter() + .map(|member| { + ( + member.hostname, + if member.is_learner { + "learner".to_string() + } else { + "member".to_string() + }, + ) + }) + .collect::>(); + + let mut configured_vmids = BTreeSet::new(); + let mut configured_vmids_by_proxmox: HashMap> = HashMap::new(); + let mut configured_names = BTreeSet::new(); + for managed_cluster in &config.clusters { + let resources = vm_resources_by_cluster + .get(&managed_cluster.proxmox_cluster) + .cloned() + .unwrap_or_default(); + let resource_by_vmid = resources + .iter() + .map(|resource| (resource.vmid, resource.clone())) + .collect::>(); + + for node in &managed_cluster.nodes { + configured_vmids.insert(node.proxmox_vmid); + configured_vmids_by_proxmox + .entry(managed_cluster.proxmox_cluster.clone()) + .or_default() + .insert(node.proxmox_vmid); + configured_names.insert(node.name.clone()); + let live_ip = discovered_node_ips + .get(&node.name) + .cloned() + .unwrap_or_else(|| node.ip.clone()); + let talos_version = talos::get_version(&config.talos, &live_ip).ok(); + let vm = resource_by_vmid.get(&node.proxmox_vmid); + let health = match (vm.map(|vm| vm.status.as_str()), talos_version.as_ref()) { + (Some("running"), Some(_)) => "healthy", + (Some("running"), None) => "degraded", + (Some(_), _) => "stopped", + (None, Some(_)) => "unknown-vm", + (None, None) => "unknown", + }; + snapshot.cluster_rows.push(ClusterRow { + name: node.name.clone(), + role: 
node.role.clone(), + ip: live_ip, + pve_node: node.proxmox_node.clone(), + vmid: node.proxmox_vmid.to_string(), + talos_version: talos_version + .as_ref() + .map(|version| version.talos_version.clone()) + .unwrap_or_else(|| "-".to_string()), + kubernetes_version: talos_version + .as_ref() + .map(|version| version.kubernetes_version.clone()) + .unwrap_or_else(|| "-".to_string()), + etcd: etcd_map + .get(&node.name) + .cloned() + .unwrap_or_else(|| "-".to_string()), + health: health.to_string(), + }); + } + } + + for resources in vm_resources_by_cluster.values() { + for resource in resources { + if configured_vmids.contains(&resource.vmid) + || configured_names.contains(&resource.name) + { + continue; + } + let Some(ip) = discovered_node_ips.get(&resource.name).cloned() else { + continue; + }; + let talos_version = talos::get_version(&config.talos, &ip).ok(); + let health = match (resource.status.as_str(), talos_version.as_ref()) { + ("running", Some(_)) => "healthy", + ("running", None) => "degraded", + (_, Some(_)) => "unknown", + _ => "unknown", + }; + snapshot.cluster_rows.push(ClusterRow { + name: resource.name.clone(), + role: discovered_node_roles + .get(&resource.name) + .cloned() + .unwrap_or_else(|| infer_role_from_name(&resource.name)), + ip, + pve_node: resource.node.clone(), + vmid: resource.vmid.to_string(), + talos_version: talos_version + .as_ref() + .map(|version| version.talos_version.clone()) + .unwrap_or_else(|| "-".to_string()), + kubernetes_version: talos_version + .as_ref() + .map(|version| version.kubernetes_version.clone()) + .unwrap_or_else(|| "-".to_string()), + etcd: etcd_map + .get(&resource.name) + .cloned() + .unwrap_or_else(|| "-".to_string()), + health: health.to_string(), + }); + } + } + + for cluster in &config.proxmox.clusters { + let resources = vm_resources_by_cluster + .get(&cluster.name) + .cloned() + .unwrap_or_default(); + for resource in &resources { + snapshot.vm_disks.push(VmDiskRow { + vm_name: resource.name.clone(), + 
vmid: resource.vmid.to_string(), + node: resource.node.clone(), + size_str: format_bytes(resource.maxdisk), + size_bytes: resource.maxdisk, + }); + } + if let Some(storage) = storage_by_cluster.get(&cluster.name) { + let configured_backup_vmids = configured_vmids_by_proxmox + .get(&cluster.name) + .cloned() + .unwrap_or_default(); + for pool in storage { + let used = pool.disk.max(0); + let total = pool.maxdisk.max(0); + let usage_pct = if total == 0 { + 0.0 + } else { + (used as f64 / total as f64) * 100.0 + }; + snapshot.storage_pools.push(StoragePoolRow { + name: pool.name.clone(), + node: pool.node.clone(), + pool_type: pool.pool_type.clone(), + used_str: format_bytes(used), + total_str: format_bytes(total), + status: pool.status.clone(), + usage_pct, + }); + + match proxmox::list_backups(cluster, &pool.node, &pool.name) { + Ok(backups) => { + for backup in backups { + if !configured_backup_vmids.contains(&backup.vmid) { + continue; + } + let vm_name = resources + .iter() + .find(|resource| resource.vmid == backup.vmid) + .map(|resource| resource.name.clone()) + .unwrap_or_else(|| "unknown".to_string()); + let age = age_days(backup.ctime); + snapshot.backups.push(BackupRow { + proxmox_cluster: cluster.name.clone(), + volid: backup.volid.clone(), + node: backup.node.clone(), + storage: backup.storage.clone(), + vm_name, + vmid: backup.vmid.to_string(), + size_str: format_bytes(backup.size), + date_str: format_epoch(backup.ctime), + age_days: age, + is_stale: age >= config.tui.backups.stale_days, + }); + } + } + Err(err) => errors.push(format!( + "{} backups {}/{}: {err}", + cluster.name, pool.node, pool.name + )), + } + } + } + } + + match kubernetes::detect_providers() { + Ok(providers) => { + if providers.volsync { + match kubernetes::get_volsync_sources() { + Ok(entries) => snapshot + .k8s_backups + .extend(entries.into_iter().map(k8s_row)), + Err(err) => errors.push(format!("volsync backups: {err}")), + } + } + if providers.velero { + match 
kubernetes::get_velero_backups() { + Ok(entries) => snapshot + .k8s_backups + .extend(entries.into_iter().map(k8s_row)), + Err(err) => errors.push(format!("velero backups: {err}")), + } + } + } + Err(err) => errors.push(format!("kubernetes providers: {err}")), + } + + for (cluster_name, statuses) in &node_status_by_cluster { + for status in statuses.values() { + snapshot.hosts.push(HostRow { + name: format!("{cluster_name}/{}", status.node), + cpu_pct: status.cpu * 100.0, + mem_used_str: format_bytes(status.mem), + mem_total_str: format_bytes(status.maxmem), + mem_pct: if status.maxmem == 0 { + 0.0 + } else { + (status.mem as f64 / status.maxmem as f64) * 100.0 + }, + }); + } + } + + match MetricsClient::detect() { + Ok(Some(client)) => { + snapshot.metrics_available = true; + let cpu = client + .query("sum(rate(container_cpu_usage_seconds_total{container!=\"\",pod!=\"\"}[5m])) by (pod, namespace)") + .unwrap_or_else(|err| { + errors.push(format!("metrics cpu: {err}")); + Vec::new() + }); + let mem = client + .query("sum(container_memory_working_set_bytes{container!=\"\",pod!=\"\"}) by (pod, namespace)") + .unwrap_or_default(); + let rx = client + .query("sum(rate(container_network_receive_bytes_total{pod!=\"\"}[5m])) by (pod, namespace)") + .unwrap_or_default(); + let tx = client + .query("sum(rate(container_network_transmit_bytes_total{pod!=\"\"}[5m])) by (pod, namespace)") + .unwrap_or_default(); + + let mem_map = metric_map(mem); + let rx_map = metric_map(rx); + let tx_map = metric_map(tx); + for sample in cpu { + let pod = label(&sample.labels, "pod"); + let namespace = label(&sample.labels, "namespace"); + let key = format!("{namespace}/{pod}"); + let mem_value = mem_map.get(&key).copied().unwrap_or_default(); + let rx_value = rx_map.get(&key).copied().unwrap_or_default(); + let tx_value = tx_map.get(&key).copied().unwrap_or_default(); + snapshot.pods.push(PodMetricRow { + pod, + namespace, + cpu_str: format!("{:.3}", sample.value), + mem_str: 
format_bytes(mem_value as i64), + net_rx_str: format_rate(rx_value), + net_tx_str: format_rate(tx_value), + cpu_cores: sample.value, + mem_bytes: mem_value, + net_rx_bytes_sec: rx_value, + net_tx_bytes_sec: tx_value, + }); + } + } + Ok(None) => { + snapshot.metrics_available = false; + } + Err(err) => errors.push(format!("metrics detect: {err}")), + } + + snapshot.cluster_rows.sort_by(|a, b| a.name.cmp(&b.name)); + snapshot.storage_pools.sort_by(|a, b| a.name.cmp(&b.name)); + snapshot + .vm_disks + .sort_by_key(|disk| Reverse(disk.size_bytes)); + snapshot + .backups + .sort_by_key(|backup| Reverse(backup.age_days)); + snapshot + .k8s_backups + .sort_by(|a, b| a.namespace.cmp(&b.namespace).then(a.name.cmp(&b.name))); + snapshot.hosts.sort_by(|a, b| a.name.cmp(&b.name)); + snapshot.pods.sort_by(|a, b| { + b.cpu_cores + .partial_cmp(&a.cpu_cores) + .unwrap_or(std::cmp::Ordering::Equal) + }); + snapshot.last_refresh_label = Some(format_system_time(SystemTime::now())); + if !errors.is_empty() { + snapshot.last_error = Some(errors.join(" | ")); + } + snapshot +} + +fn infer_role_from_name(name: &str) -> String { + let lower = name.to_ascii_lowercase(); + if lower.contains("control") || lower.contains("-cp-") || lower.contains("master") { + "controlplane".to_string() + } else { + "worker".to_string() + } +} + +fn k8s_row(entry: kubernetes::K8sBackupEntry) -> K8sBackupRow { + K8sBackupRow { + name: entry.name, + namespace: entry.namespace, + source_type: entry.source_type, + status: entry.status, + schedule: entry.schedule, + last_run: entry.last_run, + } +} + +fn metric_map(samples: Vec) -> HashMap { + samples + .into_iter() + .map(|sample| { + let pod = label(&sample.labels, "pod"); + let namespace = label(&sample.labels, "namespace"); + (format!("{namespace}/{pod}"), sample.value) + }) + .collect() +} + +fn label(labels: &serde_json::Map, key: &str) -> String { + labels + .get(key) + .and_then(|value| value.as_str()) + .unwrap_or("unknown") + .to_string() +} diff --git 
a/tui/src/util.rs b/tui/src/util.rs new file mode 100644 index 0000000..8de6b83 --- /dev/null +++ b/tui/src/util.rs @@ -0,0 +1,113 @@ +use std::time::{SystemTime, UNIX_EPOCH}; + +pub fn truncate(value: &str, max: usize) -> String { + if value.chars().count() <= max { + return value.to_string(); + } + if max <= 1 { + return "…".to_string(); + } + let mut out = value.chars().take(max - 1).collect::(); + out.push('…'); + out +} + +pub fn format_bytes(bytes: i64) -> String { + let bytes = bytes.max(0) as f64; + const KIB: f64 = 1024.0; + const MIB: f64 = 1024.0 * 1024.0; + const GIB: f64 = 1024.0 * 1024.0 * 1024.0; + const TIB: f64 = 1024.0 * 1024.0 * 1024.0 * 1024.0; + + if bytes >= TIB { + format!("{:.1} TiB", bytes / TIB) + } else if bytes >= GIB { + format!("{:.1} GiB", bytes / GIB) + } else if bytes >= MIB { + format!("{:.1} MiB", bytes / MIB) + } else { + format!("{:.0} KiB", bytes / KIB) + } +} + +pub fn format_rate(bytes_per_sec: f64) -> String { + const KIB: f64 = 1024.0; + const MIB: f64 = 1024.0 * 1024.0; + if bytes_per_sec >= MIB { + format!("{:.1} MiB/s", bytes_per_sec / MIB) + } else if bytes_per_sec >= KIB { + format!("{:.1} KiB/s", bytes_per_sec / KIB) + } else { + format!("{:.0} B/s", bytes_per_sec) + } +} + +pub fn format_epoch(epoch: i64) -> String { + if epoch <= 0 { + return "unknown".to_string(); + } + #[allow(deprecated)] + { + use std::time::Duration; + let secs = Duration::from_secs(epoch as u64); + let t = UNIX_EPOCH + secs; + format_system_time(t) + } +} + +pub fn format_system_time(time: SystemTime) -> String { + let Ok(duration) = time.duration_since(UNIX_EPOCH) else { + return "unknown".to_string(); + }; + let secs = duration.as_secs() as i64; + chrono_like(secs) +} + +fn chrono_like(epoch: i64) -> String { + const SECS_PER_DAY: i64 = 86_400; + let days = epoch.div_euclid(SECS_PER_DAY); + let seconds = epoch.rem_euclid(SECS_PER_DAY); + let (year, month, day) = civil_from_days(days); + let hour = seconds / 3600; + let minute = (seconds % 
3600) / 60; + format!("{year:04}-{month:02}-{day:02} {hour:02}:{minute:02}") +} + +fn civil_from_days(days: i64) -> (i32, u32, u32) { + let z = days + 719_468; + let era = if z >= 0 { z } else { z - 146_096 } / 146_097; + let doe = z - era * 146_097; + let yoe = (doe - doe / 1_460 + doe / 36_524 - doe / 146_096) / 365; + let y = yoe + era * 400; + let doy = doe - (365 * yoe + yoe / 4 - yoe / 100); + let mp = (5 * doy + 2) / 153; + let d = doy - (153 * mp + 2) / 5 + 1; + let m = mp + if mp < 10 { 3 } else { -9 }; + let year = y + if m <= 2 { 1 } else { 0 }; + (year as i32, m as u32, d as u32) +} + +pub fn age_days(epoch: i64) -> u32 { + let now = SystemTime::now(); + let then = UNIX_EPOCH + std::time::Duration::from_secs(epoch.max(0) as u64); + let Ok(delta) = now.duration_since(then) else { + return 0; + }; + (delta.as_secs() / 86_400) as u32 +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn duration_helpers_format_bytes() { + assert_eq!(format_bytes(1024), "1 KiB"); + assert_eq!(format_bytes(1024 * 1024), "1.0 MiB"); + } + + #[test] + fn truncate_adds_ellipsis() { + assert_eq!(truncate("abcdef", 4), "abc…"); + } +} diff --git a/tui/src/views/backups.rs b/tui/src/views/backups.rs new file mode 100644 index 0000000..2e8422a --- /dev/null +++ b/tui/src/views/backups.rs @@ -0,0 +1,335 @@ +use crate::models::{BackupRow, DeleteAction, Snapshot}; +use crate::util::truncate; +use crossterm::event::{KeyCode, KeyEvent}; +use ratatui::{ + Frame, + layout::{Constraint, Direction, Layout, Rect}, + style::{Color, Modifier, Style}, + text::{Line, Span}, + widgets::{Block, Borders, Cell, Clear, Paragraph, Row, Table, TableState, Wrap}, +}; + +#[derive(Default)] +pub struct BackupsViewState { + pub selected: usize, + pub filter: String, + pub filter_mode: bool, + pub confirm_delete: bool, + pub pending_delete: Option, + pub pve_table: TableState, +} + +impl BackupsViewState { + pub fn handle_key(&mut self, key: KeyEvent, snapshot: &Snapshot) -> Option { + if 
self.confirm_delete { + match key.code { + KeyCode::Char('y') => { + self.confirm_delete = false; + return self.pending_delete.take(); + } + KeyCode::Char('n') | KeyCode::Esc => { + self.confirm_delete = false; + self.pending_delete = None; + } + _ => {} + } + return None; + } + + if self.filter_mode { + match key.code { + KeyCode::Esc => { + self.filter.clear(); + self.filter_mode = false; + } + KeyCode::Enter => self.filter_mode = false, + KeyCode::Backspace => { + self.filter.pop(); + } + KeyCode::Char(ch) => self.filter.push(ch), + _ => {} + } + return None; + } + + let filtered = filtered_pve(snapshot, &self.filter); + let filtered_k8s = filtered_k8s(snapshot, &self.filter); + let total = filtered.len() + filtered_k8s.len(); + if total == 0 { + self.selected = 0; + } + + match key.code { + KeyCode::Char('/') => self.filter_mode = true, + KeyCode::Esc => self.filter.clear(), + KeyCode::Char('j') | KeyCode::Down => { + if total > 0 { + self.selected = (self.selected + 1).min(total - 1); + } + } + KeyCode::Char('k') | KeyCode::Up => { + self.selected = self.selected.saturating_sub(1); + } + KeyCode::Char('g') => self.selected = 0, + KeyCode::Char('G') => { + if total > 0 { + self.selected = total - 1; + } + } + KeyCode::Char('d') => { + if self.selected < filtered.len() { + let row = filtered[self.selected]; + self.pending_delete = Some(DeleteAction { + proxmox_cluster: row.proxmox_cluster.clone(), + node: row.node.clone(), + storage: row.storage.clone(), + volid: row.volid.clone(), + }); + self.confirm_delete = true; + } + } + _ => {} + } + None + } +} + +pub fn render(frame: &mut Frame, area: Rect, state: &mut BackupsViewState, snapshot: &Snapshot) { + let pve = filtered_pve(snapshot, &state.filter); + let k8s = filtered_k8s(snapshot, &state.filter); + + let chunks = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Percentage(52), + Constraint::Percentage(48), + Constraint::Length(2), + ]) + .split(area); + + let pve_rows = 
pve.iter().map(|row| { + let age_style = if row.is_stale { + Style::default().fg(Color::Yellow) + } else { + Style::default() + }; + Row::new(vec![ + Cell::from(truncate(&row.vm_name, 18)), + Cell::from(row.vmid.clone()), + Cell::from(truncate(&row.date_str, 16)), + Cell::from(row.size_str.clone()), + Cell::from(truncate(&row.storage, 12)), + Cell::from(format!("{}d", row.age_days)).style(age_style), + ]) + }); + let pve_table = Table::new( + pve_rows, + [ + Constraint::Length(18), + Constraint::Length(8), + Constraint::Length(16), + Constraint::Length(12), + Constraint::Length(12), + Constraint::Min(8), + ], + ) + .header( + Row::new(vec!["VM Name", "VMID", "Date", "Size", "Storage", "Age"]).style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block(Block::default().borders(Borders::ALL).title("PVE Backups")) + .row_highlight_style( + Style::default() + .bg(Color::Blue) + .fg(Color::Black) + .add_modifier(Modifier::BOLD), + ); + state + .pve_table + .select(if pve.is_empty() || state.selected >= pve.len() { + None + } else { + Some(state.selected) + }); + frame.render_stateful_widget(pve_table, chunks[0], &mut state.pve_table); + + let k8s_rows = k8s.iter().enumerate().map(|(index, row)| { + let global_index = pve.len() + index; + let mut style = Style::default(); + if global_index == state.selected { + style = style + .bg(Color::Blue) + .fg(Color::Black) + .add_modifier(Modifier::BOLD); + } + Row::new(vec![ + Cell::from(truncate(&row.name, 20)), + Cell::from(truncate(&row.namespace, 14)), + Cell::from(truncate(&row.source_type, 10)), + Cell::from(truncate(&row.status, 12)), + Cell::from(truncate(&row.schedule, 14)), + Cell::from(truncate(&row.last_run, 16)), + ]) + .style(style) + }); + let k8s_table = Table::new( + k8s_rows, + [ + Constraint::Length(20), + Constraint::Length(14), + Constraint::Length(10), + Constraint::Length(12), + Constraint::Length(14), + Constraint::Min(12), + ], + ) + .header( + Row::new(vec![ + "Name", 
+ "Namespace", + "Source", + "Status", + "Schedule", + "Last Run", + ]) + .style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block(Block::default().borders(Borders::ALL).title("K8s Backups")); + frame.render_widget(k8s_table, chunks[1]); + + let footer = if state.filter_mode { + vec![ + Span::styled( + "Filter: ", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(&state.filter), + Span::styled( + " (Enter to apply, Esc to clear)", + Style::default().fg(Color::DarkGray), + ), + ] + } else { + vec![ + Span::raw(format!( + "filter={} ", + if state.filter.is_empty() { + "" + } else { + &state.filter + } + )), + Span::styled( + "/", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" search "), + Span::styled( + "d", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" delete backup "), + Span::styled( + "y/n", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" confirm/cancel"), + ] + }; + frame.render_widget( + Paragraph::new(Line::from(footer)) + .block(Block::default().borders(Borders::ALL).title("Backups")), + chunks[2], + ); + + if state.confirm_delete { + let popup = centered_rect(area, 60, 20); + frame.render_widget(Clear, popup); + let action = state.pending_delete.as_ref(); + let text = format!( + "Delete backup?\n\n{}\n\nPress y to confirm or n to cancel.", + action.map(|item| item.volid.as_str()).unwrap_or("unknown") + ); + frame.render_widget( + Paragraph::new(text).wrap(Wrap { trim: true }).block( + Block::default() + .borders(Borders::ALL) + .title("Confirm delete"), + ), + popup, + ); + } +} + +fn filtered_pve<'a>(snapshot: &'a Snapshot, filter: &str) -> Vec<&'a BackupRow> { + let needle = filter.to_lowercase(); + snapshot + .backups + .iter() + .filter(|row| { + needle.is_empty() + || [ + row.vm_name.as_str(), + row.vmid.as_str(), + row.storage.as_str(), + 
row.node.as_str(), + row.volid.as_str(), + ] + .iter() + .any(|value| value.to_lowercase().contains(&needle)) + }) + .collect() +} + +fn filtered_k8s<'a>(snapshot: &'a Snapshot, filter: &str) -> Vec<&'a crate::models::K8sBackupRow> { + let needle = filter.to_lowercase(); + snapshot + .k8s_backups + .iter() + .filter(|row| { + needle.is_empty() + || [ + row.name.as_str(), + row.namespace.as_str(), + row.source_type.as_str(), + row.status.as_str(), + ] + .iter() + .any(|value| value.to_lowercase().contains(&needle)) + }) + .collect() +} + +fn centered_rect(area: Rect, width_percent: u16, height_percent: u16) -> Rect { + let vertical = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Percentage((100 - height_percent) / 2), + Constraint::Percentage(height_percent), + Constraint::Percentage((100 - height_percent) / 2), + ]) + .split(area); + Layout::default() + .direction(Direction::Horizontal) + .constraints([ + Constraint::Percentage((100 - width_percent) / 2), + Constraint::Percentage(width_percent), + Constraint::Percentage((100 - width_percent) / 2), + ]) + .split(vertical[1])[1] +} diff --git a/tui/src/views/backups.zig b/tui/src/views/backups.zig deleted file mode 100644 index ec28ea8..0000000 --- a/tui/src/views/backups.zig +++ /dev/null @@ -1,518 +0,0 @@ -const std = @import("std"); -const vaxis = @import("vaxis"); -const poll = @import("../poll.zig"); - -pub const DeleteAction = struct { - proxmox_cluster: []const u8, - node: []const u8, - storage: []const u8, - volid: []const u8, -}; - -pub const BackupView = struct { - selected: u16 = 0, - scroll: u16 = 0, - num_backups: u16 = 0, - stale_days: u32, - allocator: std.mem.Allocator, - - // Total row count across both sections (for navigation) - total_rows: u16 = 0, - - // Confirmation dialog state - show_confirm: bool = false, - pending_idx: ?u16 = null, - pending_delete: ?DeleteAction = null, - - // Set by handleKey when user confirms deletion - delete_action: ?DeleteAction = 
null, - - // Search/filter state - filter_active: bool = false, - filter_buf: [64]u8 = undefined, - filter_len: u8 = 0, - - const pve_col_header = " VM Name VMID Date Size Storage Age"; - const k8s_col_header = " Name Namespace Source Status Schedule Last Run"; - - pub fn init(allocator: std.mem.Allocator, stale_days: u32) BackupView { - return .{ - .stale_days = stale_days, - .allocator = allocator, - }; - } - - pub fn handleKey(self: *BackupView, key: vaxis.Key) void { - // Confirmation dialog intercepts all input - if (self.show_confirm) { - if (key.matches('y', .{})) { - self.delete_action = self.pending_delete; - self.pending_delete = null; - self.pending_idx = null; - self.show_confirm = false; - } else if (key.matches('n', .{}) or key.matches(vaxis.Key.escape, .{})) { - self.show_confirm = false; - self.clearPendingDelete(); - self.pending_idx = null; - } - return; - } - - // Filter input mode intercepts all input - if (self.filter_active) { - if (key.matches(vaxis.Key.escape, .{}) or key.matches(vaxis.Key.enter, .{})) { - if (key.matches(vaxis.Key.escape, .{})) { - self.filter_len = 0; // Clear filter on Esc - } - self.filter_active = false; - } else if (key.matches(vaxis.Key.backspace, .{})) { - if (self.filter_len > 0) self.filter_len -= 1; - } else if (key.text) |text| { - for (text) |c| { - if (self.filter_len < self.filter_buf.len) { - self.filter_buf[self.filter_len] = c; - self.filter_len += 1; - } - } - } - return; - } - - if (key.matches('/', .{})) { - self.filter_active = true; - return; - } - - // Esc clears active filter when not in input mode - if (key.matches(vaxis.Key.escape, .{})) { - if (self.filter_len > 0) { - self.filter_len = 0; - self.selected = 0; - } - return; - } - - if (self.total_rows == 0) return; - - if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { - if (self.selected < self.total_rows - 1) self.selected += 1; - } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { - if (self.selected > 0) 
self.selected -= 1; - } else if (key.matches('g', .{})) { - self.selected = 0; - } else if (key.matches('G', .{ .shift = true })) { - if (self.total_rows > 0) self.selected = self.total_rows - 1; - } else if (key.matches('d', .{})) { - // Only allow deletion on PVE backup rows - if (self.selected < self.num_backups) { - self.clearPendingDelete(); - self.pending_idx = self.selected; - self.show_confirm = true; - self.delete_action = null; - } - } - } - - pub fn draw( - self: *BackupView, - win: vaxis.Window, - backups: []const poll.BackupRow, - k8s_backups: []const poll.K8sBackupRow, - ) void { - // Apply filter - const filter = if (self.filter_len > 0) self.filter_buf[0..self.filter_len] else ""; - - // Count filtered rows - var pve_count: u16 = 0; - for (backups) |b| { - if (self.matchesFilter(b, filter)) pve_count += 1; - } - var k8s_count: u16 = 0; - for (k8s_backups) |b| { - if (self.matchesK8sFilter(b, filter)) k8s_count += 1; - } - - self.num_backups = pve_count; - self.total_rows = pve_count + k8s_count; - - if (self.total_rows == 0) { - self.selected = 0; - self.scroll = 0; - if (filter.len > 0) { - drawCentered(win, "No backups matching filter"); - } else { - drawCentered(win, "No backups found"); - } - self.drawFilterBar(win); - return; - } - - // Clamp selection - if (self.selected >= self.total_rows) self.selected = self.total_rows - 1; - - const footer_rows = self.filterBarRows(); - const content_height = win.height -| footer_rows; - if (content_height == 0) { - self.scroll = 0; - self.drawFilterBar(win); - return; - } - - const visible_rows = calcVisibleRows(content_height, pve_count, k8s_count); - if (self.selected < self.scroll) { - self.scroll = self.selected; - } else if (self.selected >= self.scroll + visible_rows) { - self.scroll = self.selected - visible_rows + 1; - } - - if (self.scroll >= self.total_rows) self.scroll = self.total_rows - 1; - const end_idx = self.scroll +| visible_rows; - - var current_row: u16 = 0; - - // PVE Backups section 
- if (pve_count > 0) { - var pve_header_buf: [48]u8 = undefined; - const pve_header = std.fmt.bufPrint(&pve_header_buf, " PVE Backups ({d})", .{pve_count}) catch " PVE Backups"; - const hdr_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 8 }, .bold = true }; - _ = win.print(&.{.{ .text = pve_header, .style = hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - - const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; - _ = win.print(&.{.{ .text = pve_col_header, .style = col_hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - - var pve_idx: u16 = 0; - for (backups) |b| { - if (!self.matchesFilter(b, filter)) continue; - const logical_idx = pve_idx; - pve_idx += 1; - if (logical_idx < self.scroll) continue; - if (logical_idx >= end_idx) continue; - if (current_row >= content_height) break; - const is_selected = (logical_idx == self.selected); - drawBackupRow(win, current_row, b, is_selected, self.stale_days); - current_row += 1; - } - } - - // K8s Backups section - if (k8s_count > 0) { - if (pve_count > 0 and current_row < content_height -| 3) { - // Separator - current_row += 1; - } - - var k8s_header_buf: [48]u8 = undefined; - const k8s_header = std.fmt.bufPrint(&k8s_header_buf, " K8s Backups ({d})", .{k8s_count}) catch " K8s Backups"; - if (current_row < content_height -| 1) { - const hdr_style: vaxis.Style = .{ .fg = .{ .index = 5 }, .bg = .{ .index = 8 }, .bold = true }; - _ = win.print(&.{.{ .text = k8s_header, .style = hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - } - - if (current_row < content_height) { - const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; - _ = win.print(&.{.{ .text = k8s_col_header, .style = col_hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - } - - var k8s_idx: u16 = 0; - for (k8s_backups) |b| { - if 
(!self.matchesK8sFilter(b, filter)) continue; - const logical_idx = pve_count + k8s_idx; - k8s_idx += 1; - if (logical_idx < self.scroll) continue; - if (logical_idx >= end_idx) continue; - if (current_row >= content_height) break; - const is_selected = (logical_idx == self.selected); - drawK8sRow(win, current_row, b, is_selected); - current_row += 1; - } - } else if (pve_count > 0 and current_row < content_height -| 1) { - // Show "no K8s providers" hint - current_row += 1; - const hint_style: vaxis.Style = .{ .fg = .{ .index = 8 } }; - _ = win.print(&.{.{ .text = " K8s Backups: No providers detected", .style = hint_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - } - - // Filter bar at bottom - self.drawFilterBar(win); - - // Confirmation dialog overlay - if (self.show_confirm) { - if (self.pending_delete == null) { - if (self.pending_idx) |idx| { - if (self.filteredBackupIndex(backups, idx)) |actual_idx| { - self.pending_delete = self.actionFromBackup(backups[actual_idx]) catch null; - } else { - self.show_confirm = false; - self.pending_idx = null; - } - } - } - if (self.pending_delete) |action| { - self.drawConfirmDialog(win, action.volid); - } - } - } - - fn drawFilterBar(self: *BackupView, win: vaxis.Window) void { - if (!self.filter_active and self.filter_len == 0) return; - - const bar_row = win.height -| 1; - const filter_text = self.filter_buf[0..self.filter_len]; - - if (self.filter_active) { - var buf: [80]u8 = undefined; - const line = std.fmt.bufPrint(&buf, " / filter: {s}_", .{filter_text}) catch " / filter: "; - _ = win.print(&.{.{ .text = line, .style = .{ - .fg = .{ .index = 6 }, - .bg = .{ .index = 8 }, - .bold = true, - } }}, .{ - .row_offset = bar_row, - .wrap = .none, - }); - } else if (self.filter_len > 0) { - var buf: [80]u8 = undefined; - const line = std.fmt.bufPrint(&buf, " filter: {s} (/ to edit, Esc to clear)", .{filter_text}) catch ""; - _ = win.print(&.{.{ .text = line, .style = .{ - .fg = .{ .index = 8 }, - } }}, 
.{ - .row_offset = bar_row, - .wrap = .none, - }); - } - } - - fn matchesFilter(self: *BackupView, b: poll.BackupRow, filter: []const u8) bool { - _ = self; - if (filter.len == 0) return true; - if (containsInsensitive(b.vm_name, filter)) return true; - if (containsInsensitive(b.vmid, filter)) return true; - if (containsInsensitive(b.storage, filter)) return true; - if (containsInsensitive(b.date_str, filter)) return true; - return false; - } - - fn matchesK8sFilter(self: *BackupView, b: poll.K8sBackupRow, filter: []const u8) bool { - _ = self; - if (filter.len == 0) return true; - if (containsInsensitive(b.name, filter)) return true; - if (containsInsensitive(b.namespace, filter)) return true; - if (containsInsensitive(b.source_type, filter)) return true; - if (containsInsensitive(b.status, filter)) return true; - return false; - } - - fn drawBackupRow(win: vaxis.Window, row: u16, b: poll.BackupRow, selected: bool, stale_days: u32) void { - const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default; - const base_fg: vaxis.Color = if (selected) - .{ .index = 0 } - else if (b.age_days > stale_days * 2) - .{ .index = 1 } // red: very stale - else if (b.is_stale) - .{ .index = 3 } // yellow: stale - else - .{ .index = 7 }; // normal - - const style: vaxis.Style = .{ .fg = base_fg, .bg = bg }; - - var age_buf: [16]u8 = undefined; - const age_str = std.fmt.bufPrint(&age_buf, "{d}d", .{b.age_days}) catch "?d"; - - var buf: [256]u8 = undefined; - const line = std.fmt.bufPrint(&buf, " {s:<16} {s:<7} {s:<17} {s:<12} {s:<13} {s}", .{ - truncate(b.vm_name, 16), - truncate(b.vmid, 7), - truncate(b.date_str, 17), - truncate(b.size_str, 12), - truncate(b.storage, 13), - age_str, - }) catch return; - - _ = win.print(&.{.{ .text = line, .style = style }}, .{ - .row_offset = row, - .wrap = .none, - }); - } - - fn drawK8sRow(win: vaxis.Window, row: u16, b: poll.K8sBackupRow, selected: bool) void { - const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default; - 
const fg: vaxis.Color = if (selected) .{ .index = 0 } else .{ .index = 7 }; - const style: vaxis.Style = .{ .fg = fg, .bg = bg }; - - var buf: [256]u8 = undefined; - const line = std.fmt.bufPrint(&buf, " {s:<24} {s:<14} {s:<9} {s:<12} {s:<16} {s}", .{ - truncate(b.name, 24), - truncate(b.namespace, 14), - truncate(b.source_type, 9), - truncate(b.status, 12), - truncate(b.schedule, 16), - truncate(b.last_run, 20), - }) catch return; - - _ = win.print(&.{.{ .text = line, .style = style }}, .{ - .row_offset = row, - .wrap = .none, - }); - } - - fn drawConfirmDialog(self: *BackupView, win: vaxis.Window, volid: []const u8) void { - _ = self; - const box_w: u16 = 52; - const box_h: u16 = 7; - const x: i17 = @intCast(if (win.width > box_w) (win.width - box_w) / 2 else 0); - const y: i17 = @intCast(if (win.height > box_h) (win.height - box_h) / 2 else 0); - - const dialog = win.child(.{ - .x_off = x, - .y_off = y, - .width = box_w, - .height = box_h, - .border = .{ .where = .all, .style = .{ .fg = .{ .index = 1 } } }, - }); - - dialog.fill(.{ .style = .{ .bg = .{ .index = 0 } } }); - - const title_style: vaxis.Style = .{ .fg = .{ .index = 1 }, .bg = .{ .index = 0 }, .bold = true }; - const text_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bg = .{ .index = 0 } }; - const hint_style: vaxis.Style = .{ .fg = .{ .index = 8 }, .bg = .{ .index = 0 } }; - - _ = dialog.print(&.{.{ .text = " Delete Backup?", .style = title_style }}, .{ - .row_offset = 0, - .wrap = .none, - }); - - var name_buf: [48]u8 = undefined; - const name_line = std.fmt.bufPrint(&name_buf, " {s}", .{truncate(volid, 46)}) catch " ?"; - _ = dialog.print(&.{.{ .text = name_line, .style = text_style }}, .{ - .row_offset = 2, - .wrap = .none, - }); - - _ = dialog.print(&.{.{ .text = " y: confirm n/Esc: cancel", .style = hint_style }}, .{ - .row_offset = 4, - .wrap = .none, - }); - } - - /// Check if there's a pending delete action and consume it. 
- pub fn consumeDeleteAction(self: *BackupView) ?DeleteAction { - if (self.delete_action != null) { - self.pending_idx = null; - const action = self.delete_action.?; - self.delete_action = null; - return action; - } - return null; - } - - fn drawCentered(win: vaxis.Window, msg: []const u8) void { - const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; - _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ - .col_offset = col, - .row_offset = win.height / 2, - .wrap = .none, - }); - } - - fn truncate(s: []const u8, max: usize) []const u8 { - return if (s.len > max) s[0..max] else s; - } - - fn calcHeaderRows(pve_count: u16, k8s_count: u16) u16 { - var rows: u16 = 0; - if (pve_count > 0) rows += 2; - if (k8s_count > 0) { - if (pve_count > 0) rows += 1; - rows += 2; - } - return rows; - } - - fn filterBarRows(self: *BackupView) u16 { - return if (self.filter_active or self.filter_len > 0) 1 else 0; - } - - fn calcVisibleRows(content_height: u16, pve_count: u16, k8s_count: u16) u16 { - if (content_height == 0) return 0; - const header_rows = calcHeaderRows(pve_count, k8s_count); - return @max(@as(u16, 1), content_height -| header_rows); - } - - fn filteredBackupIndex(self: *BackupView, backups: []const poll.BackupRow, filtered_idx: u16) ?u16 { - const filter = if (self.filter_len > 0) self.filter_buf[0..self.filter_len] else ""; - var matched: u16 = 0; - for (backups, 0..) 
|b, i| { - if (!self.matchesFilter(b, filter)) continue; - if (matched == filtered_idx) return @intCast(i); - matched += 1; - } - return null; - } - - fn actionFromBackup(self: *BackupView, backup: poll.BackupRow) !DeleteAction { - const proxmox_cluster = try self.allocator.dupe(u8, backup.proxmox_cluster); - errdefer self.allocator.free(proxmox_cluster); - const node = try self.allocator.dupe(u8, backup.node); - errdefer self.allocator.free(node); - const storage = try self.allocator.dupe(u8, backup.storage); - errdefer self.allocator.free(storage); - const volid = try self.allocator.dupe(u8, backup.volid); - return .{ - .proxmox_cluster = proxmox_cluster, - .node = node, - .storage = storage, - .volid = volid, - }; - } - - fn clearPendingDelete(self: *BackupView) void { - if (self.pending_delete) |action| { - self.allocator.free(action.proxmox_cluster); - self.allocator.free(action.node); - self.allocator.free(action.storage); - self.allocator.free(action.volid); - self.pending_delete = null; - } - } -}; - -/// Case-insensitive substring check (ASCII only). 
-fn containsInsensitive(haystack: []const u8, needle: []const u8) bool { - if (needle.len == 0) return true; - if (needle.len > haystack.len) return false; - const limit = haystack.len - needle.len + 1; - for (0..limit) |i| { - var match = true; - for (0..needle.len) |j| { - if (toLower(haystack[i + j]) != toLower(needle[j])) { - match = false; - break; - } - } - if (match) return true; - } - return false; -} - -fn toLower(c: u8) u8 { - return if (c >= 'A' and c <= 'Z') c + 32 else c; -} diff --git a/tui/src/views/cluster.rs b/tui/src/views/cluster.rs new file mode 100644 index 0000000..4c983d2 --- /dev/null +++ b/tui/src/views/cluster.rs @@ -0,0 +1,94 @@ +use crate::models::Snapshot; +use crate::util::truncate; +use crossterm::event::{KeyCode, KeyEvent}; +use ratatui::{ + Frame, + layout::{Constraint, Rect}, + style::{Color, Modifier, Style}, + widgets::{Block, Borders, Cell, Paragraph, Row, Table, TableState}, +}; + +#[derive(Default)] +pub struct ClusterViewState { + pub table: TableState, +} + +impl ClusterViewState { + pub fn handle_key(&mut self, key: KeyEvent, row_count: usize) { + if row_count == 0 { + self.table.select(None); + return; + } + let selected = self.table.selected().unwrap_or(0); + let next = match key.code { + KeyCode::Char('j') | KeyCode::Down => selected.saturating_add(1).min(row_count - 1), + KeyCode::Char('k') | KeyCode::Up => selected.saturating_sub(1), + KeyCode::Char('g') => 0, + KeyCode::Char('G') => row_count - 1, + _ => selected, + }; + self.table.select(Some(next)); + } +} + +pub fn render(frame: &mut Frame, area: Rect, state: &mut ClusterViewState, snapshot: &Snapshot) { + if snapshot.cluster_rows.is_empty() { + frame.render_widget( + Paragraph::new("No cluster data available") + .block(Block::default().borders(Borders::ALL).title("Cluster")), + area, + ); + return; + } + + let rows = snapshot.cluster_rows.iter().map(|row| { + Row::new(vec![ + Cell::from(truncate(&row.name, 16)), + Cell::from(truncate(&row.role, 12)), + 
Cell::from(truncate(&row.ip, 16)), + Cell::from(truncate(&row.pve_node, 12)), + Cell::from(row.vmid.clone()), + Cell::from(truncate(&row.talos_version, 12)), + Cell::from(truncate(&row.kubernetes_version, 12)), + Cell::from(truncate(&row.etcd, 10)), + Cell::from(truncate(&row.health, 12)), + ]) + }); + + let table = Table::new( + rows, + [ + Constraint::Length(16), + Constraint::Length(12), + Constraint::Length(16), + Constraint::Length(12), + Constraint::Length(8), + Constraint::Length(12), + Constraint::Length(12), + Constraint::Length(10), + Constraint::Min(10), + ], + ) + .header( + Row::new(vec![ + "Name", "Role", "IP", "PVE Node", "VMID", "Talos", "K8s", "Etcd", "Health", + ]) + .style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block(Block::default().borders(Borders::ALL).title("Cluster")) + .row_highlight_style( + Style::default() + .bg(Color::Blue) + .fg(Color::Black) + .add_modifier(Modifier::BOLD), + ); + + if state.table.selected().is_none() { + state.table.select(Some(0)); + } + frame.render_stateful_widget(table, area, &mut state.table); +} diff --git a/tui/src/views/cluster.zig b/tui/src/views/cluster.zig deleted file mode 100644 index d6f5658..0000000 --- a/tui/src/views/cluster.zig +++ /dev/null @@ -1,82 +0,0 @@ -const std = @import("std"); -const vaxis = @import("vaxis"); -const poll = @import("../poll.zig"); - -const Table = vaxis.widgets.Table; -const Cell = vaxis.Cell; - -pub const ClusterView = struct { - table_ctx: Table.TableContext, - num_rows: u16 = 0, - - pub fn init() ClusterView { - return .{ - .table_ctx = .{ - .active = true, - .selected_bg = .{ .index = 4 }, - .selected_fg = .{ .index = 0 }, - .active_bg = .{ .index = 4 }, - .active_fg = .{ .index = 0 }, - .hdr_bg_1 = .{ .index = 8 }, - .hdr_bg_2 = .{ .index = 8 }, - .row_bg_1 = .default, - .row_bg_2 = .default, - .col_width = .dynamic_fill, - .header_names = .{ .custom = &.{ - "Name", "Role", "IP", "PVE Node", "VMID", "Talos Ver", "K8s Ver", 
"Etcd", "Health", - } }, - }, - }; - } - - pub fn handleKey(self: *ClusterView, key: vaxis.Key) void { - if (self.num_rows == 0) return; - - if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { - if (self.table_ctx.row < self.num_rows - 1) { - self.table_ctx.row += 1; - } - } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { - if (self.table_ctx.row > 0) { - self.table_ctx.row -= 1; - } - } else if (key.matches('g', .{})) { - // gg: go to top (single g for now) - self.table_ctx.row = 0; - } else if (key.matches('G', .{ .shift = true })) { - // G: go to bottom - if (self.num_rows > 0) { - self.table_ctx.row = self.num_rows - 1; - } - } - } - - pub fn draw(self: *ClusterView, alloc: std.mem.Allocator, win: vaxis.Window, rows: []const poll.NodeRow) void { - self.num_rows = @intCast(rows.len); - if (rows.len == 0) { - self.drawEmpty(win); - return; - } - - // Clamp selected row - if (self.table_ctx.row >= self.num_rows) { - self.table_ctx.row = self.num_rows - 1; - } - - Table.drawTable(alloc, win, rows, &self.table_ctx) catch { - self.drawEmpty(win); - }; - } - - fn drawEmpty(self: *ClusterView, win: vaxis.Window) void { - _ = self; - const msg = "No cluster data available"; - const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; - const row: u16 = win.height / 2; - _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ - .col_offset = col, - .row_offset = row, - .wrap = .none, - }); - } -}; diff --git a/tui/src/views/mod.rs b/tui/src/views/mod.rs new file mode 100644 index 0000000..660257f --- /dev/null +++ b/tui/src/views/mod.rs @@ -0,0 +1,4 @@ +pub mod backups; +pub mod cluster; +pub mod performance; +pub mod storage; diff --git a/tui/src/views/performance.rs b/tui/src/views/performance.rs new file mode 100644 index 0000000..0f84c32 --- /dev/null +++ b/tui/src/views/performance.rs @@ -0,0 +1,311 @@ +use crate::models::{PodMetricRow, Snapshot}; +use crate::util::truncate; 
+use crossterm::event::{KeyCode, KeyEvent}; +use ratatui::{ + Frame, + layout::{Constraint, Direction, Layout, Rect}, + style::{Color, Modifier, Style}, + text::{Line, Span}, + widgets::{Block, Borders, Cell, Paragraph, Row, Table, TableState}, +}; +use std::cmp::Ordering; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SortColumn { + Pod, + Namespace, + Cpu, + Memory, + NetRx, + NetTx, +} + +pub struct PerformanceViewState { + pub table: TableState, + pub sort_col: SortColumn, + pub sort_asc: bool, + pub ns_index: usize, +} + +impl Default for PerformanceViewState { + fn default() -> Self { + Self { + table: TableState::default(), + sort_col: SortColumn::Cpu, + sort_asc: false, + ns_index: 0, + } + } +} + +impl PerformanceViewState { + pub fn handle_key(&mut self, key: KeyEvent, pod_count: usize, namespace_count: usize) { + match key.code { + KeyCode::Char('s') => { + self.sort_col = match self.sort_col { + SortColumn::Pod => SortColumn::Namespace, + SortColumn::Namespace => SortColumn::Cpu, + SortColumn::Cpu => SortColumn::Memory, + SortColumn::Memory => SortColumn::NetRx, + SortColumn::NetRx => SortColumn::NetTx, + SortColumn::NetTx => SortColumn::Pod, + } + } + KeyCode::Char('S') => self.sort_asc = !self.sort_asc, + KeyCode::Char('n') => { + self.ns_index = if namespace_count == 0 { + 0 + } else { + (self.ns_index + 1) % (namespace_count + 1) + }; + } + KeyCode::Char('j') | KeyCode::Down => { + if pod_count > 0 { + let next = self + .table + .selected() + .unwrap_or(0) + .saturating_add(1) + .min(pod_count - 1); + self.table.select(Some(next)); + } + } + KeyCode::Char('k') | KeyCode::Up => { + let next = self.table.selected().unwrap_or(0).saturating_sub(1); + self.table.select(Some(next)); + } + KeyCode::Char('g') => self.table.select(Some(0)), + KeyCode::Char('G') => { + if pod_count > 0 { + self.table.select(Some(pod_count - 1)); + } + } + _ => {} + } + } +} + +pub fn namespace_count(snapshot: &Snapshot) -> usize { + 
unique_namespaces(snapshot).len() +} + +pub fn visible_pod_count(snapshot: &Snapshot, ns_index: usize) -> usize { + let namespaces = unique_namespaces(snapshot); + let active_ns = if ns_index == 0 { + None + } else { + namespaces.get(ns_index - 1).map(String::as_str) + }; + snapshot + .pods + .iter() + .filter(|pod| active_ns.map(|ns| pod.namespace == ns).unwrap_or(true)) + .count() +} + +pub fn render( + frame: &mut Frame, + area: Rect, + state: &mut PerformanceViewState, + snapshot: &Snapshot, +) { + if !snapshot.metrics_available && snapshot.hosts.is_empty() { + frame.render_widget( + Paragraph::new("No metrics backend detected") + .block(Block::default().borders(Borders::ALL).title("Performance")), + area, + ); + return; + } + + let namespaces = unique_namespaces(snapshot); + if state.ns_index > namespaces.len() { + state.ns_index = 0; + } + let active_ns = if state.ns_index == 0 { + None + } else { + Some(namespaces[state.ns_index - 1].as_str()) + }; + + let mut pods = snapshot + .pods + .iter() + .filter(|pod| active_ns.map(|ns| pod.namespace == ns).unwrap_or(true)) + .cloned() + .collect::>(); + sort_pods(&mut pods, state.sort_col, state.sort_asc); + if pods.is_empty() { + state.table.select(None); + } else { + let selected = state.table.selected().unwrap_or(0).min(pods.len() - 1); + state.table.select(Some(selected)); + } + + let chunks = Layout::default() + .direction(Direction::Vertical) + .constraints([ + Constraint::Percentage(35), + Constraint::Percentage(60), + Constraint::Length(2), + ]) + .split(area); + + let host_rows = snapshot.hosts.iter().map(|host| { + Row::new(vec![ + Cell::from(truncate(&host.name, 22)), + Cell::from(format!("{:>5.1}%", host.cpu_pct)), + Cell::from(format!("{} / {}", host.mem_used_str, host.mem_total_str)), + Cell::from(format!("{:>5.1}%", host.mem_pct)), + ]) + }); + let host_table = Table::new( + host_rows, + [ + Constraint::Length(22), + Constraint::Length(8), + Constraint::Length(24), + Constraint::Min(8), + ], + ) + 
.header( + Row::new(vec!["Host", "CPU", "Memory", "Mem %"]).style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block( + Block::default() + .borders(Borders::ALL) + .title("Host Overview"), + ); + frame.render_widget(host_table, chunks[0]); + + let pod_rows = pods.iter().map(|pod| { + Row::new(vec![ + Cell::from(truncate(&pod.pod, 20)), + Cell::from(truncate(&pod.namespace, 14)), + Cell::from(pod.cpu_str.clone()), + Cell::from(pod.mem_str.clone()), + Cell::from(pod.net_rx_str.clone()), + Cell::from(pod.net_tx_str.clone()), + ]) + }); + let title = format!("Pod Metrics [ns: {}]", active_ns.unwrap_or("all")); + let pod_table = Table::new( + pod_rows, + [ + Constraint::Length(20), + Constraint::Length(14), + Constraint::Length(10), + Constraint::Length(12), + Constraint::Length(12), + Constraint::Min(12), + ], + ) + .header( + Row::new(vec![ + "Pod", + "Namespace", + "CPU", + "Memory", + "Net RX", + "Net TX", + ]) + .style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block(Block::default().borders(Borders::ALL).title(title)) + .row_highlight_style( + Style::default() + .bg(Color::Blue) + .fg(Color::Black) + .add_modifier(Modifier::BOLD), + ); + if state.table.selected().is_none() && !pods.is_empty() { + state.table.select(Some(0)); + } + frame.render_stateful_widget(pod_table, chunks[1], &mut state.table); + + let hint = Line::from(vec![ + Span::styled( + "s/S", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" sort "), + Span::styled( + "n", + Style::default() + .fg(Color::Yellow) + .add_modifier(Modifier::BOLD), + ), + Span::raw(" namespace "), + Span::raw(format!( + "current sort={}{}", + sort_name(state.sort_col), + if state.sort_asc { " asc" } else { " desc" } + )), + ]); + frame.render_widget( + Paragraph::new(hint).block(Block::default().borders(Borders::ALL).title("Performance")), + chunks[2], + ); +} + +fn unique_namespaces(snapshot: 
&Snapshot) -> Vec { + let mut namespaces = snapshot + .pods + .iter() + .map(|pod| pod.namespace.clone()) + .collect::>(); + namespaces.sort(); + namespaces.dedup(); + namespaces +} + +fn sort_pods(pods: &mut [PodMetricRow], sort_col: SortColumn, sort_asc: bool) { + pods.sort_by(|a, b| { + let ordering = match sort_col { + SortColumn::Pod => a.pod.cmp(&b.pod), + SortColumn::Namespace => a.namespace.cmp(&b.namespace), + SortColumn::Cpu => a + .cpu_cores + .partial_cmp(&b.cpu_cores) + .unwrap_or(Ordering::Equal), + SortColumn::Memory => a + .mem_bytes + .partial_cmp(&b.mem_bytes) + .unwrap_or(Ordering::Equal), + SortColumn::NetRx => a + .net_rx_bytes_sec + .partial_cmp(&b.net_rx_bytes_sec) + .unwrap_or(Ordering::Equal), + SortColumn::NetTx => a + .net_tx_bytes_sec + .partial_cmp(&b.net_tx_bytes_sec) + .unwrap_or(Ordering::Equal), + }; + if sort_asc { + ordering + } else { + ordering.reverse() + } + }); +} + +fn sort_name(column: SortColumn) -> &'static str { + match column { + SortColumn::Pod => "pod", + SortColumn::Namespace => "namespace", + SortColumn::Cpu => "cpu", + SortColumn::Memory => "memory", + SortColumn::NetRx => "net-rx", + SortColumn::NetTx => "net-tx", + } +} diff --git a/tui/src/views/performance.zig b/tui/src/views/performance.zig deleted file mode 100644 index 01152bc..0000000 --- a/tui/src/views/performance.zig +++ /dev/null @@ -1,393 +0,0 @@ -const std = @import("std"); -const vaxis = @import("vaxis"); -const poll = @import("../poll.zig"); - -const SortColumn = enum { pod, namespace, cpu, memory, net_rx, net_tx }; - -pub const PerformanceView = struct { - // Pod table navigation - selected: u16 = 0, - scroll: u16 = 0, - num_pods: u16 = 0, - - // Sorting - sort_col: SortColumn = .cpu, - sort_asc: bool = false, // descending by default (highest first) - - // Namespace filter - ns_filter: ?[]const u8 = null, // null = all namespaces - ns_index: u16 = 0, // index into discovered namespaces (0 = all) - - const host_header = " Host Overview"; - const 
host_col_header = " Node CPU Memory"; - const pod_col_header = " Pod Namespace CPU Memory Net RX Net TX"; - - pub fn init() PerformanceView { - return .{}; - } - - pub fn handleKey(self: *PerformanceView, key: vaxis.Key) void { - if (key.matches('s', .{})) { - self.cycleSortCol(); - } else if (key.matches('S', .{ .shift = true })) { - self.sort_asc = !self.sort_asc; - } else if (key.matches('n', .{})) { - self.ns_index +%= 1; // wraps, clamped in draw - } else if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { - if (self.num_pods > 0 and self.selected < self.num_pods - 1) self.selected += 1; - } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { - if (self.selected > 0) self.selected -= 1; - } else if (key.matches('g', .{})) { - self.selected = 0; - } else if (key.matches('G', .{ .shift = true })) { - if (self.num_pods > 0) self.selected = self.num_pods - 1; - } - } - - fn cycleSortCol(self: *PerformanceView) void { - self.sort_col = switch (self.sort_col) { - .pod => .namespace, - .namespace => .cpu, - .cpu => .memory, - .memory => .net_rx, - .net_rx => .net_tx, - .net_tx => .pod, - }; - } - - pub fn draw( - self: *PerformanceView, - alloc: std.mem.Allocator, - win: vaxis.Window, - hosts: []const poll.HostRow, - pods: []const poll.PodMetricRow, - metrics_available: bool, - ) void { - if (!metrics_available and hosts.len == 0) { - drawCentered(win, "No metrics backend detected"); - return; - } - - var current_row: u16 = 0; - - // Host overview section - if (hosts.len > 0) { - const hdr_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 8 }, .bold = true }; - _ = win.print(&.{.{ .text = host_header, .style = hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - - const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; - _ = win.print(&.{.{ .text = host_col_header, .style = col_hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; 
- - for (hosts) |h| { - if (current_row >= win.height -| 4) break; - self.drawHostRow(win, current_row, h); - current_row += 1; - } - current_row += 1; // spacing - } - - // Pod metrics section - if (!metrics_available) { - if (current_row < win.height -| 2) { - const hint: vaxis.Style = .{ .fg = .{ .index = 8 } }; - _ = win.print(&.{.{ .text = " Pod Metrics: No Prometheus/VictoriaMetrics detected", .style = hint }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - } - return; - } - - // Discover namespaces and apply filter - var namespaces: std.ArrayListUnmanaged([]const u8) = .empty; - defer namespaces.deinit(alloc); - for (pods) |p| { - var found = false; - for (namespaces.items) |ns| { - if (std.mem.eql(u8, ns, p.namespace)) { - found = true; - break; - } - } - if (!found) { - namespaces.append(alloc, p.namespace) catch continue; - } - } - - // Sort namespaces alphabetically - std.mem.sort([]const u8, namespaces.items, {}, struct { - fn cmp(_: void, a: []const u8, b: []const u8) bool { - return std.mem.order(u8, a, b) == .lt; - } - }.cmp); - - // Clamp namespace index (0 = all, 1..N = specific) - const total_ns = namespaces.items.len; - if (total_ns > 0 and self.ns_index > total_ns) { - self.ns_index = 0; - } - - const active_ns: ?[]const u8 = if (self.ns_index > 0 and self.ns_index <= total_ns) - namespaces.items[self.ns_index - 1] - else - null; - - // Filter pods by namespace - var filtered: std.ArrayListUnmanaged(poll.PodMetricRow) = .empty; - defer filtered.deinit(alloc); - for (pods) |p| { - if (active_ns) |ns| { - if (!std.mem.eql(u8, p.namespace, ns)) continue; - } - filtered.append(alloc, p) catch continue; - } - - // Sort filtered pods - self.sortPods(filtered.items); - - self.num_pods = @intCast(filtered.items.len); - if (self.num_pods == 0) { - self.selected = 0; - self.scroll = 0; - } else { - if (self.selected >= self.num_pods) self.selected = self.num_pods - 1; - if (self.scroll >= self.num_pods) self.scroll = self.num_pods - 1; - } - - 
// Pod header - { - var hdr_buf: [64]u8 = undefined; - const ns_label = if (active_ns) |ns| ns else "all"; - const pod_header = std.fmt.bufPrint(&hdr_buf, " Pod Metrics ({d}) [ns: {s}]", .{ - filtered.items.len, ns_label, - }) catch " Pod Metrics"; - const hdr_style: vaxis.Style = .{ .fg = .{ .index = 5 }, .bg = .{ .index = 8 }, .bold = true }; - if (current_row < win.height -| 2) { - _ = win.print(&.{.{ .text = pod_header, .style = hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - } - } - - // Sort indicator in column headers - { - const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; - if (current_row < win.height -| 1) { - _ = win.print(&.{.{ .text = pod_col_header, .style = col_hdr_style }}, .{ - .row_offset = current_row, - .wrap = .none, - }); - current_row += 1; - } - } - - // Scrolling - const visible = win.height -| current_row -| 1; - if (visible == 0) { - self.scroll = 0; - return; - } - if (self.selected < self.scroll) { - self.scroll = self.selected; - } else if (self.selected >= self.scroll + visible) { - self.scroll = self.selected - visible + 1; - } - - // Pod rows - const start = self.scroll; - const end: u16 = @intCast(@min(filtered.items.len, start + visible)); - var idx: u16 = 0; - for (filtered.items[start..end]) |p| { - if (current_row >= win.height -| 1) break; - const is_selected = (start + idx == self.selected); - drawPodRow(win, current_row, p, is_selected); - current_row += 1; - idx += 1; - } - - // Status hints at bottom - if (win.height > 1) { - const sort_name = switch (self.sort_col) { - .pod => "pod", - .namespace => "ns", - .cpu => "cpu", - .memory => "mem", - .net_rx => "rx", - .net_tx => "tx", - }; - const dir = if (self.sort_asc) "asc" else "desc"; - var hint_buf: [64]u8 = undefined; - const hint = std.fmt.bufPrint(&hint_buf, " sort: {s} ({s}) s:cycle S:reverse n:namespace", .{ sort_name, dir }) catch ""; - _ = win.print(&.{.{ .text = hint, .style = .{ .fg = .{ 
.index = 8 } } }}, .{ - .row_offset = win.height - 1, - .wrap = .none, - }); - } - } - - fn drawHostRow(self: *PerformanceView, win: vaxis.Window, row: u16, h: poll.HostRow) void { - _ = self; - const style: vaxis.Style = .{ .fg = .{ .index = 7 } }; - - var buf: [128]u8 = undefined; - const text = std.fmt.bufPrint(&buf, " {s:<18}", .{truncate(h.name, 18)}) catch return; - _ = win.print(&.{.{ .text = text, .style = style }}, .{ - .row_offset = row, - .wrap = .none, - }); - - // CPU bar at col 20 - drawBar(win, row, 20, h.cpu_pct, 15); - - // Memory bar at col 52 - var mem_buf: [32]u8 = undefined; - const mem_label = std.fmt.bufPrint(&mem_buf, " {s}/{s}", .{ - truncate(h.mem_used_str, 12), - truncate(h.mem_total_str, 12), - }) catch ""; - drawBar(win, row, 52, h.mem_pct, 15); - - _ = win.print(&.{.{ .text = mem_label, .style = .{ .fg = .{ .index = 8 } } }}, .{ - .col_offset = 74, - .row_offset = row, - .wrap = .none, - }); - } - - fn drawBar(win: vaxis.Window, row: u16, col: u16, pct: f64, width: u16) void { - const bar_color: vaxis.Color = if (pct > 90) - .{ .index = 1 } // red - else if (pct > 70) - .{ .index = 3 } // yellow - else - .{ .index = 2 }; // green - - const filled: u16 = @intFromFloat(@min( - @as(f64, @floatFromInt(width)), - @round(pct / 100.0 * @as(f64, @floatFromInt(width))), - )); - const empty_count = width - filled; - - var fill_buf: [60]u8 = undefined; - var fill_len: usize = 0; - for (0..filled) |_| { - const ch = "\u{2588}"; - @memcpy(fill_buf[fill_len..][0..ch.len], ch); - fill_len += ch.len; - } - - var empty_buf: [60]u8 = undefined; - var empty_len: usize = 0; - for (0..empty_count) |_| { - const ch = "\u{2591}"; - @memcpy(empty_buf[empty_len..][0..ch.len], ch); - empty_len += ch.len; - } - - var pct_buf: [8]u8 = undefined; - const pct_str = std.fmt.bufPrint(&pct_buf, "] {d:>3.0}%", .{pct}) catch "] ?%"; - - _ = win.print(&.{ - .{ .text = "[", .style = .{ .fg = .{ .index = 7 } } }, - .{ .text = fill_buf[0..fill_len], .style = .{ .fg = 
bar_color } }, - .{ .text = empty_buf[0..empty_len], .style = .{ .fg = .{ .index = 8 } } }, - .{ .text = pct_str, .style = .{ .fg = .{ .index = 7 } } }, - }, .{ - .col_offset = col, - .row_offset = row, - .wrap = .none, - }); - } - - fn drawPodRow(win: vaxis.Window, row: u16, p: poll.PodMetricRow, selected: bool) void { - const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default; - const fg: vaxis.Color = if (selected) .{ .index = 0 } else .{ .index = 7 }; - const style: vaxis.Style = .{ .fg = fg, .bg = bg }; - - var buf: [256]u8 = undefined; - const line = std.fmt.bufPrint(&buf, " {s:<33} {s:<16} {s:<9} {s:<12} {s:<12} {s}", .{ - truncate(p.pod, 33), - truncate(p.namespace, 16), - truncate(p.cpu_str, 9), - truncate(p.mem_str, 12), - truncate(p.net_rx_str, 12), - truncate(p.net_tx_str, 12), - }) catch return; - - _ = win.print(&.{.{ .text = line, .style = style }}, .{ - .row_offset = row, - .wrap = .none, - }); - } - - fn sortPods(self: *PerformanceView, items: []poll.PodMetricRow) void { - const asc = self.sort_asc; - switch (self.sort_col) { - .pod => std.mem.sort(poll.PodMetricRow, items, asc, struct { - fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - const ord = std.mem.order(u8, a.pod, b.pod); - return if (ascending) ord == .lt else ord == .gt; - } - }.cmp), - .namespace => std.mem.sort(poll.PodMetricRow, items, asc, struct { - fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - const ord = std.mem.order(u8, a.namespace, b.namespace); - return if (ascending) ord == .lt else ord == .gt; - } - }.cmp), - .cpu => std.mem.sort(poll.PodMetricRow, items, asc, struct { - fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - return if (ascending) a.cpu_cores < b.cpu_cores else a.cpu_cores > b.cpu_cores; - } - }.cmp), - .memory => std.mem.sort(poll.PodMetricRow, items, asc, struct { - fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - return if (ascending) 
a.mem_bytes < b.mem_bytes else a.mem_bytes > b.mem_bytes; - } - }.cmp), - .net_rx => { - std.mem.sort(poll.PodMetricRow, items, asc, struct { - fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - return if (ascending) - a.net_rx_bytes_sec < b.net_rx_bytes_sec - else - a.net_rx_bytes_sec > b.net_rx_bytes_sec; - } - }.cmp); - }, - .net_tx => { - std.mem.sort(poll.PodMetricRow, items, asc, struct { - fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - return if (ascending) - a.net_tx_bytes_sec < b.net_tx_bytes_sec - else - a.net_tx_bytes_sec > b.net_tx_bytes_sec; - } - }.cmp); - }, - } - } - - fn drawCentered(win: vaxis.Window, msg: []const u8) void { - const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; - _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ - .col_offset = col, - .row_offset = win.height / 2, - .wrap = .none, - }); - } - - fn truncate(s: []const u8, max: usize) []const u8 { - return if (s.len > max) s[0..max] else s; - } -}; diff --git a/tui/src/views/storage.rs b/tui/src/views/storage.rs new file mode 100644 index 0000000..162269b --- /dev/null +++ b/tui/src/views/storage.rs @@ -0,0 +1,182 @@ +use crate::models::Snapshot; +use crate::util::truncate; +use crossterm::event::{KeyCode, KeyEvent}; +use ratatui::{ + Frame, + layout::{Constraint, Direction, Layout, Rect}, + style::{Color, Modifier, Style}, + widgets::{Block, Borders, Cell, Paragraph, Row, Table, TableState}, +}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum StorageSection { + Pools, + Disks, +} + +pub struct StorageViewState { + pub active_section: StorageSection, + pub pool_table: TableState, + pub disk_table: TableState, +} + +impl Default for StorageViewState { + fn default() -> Self { + Self { + active_section: StorageSection::Pools, + pool_table: TableState::default(), + disk_table: TableState::default(), + } + } +} + +impl StorageViewState { + pub fn 
handle_key(&mut self, key: KeyEvent, pool_count: usize, disk_count: usize) { + match key.code { + KeyCode::Left | KeyCode::Char('h') => self.active_section = StorageSection::Pools, + KeyCode::Right | KeyCode::Char('l') => self.active_section = StorageSection::Disks, + _ => match self.active_section { + StorageSection::Pools => handle_table_nav(&mut self.pool_table, key, pool_count), + StorageSection::Disks => handle_table_nav(&mut self.disk_table, key, disk_count), + }, + } + } +} + +fn handle_table_nav(state: &mut TableState, key: KeyEvent, count: usize) { + if count == 0 { + state.select(None); + return; + } + let selected = state.selected().unwrap_or(0); + let next = match key.code { + KeyCode::Char('j') | KeyCode::Down => selected.saturating_add(1).min(count - 1), + KeyCode::Char('k') | KeyCode::Up => selected.saturating_sub(1), + KeyCode::Char('g') => 0, + KeyCode::Char('G') => count - 1, + _ => selected, + }; + state.select(Some(next)); +} + +pub fn render( + frame: &mut Frame, + area: Rect, + state: &mut StorageViewState, + snapshot: &Snapshot, + warn: u8, + crit: u8, +) { + if snapshot.storage_pools.is_empty() && snapshot.vm_disks.is_empty() { + frame.render_widget( + Paragraph::new("No storage data available") + .block(Block::default().borders(Borders::ALL).title("Storage")), + area, + ); + return; + } + + let chunks = Layout::default() + .direction(Direction::Vertical) + .constraints([Constraint::Percentage(55), Constraint::Percentage(45)]) + .split(area); + + let pool_rows = snapshot.storage_pools.iter().map(|pool| { + let remaining = 100.0 - pool.usage_pct; + let color = if remaining < crit as f64 { + Color::Red + } else if remaining < warn as f64 { + Color::Yellow + } else { + Color::Green + }; + Row::new(vec![ + Cell::from(truncate(&pool.name, 16)), + Cell::from(truncate(&pool.node, 12)), + Cell::from(truncate(&pool.pool_type, 10)), + Cell::from(pool.used_str.clone()), + Cell::from(pool.total_str.clone()), + Cell::from(format!("{:>5.1}%", 
pool.usage_pct)).style(Style::default().fg(color)), + Cell::from(truncate(&pool.status, 12)), + ]) + }); + let pool_title = if state.active_section == StorageSection::Pools { + "Storage Pools (active: h/l switch panes)" + } else { + "Storage Pools" + }; + let pool_table = Table::new( + pool_rows, + [ + Constraint::Length(16), + Constraint::Length(12), + Constraint::Length(10), + Constraint::Length(12), + Constraint::Length(12), + Constraint::Length(8), + Constraint::Min(10), + ], + ) + .header( + Row::new(vec![ + "Pool", "Node", "Type", "Used", "Total", "Usage", "Status", + ]) + .style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block(Block::default().borders(Borders::ALL).title(pool_title)) + .row_highlight_style( + Style::default() + .bg(Color::Blue) + .fg(Color::Black) + .add_modifier(Modifier::BOLD), + ); + if state.pool_table.selected().is_none() && !snapshot.storage_pools.is_empty() { + state.pool_table.select(Some(0)); + } + frame.render_stateful_widget(pool_table, chunks[0], &mut state.pool_table); + + let disk_rows = snapshot.vm_disks.iter().map(|disk| { + Row::new(vec![ + Cell::from(truncate(&disk.vm_name, 22)), + Cell::from(disk.vmid.clone()), + Cell::from(truncate(&disk.node, 12)), + Cell::from(disk.size_str.clone()), + ]) + }); + let disk_title = if state.active_section == StorageSection::Disks { + "VM Disks (active: h/l switch panes)" + } else { + "VM Disks" + }; + let disk_table = Table::new( + disk_rows, + [ + Constraint::Length(22), + Constraint::Length(8), + Constraint::Length(12), + Constraint::Min(10), + ], + ) + .header( + Row::new(vec!["VM Name", "VMID", "Node", "Size"]).style( + Style::default() + .fg(Color::Cyan) + .add_modifier(Modifier::BOLD), + ), + ) + .block(Block::default().borders(Borders::ALL).title(disk_title)) + .row_highlight_style( + Style::default() + .bg(Color::Blue) + .fg(Color::Black) + .add_modifier(Modifier::BOLD), + ); + if state.disk_table.selected().is_none() && 
!snapshot.vm_disks.is_empty() { + state.disk_table.select(Some(0)); + } + frame.render_stateful_widget(disk_table, chunks[1], &mut state.disk_table); +} diff --git a/tui/src/views/storage.zig b/tui/src/views/storage.zig deleted file mode 100644 index 643a4c2..0000000 --- a/tui/src/views/storage.zig +++ /dev/null @@ -1,287 +0,0 @@ -const std = @import("std"); -const vaxis = @import("vaxis"); -const poll = @import("../poll.zig"); - -const Table = vaxis.widgets.Table; - -const Section = enum { pools, disks }; - -pub const StorageView = struct { - active_section: Section = .pools, - - // Pool section state - pool_selected: u16 = 0, - pool_scroll: u16 = 0, - num_pools: u16 = 0, - - // Disk section state - disk_table_ctx: Table.TableContext, - num_disks: u16 = 0, - - // Thresholds - warn_threshold: u8, - crit_threshold: u8, - - const pool_header = " Pool Name Node Type Used Total Usage Status"; - const pool_header_sep = " ─────────────────────────────────────────────────────────────────────────────────────────────"; - - pub fn init(warn: u8, crit: u8) StorageView { - return .{ - .warn_threshold = warn, - .crit_threshold = crit, - .disk_table_ctx = .{ - .active = false, - .selected_bg = .{ .index = 4 }, - .selected_fg = .{ .index = 0 }, - .active_bg = .{ .index = 4 }, - .active_fg = .{ .index = 0 }, - .hdr_bg_1 = .{ .index = 8 }, - .hdr_bg_2 = .{ .index = 8 }, - .row_bg_1 = .default, - .row_bg_2 = .default, - .col_width = .dynamic_fill, - .header_names = .{ .custom = &.{ "VM Name", "VMID", "Node", "Size" } }, - }, - }; - } - - pub fn handleKey(self: *StorageView, key: vaxis.Key) void { - if (key.matches(vaxis.Key.tab, .{})) { - self.toggleSection(); - return; - } - - switch (self.active_section) { - .pools => self.handlePoolKey(key), - .disks => self.handleDiskKey(key), - } - } - - fn toggleSection(self: *StorageView) void { - self.active_section = if (self.active_section == .pools) .disks else .pools; - self.disk_table_ctx.active = (self.active_section == .disks); - } - 
- fn handlePoolKey(self: *StorageView, key: vaxis.Key) void { - if (self.num_pools == 0) return; - if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { - if (self.pool_selected < self.num_pools - 1) self.pool_selected += 1; - } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { - if (self.pool_selected > 0) self.pool_selected -= 1; - } else if (key.matches('g', .{})) { - self.pool_selected = 0; - } else if (key.matches('G', .{ .shift = true })) { - if (self.num_pools > 0) self.pool_selected = self.num_pools - 1; - } - } - - fn handleDiskKey(self: *StorageView, key: vaxis.Key) void { - if (self.num_disks == 0) return; - if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { - if (self.disk_table_ctx.row < self.num_disks - 1) self.disk_table_ctx.row += 1; - } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { - if (self.disk_table_ctx.row > 0) self.disk_table_ctx.row -= 1; - } else if (key.matches('g', .{})) { - self.disk_table_ctx.row = 0; - } else if (key.matches('G', .{ .shift = true })) { - if (self.num_disks > 0) self.disk_table_ctx.row = self.num_disks - 1; - } - } - - pub fn draw( - self: *StorageView, - alloc: std.mem.Allocator, - win: vaxis.Window, - pools: []const poll.StoragePoolRow, - disks: []const poll.VmDiskRow, - ) void { - self.num_pools = @intCast(pools.len); - self.num_disks = @intCast(disks.len); - - if (pools.len == 0 and disks.len == 0) { - drawEmpty(win); - return; - } - - // Clamp selections - if (self.pool_selected >= self.num_pools and self.num_pools > 0) - self.pool_selected = self.num_pools - 1; - if (self.disk_table_ctx.row >= self.num_disks and self.num_disks > 0) - self.disk_table_ctx.row = self.num_disks - 1; - - // Split layout: pools get top portion, disks get bottom - const sep_row: u16 = @intCast(@max(4, @min(win.height -| 6, (win.height * 55) / 100))); - const pools_win = win.child(.{ .height = sep_row }); - const disks_win = win.child(.{ .y_off = @intCast(sep_row + 1), 
.height = win.height -| sep_row -| 1 }); - - // Separator line - self.drawSeparator(win, sep_row); - - // Draw sections - self.drawPools(pools_win, pools); - self.drawDisks(alloc, disks_win, disks); - } - - fn drawSeparator(self: *StorageView, win: vaxis.Window, row: u16) void { - const label = if (self.active_section == .disks) " VM Disks (active) " else " VM Disks "; - const style: vaxis.Style = .{ .fg = .{ .index = 8 } }; - const active_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bold = true }; - _ = win.print(&.{.{ - .text = label, - .style = if (self.active_section == .disks) active_style else style, - }}, .{ .row_offset = row, .wrap = .none }); - } - - fn drawPools(self: *StorageView, win: vaxis.Window, pools: []const poll.StoragePoolRow) void { - if (pools.len == 0) return; - - const is_active = (self.active_section == .pools); - const hdr_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 8 }, .bold = true }; - const hdr_label = if (is_active) " Storage Pools (active)" else " Storage Pools"; - - // Header - _ = win.print(&.{.{ .text = hdr_label, .style = hdr_style }}, .{ .wrap = .none }); - - // Column headers (row 1) - const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; - _ = win.print(&.{.{ .text = pool_header, .style = col_hdr_style }}, .{ - .row_offset = 1, - .wrap = .none, - }); - - // Scrolling - const visible_rows = win.height -| 2; - if (self.pool_selected < self.pool_scroll) { - self.pool_scroll = self.pool_selected; - } else if (self.pool_selected >= self.pool_scroll + visible_rows) { - self.pool_scroll = self.pool_selected - visible_rows + 1; - } - - // Rows - var row_idx: u16 = 0; - const start = self.pool_scroll; - const end: u16 = @intCast(@min(pools.len, start + visible_rows)); - for (pools[start..end]) |p| { - const display_row = row_idx + 2; // after header + col headers - const is_selected = is_active and (start + row_idx == self.pool_selected); - - self.drawPoolRow(win, display_row, p, 
is_selected); - row_idx += 1; - } - } - - fn drawPoolRow(self: *StorageView, win: vaxis.Window, row: u16, p: poll.StoragePoolRow, selected: bool) void { - const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default; - const fg: vaxis.Color = if (selected) .{ .index = 0 } else .{ .index = 7 }; - const style: vaxis.Style = .{ .fg = fg, .bg = bg }; - - // Format: " name node type used total [bar] pct% status" - var buf: [256]u8 = undefined; - const line = std.fmt.bufPrint(&buf, " {s:<16} {s:<12} {s:<10} {s:<12} {s:<12}", .{ - truncate(p.name, 16), - truncate(p.node, 12), - truncate(p.pool_type, 10), - truncate(p.used_str, 12), - truncate(p.total_str, 12), - }) catch return; - - _ = win.print(&.{.{ .text = line, .style = style }}, .{ - .row_offset = row, - .wrap = .none, - }); - - // Usage bar at column 66 - self.drawUsageBar(win, row, 66, p.usage_pct, bg); - - // Status after bar (col ~84) - const status_style: vaxis.Style = .{ - .fg = if (std.mem.eql(u8, p.status, "available")) .{ .index = 2 } else .{ .index = 3 }, - .bg = bg, - }; - _ = win.print(&.{.{ .text = truncate(p.status, 10), .style = status_style }}, .{ - .col_offset = 85, - .row_offset = row, - .wrap = .none, - }); - } - - fn drawUsageBar(self: *StorageView, win: vaxis.Window, row: u16, col: u16, pct: f64, bg: vaxis.Color) void { - const bar_width: u16 = 10; - const remaining = 100.0 - pct; - const bar_color: vaxis.Color = if (remaining < @as(f64, @floatFromInt(self.crit_threshold))) - .{ .index = 1 } // red - else if (remaining < @as(f64, @floatFromInt(self.warn_threshold))) - .{ .index = 3 } // yellow - else - .{ .index = 2 }; // green - - const filled: u16 = @intFromFloat(@min( - @as(f64, @floatFromInt(bar_width)), - @round(pct / 100.0 * @as(f64, @floatFromInt(bar_width))), - )); - const empty_count = bar_width - filled; - - // Build fill/empty strings from Unicode blocks - var fill_buf: [30]u8 = undefined; - var fill_len: usize = 0; - for (0..filled) |_| { - const ch = "\u{2588}"; - 
@memcpy(fill_buf[fill_len..][0..ch.len], ch); - fill_len += ch.len; - } - - var empty_buf: [30]u8 = undefined; - var empty_len: usize = 0; - for (0..empty_count) |_| { - const ch = "\u{2591}"; - @memcpy(empty_buf[empty_len..][0..ch.len], ch); - empty_len += ch.len; - } - - var pct_buf: [8]u8 = undefined; - const pct_str = std.fmt.bufPrint(&pct_buf, "] {d:>3.0}%", .{pct}) catch "] ?%"; - - _ = win.print(&.{ - .{ .text = "[", .style = .{ .fg = .{ .index = 7 }, .bg = bg } }, - .{ .text = fill_buf[0..fill_len], .style = .{ .fg = bar_color, .bg = bg } }, - .{ .text = empty_buf[0..empty_len], .style = .{ .fg = .{ .index = 8 }, .bg = bg } }, - .{ .text = pct_str, .style = .{ .fg = .{ .index = 7 }, .bg = bg } }, - }, .{ - .col_offset = col, - .row_offset = row, - .wrap = .none, - }); - } - - fn drawDisks(self: *StorageView, alloc: std.mem.Allocator, win: vaxis.Window, disks: []const poll.VmDiskRow) void { - if (disks.len == 0) { - const msg = "No VM disk data"; - const c: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; - _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ - .col_offset = c, - .row_offset = win.height / 2, - .wrap = .none, - }); - return; - } - - if (self.disk_table_ctx.row >= self.num_disks) - self.disk_table_ctx.row = self.num_disks - 1; - - Table.drawTable(alloc, win, disks, &self.disk_table_ctx) catch {}; - } - - fn drawEmpty(win: vaxis.Window) void { - const msg = "No storage data available"; - const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; - _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ - .col_offset = col, - .row_offset = win.height / 2, - .wrap = .none, - }); - } - - fn truncate(s: []const u8, max: usize) []const u8 { - return if (s.len > max) s[0..max] else s; - } -};