From dccb38b2ba6273b35b8bf82703793a90dd4cf57e Mon Sep 17 00:00:00 2001 From: OneNoted Date: Wed, 18 Mar 2026 23:08:44 +0100 Subject: [PATCH 01/14] chore: add zig build artifacts and PRD to gitignore --- .gitignore | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index 657c9d2..d351ed2 100644 --- a/.gitignore +++ b/.gitignore @@ -14,3 +14,9 @@ dist/ # Config with secrets pvt.yaml + +# Zig build artifacts +tui/zig-out/ +tui/.zig-cache/ + +vitui.md From cd7780294f819ef7b9a667c045c9b4480e2edfb3 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Wed, 18 Mar 2026 23:08:56 +0100 Subject: [PATCH 02/14] feat: add vitui interactive TUI for cluster monitoring Zig-based terminal UI (libvaxis) that reads pvt.yaml and provides four live views: Cluster status, Storage pools, Backups (PVE + K8s), and Performance metrics (PromQL). - Config parsing with env var expansion and duration strings - Proxmox REST API client via curl subprocess (handles tls_verify: false) - Talos client via talosctl subprocess with JSON parsing - Kubernetes client via kubectl for VolSync/Velero backup detection - Metrics client with Prometheus/VictoriaMetrics autodetection - Background polling with mutex-protected shared state - Vim-style navigation (hjkl, /, gg/G) across all views - Storage usage bars with configurable warn/crit thresholds - Backup stale flagging with confirmation dialog for deletion - Search/filter overlay in backups view - Pod metrics sortable by CPU/memory/network, filterable by namespace - Help overlay with view-specific keybinding hints --- tui/build.zig | 55 ++ tui/build.zig.zon | 21 + tui/src/api/http_client.zig | 120 +++++ tui/src/api/kubernetes.zig | 249 +++++++++ tui/src/api/metrics.zig | 265 ++++++++++ tui/src/api/proxmox.zig | 246 +++++++++ tui/src/api/talos.zig | 170 ++++++ tui/src/app.zig | 485 +++++++++++++++++ tui/src/config.zig | 300 +++++++++++ tui/src/main.zig | 70 +++ tui/src/poll.zig | 957 ++++++++++++++++++++++++++++++++++ 
tui/src/views/backups.zig | 442 ++++++++++++++++ tui/src/views/cluster.zig | 82 +++ tui/src/views/performance.zig | 380 ++++++++++++++ tui/src/views/storage.zig | 287 ++++++++++ 15 files changed, 4129 insertions(+) create mode 100644 tui/build.zig create mode 100644 tui/build.zig.zon create mode 100644 tui/src/api/http_client.zig create mode 100644 tui/src/api/kubernetes.zig create mode 100644 tui/src/api/metrics.zig create mode 100644 tui/src/api/proxmox.zig create mode 100644 tui/src/api/talos.zig create mode 100644 tui/src/app.zig create mode 100644 tui/src/config.zig create mode 100644 tui/src/main.zig create mode 100644 tui/src/poll.zig create mode 100644 tui/src/views/backups.zig create mode 100644 tui/src/views/cluster.zig create mode 100644 tui/src/views/performance.zig create mode 100644 tui/src/views/storage.zig diff --git a/tui/build.zig b/tui/build.zig new file mode 100644 index 0000000..8dc1565 --- /dev/null +++ b/tui/build.zig @@ -0,0 +1,55 @@ +const std = @import("std"); + +pub fn build(b: *std.Build) void { + const target = b.standardTargetOptions(.{}); + const optimize = b.standardOptimizeOption(.{}); + + // Dependencies + const vaxis_dep = b.dependency("vaxis", .{ + .target = target, + .optimize = optimize, + }); + const yaml_dep = b.dependency("zig_yaml", .{ + .target = target, + .optimize = optimize, + }); + + // Executable + const exe = b.addExecutable(.{ + .name = "vitui", + .root_module = b.createModule(.{ + .root_source_file = b.path("src/main.zig"), + .target = target, + .optimize = optimize, + .imports = &.{ + .{ .name = "vaxis", .module = vaxis_dep.module("vaxis") }, + .{ .name = "yaml", .module = yaml_dep.module("yaml") }, + }, + }), + }); + b.installArtifact(exe); + + // Run step + const run_step = b.step("run", "Run vitui"); + const run_cmd = b.addRunArtifact(exe); + run_step.dependOn(&run_cmd.step); + run_cmd.step.dependOn(b.getInstallStep()); + if (b.args) |args| { + run_cmd.addArgs(args); + } + + // Tests + const exe_tests = 
b.addTest(.{ + .root_module = b.createModule(.{ + .root_source_file = b.path("src/main.zig"), + .target = target, + .optimize = optimize, + .imports = &.{ + .{ .name = "vaxis", .module = vaxis_dep.module("vaxis") }, + .{ .name = "yaml", .module = yaml_dep.module("yaml") }, + }, + }), + }); + const test_step = b.step("test", "Run tests"); + test_step.dependOn(&b.addRunArtifact(exe_tests).step); +} diff --git a/tui/build.zig.zon b/tui/build.zig.zon new file mode 100644 index 0000000..57d1c24 --- /dev/null +++ b/tui/build.zig.zon @@ -0,0 +1,21 @@ +.{ + .name = .vitui, + .version = "0.1.0", + .fingerprint = 0xc340ce385d55450f, + .minimum_zig_version = "0.15.2", + .dependencies = .{ + .vaxis = .{ + .url = "git+https://github.com/rockorager/libvaxis#41fff922316dcb8776332ec460e73eaf397d5033", + .hash = "vaxis-0.5.1-BWNV_JJOCQAtdJyLvrYCKbKIhX9q3liQkKMAzujWS4HJ", + }, + .zig_yaml = .{ + .url = "git+https://github.com/kubkon/zig-yaml#a6c2cd8760bf45c49b17a3f6259c4dfe3ded528e", + .hash = "zig_yaml-0.2.0-C1161pmrAgDnipDTh_4v4RQD27XN5GNaVlzzvlXf1jfW", + }, + }, + .paths = .{ + "build.zig", + "build.zig.zon", + "src", + }, +} diff --git a/tui/src/api/http_client.zig b/tui/src/api/http_client.zig new file mode 100644 index 0000000..929d087 --- /dev/null +++ b/tui/src/api/http_client.zig @@ -0,0 +1,120 @@ +const std = @import("std"); +const config = @import("../config.zig"); +const Allocator = std.mem.Allocator; + +/// HTTP client that uses curl subprocess for Proxmox API requests. +/// Handles PVE API token auth and TLS certificate skipping for self-signed certs. +pub const HttpClient = struct { + allocator: Allocator, + endpoint: []const u8, + auth_header: []const u8, + tls_verify: bool, + + pub fn init(allocator: Allocator, pve: config.ProxmoxCluster) HttpClient { + return .{ + .allocator = allocator, + .endpoint = pve.endpoint, + .auth_header = pve.token_secret, + .tls_verify = pve.tls_verify, + }; + } + + /// Perform a GET request. Caller owns the returned memory. 
+ pub fn get(self: HttpClient, path: []const u8) ![]const u8 { + return self.request("GET", path); + } + + /// Perform a DELETE request. Caller owns the returned memory. + pub fn delete(self: HttpClient, path: []const u8) ![]const u8 { + return self.request("DELETE", path); + } + + fn request(self: HttpClient, method: []const u8, path: []const u8) ![]const u8 { + const url = try std.fmt.allocPrint(self.allocator, "{s}{s}", .{ self.endpoint, path }); + defer self.allocator.free(url); + + const auth = try std.fmt.allocPrint(self.allocator, "Authorization: PVEAPIToken={s}", .{self.auth_header}); + defer self.allocator.free(auth); + + var argv_list: std.ArrayListUnmanaged([]const u8) = .empty; + defer argv_list.deinit(self.allocator); + + try argv_list.appendSlice(self.allocator, &.{ "curl", "-s", "-f", "--max-time", "10" }); + if (!std.mem.eql(u8, method, "GET")) { + try argv_list.appendSlice(self.allocator, &.{ "-X", method }); + } + try argv_list.appendSlice(self.allocator, &.{ "-H", auth }); + if (!self.tls_verify) { + try argv_list.append(self.allocator, "-k"); + } + try argv_list.append(self.allocator, url); + + const result = std.process.Child.run(.{ + .allocator = self.allocator, + .argv = argv_list.items, + .max_output_bytes = 1024 * 1024, + }) catch |err| { + std.log.err("failed to run curl: {}", .{err}); + return error.HttpRequestFailed; + }; + defer self.allocator.free(result.stderr); + + const term = result.term; + if (term == .Exited and term.Exited == 0) { + return result.stdout; + } + + self.allocator.free(result.stdout); + std.log.err("curl {s} failed (exit {}): {s}", .{ method, term, result.stderr }); + return error.HttpRequestFailed; + } + + pub fn deinit(self: *HttpClient) void { + _ = self; + } +}; + +/// Parse a JSON response body and extract the "data" field. +/// Returns the parsed JSON Value. Caller must call `parsed.deinit()`. 
+pub fn parseJsonResponse(allocator: Allocator, body: []const u8) !std.json.Parsed(std.json.Value) { + return std.json.parseFromSlice(std.json.Value, allocator, body, .{ + .ignore_unknown_fields = true, + .allocate = .alloc_always, + }); +} + +/// Extract a string field from a JSON object, returning a default if missing. +pub fn jsonStr(obj: std.json.ObjectMap, key: []const u8, default: []const u8) []const u8 { + const val = obj.get(key) orelse return default; + return switch (val) { + .string => |s| s, + else => default, + }; +} + +/// Extract an integer field from a JSON object, returning a default if missing. +pub fn jsonInt(obj: std.json.ObjectMap, key: []const u8, default: i64) i64 { + const val = obj.get(key) orelse return default; + return switch (val) { + .integer => |i| i, + .float => |f| @intFromFloat(f), + .string => |s| std.fmt.parseInt(i64, s, 10) catch default, + else => default, + }; +} + +/// Extract a float field from a JSON object, returning a default if missing. +pub fn jsonFloat(obj: std.json.ObjectMap, key: []const u8, default: f64) f64 { + const val = obj.get(key) orelse return default; + return switch (val) { + .float => |f| f, + .integer => |i| @floatFromInt(i), + else => default, + }; +} + +test "jsonStr returns default for missing key" { + var map = std.json.ObjectMap.init(std.testing.allocator); + defer map.deinit(); + try std.testing.expectEqualStrings("fallback", jsonStr(map, "missing", "fallback")); +} diff --git a/tui/src/api/kubernetes.zig b/tui/src/api/kubernetes.zig new file mode 100644 index 0000000..5f39778 --- /dev/null +++ b/tui/src/api/kubernetes.zig @@ -0,0 +1,249 @@ +const std = @import("std"); +const config = @import("../config.zig"); +const http = @import("http_client.zig"); +const Allocator = std.mem.Allocator; + +pub const K8sBackupEntry = struct { + name: []const u8, + namespace: []const u8, + source_type: []const u8, // "VolSync" or "Velero" + status: []const u8, + schedule: []const u8, + last_run: []const u8, +}; + 
+pub const DetectedProviders = struct { + volsync: bool = false, + velero: bool = false, +}; + +pub const KubeClient = struct { + allocator: Allocator, + kubeconfig: []const u8, + + pub fn init(allocator: Allocator, kubeconfig: []const u8) KubeClient { + return .{ + .allocator = allocator, + .kubeconfig = kubeconfig, + }; + } + + /// Detect which backup providers (VolSync, Velero) are installed by checking CRDs. + pub fn detectProviders(self: *KubeClient) DetectedProviders { + const output = self.runKubectl(&.{ + "get", "crd", "--no-headers", "-o", "custom-columns=NAME:.metadata.name", + }) orelse return .{}; + defer self.allocator.free(output); + + var result = DetectedProviders{}; + var lines = std.mem.splitScalar(u8, output, '\n'); + while (lines.next()) |line| { + const trimmed = std.mem.trim(u8, line, " \t\r"); + if (trimmed.len == 0) continue; + if (std.mem.indexOf(u8, trimmed, "volsync") != null) result.volsync = true; + if (std.mem.indexOf(u8, trimmed, "velero") != null) result.velero = true; + } + return result; + } + + /// Fetch VolSync ReplicationSources across all namespaces. + pub fn getVolsyncSources(self: *KubeClient) []K8sBackupEntry { + const output = self.runKubectl(&.{ + "get", "replicationsources.volsync.backube", "-A", "-o", "json", + }) orelse return &.{}; + defer self.allocator.free(output); + + return self.parseK8sBackups(output, "VolSync"); + } + + /// Fetch Velero Backups across all namespaces. 
+ pub fn getVeleroBackups(self: *KubeClient) []K8sBackupEntry { + const output = self.runKubectl(&.{ + "get", "backups.velero.io", "-A", "-o", "json", + }) orelse return &.{}; + defer self.allocator.free(output); + + return self.parseK8sBackups(output, "Velero"); + } + + fn parseK8sBackups(self: *KubeClient, output: []const u8, source_type: []const u8) []K8sBackupEntry { + var parsed = std.json.parseFromSlice(std.json.Value, self.allocator, output, .{ + .ignore_unknown_fields = true, + .allocate = .alloc_always, + }) catch return &.{}; + defer parsed.deinit(); + + const root = switch (parsed.value) { + .object => |obj| obj, + else => return &.{}, + }; + + const items = switch (root.get("items") orelse return &.{}) { + .array => |arr| arr.items, + else => return &.{}, + }; + + var results: std.ArrayListUnmanaged(K8sBackupEntry) = .empty; + for (items) |item| { + const obj = switch (item) { + .object => |o| o, + else => continue, + }; + + const metadata = switch (obj.get("metadata") orelse continue) { + .object => |o| o, + else => continue, + }; + + const name = self.allocator.dupe(u8, http.jsonStr(metadata, "name", "unknown")) catch continue; + const namespace = self.allocator.dupe(u8, http.jsonStr(metadata, "namespace", "default")) catch continue; + + // Extract status and schedule based on source type + var status: []const u8 = undefined; + var schedule: []const u8 = undefined; + var last_run: []const u8 = undefined; + + if (std.mem.eql(u8, source_type, "VolSync")) { + status = self.parseVolsyncStatus(obj); + schedule = self.parseVolsyncSchedule(obj); + last_run = self.parseVolsyncLastRun(obj); + } else { + status = self.parseVeleroStatus(obj); + schedule = self.parseVeleroSchedule(obj); + last_run = self.parseVeleroLastRun(obj); + } + + results.append(self.allocator, .{ + .name = name, + .namespace = namespace, + .source_type = self.allocator.dupe(u8, source_type) catch continue, + .status = status, + .schedule = schedule, + .last_run = last_run, + }) catch 
continue; + } + + return results.toOwnedSlice(self.allocator) catch &.{}; + } + + fn parseVolsyncStatus(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { + const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "unknown") catch "unknown") { + .object => |o| o, + else => return self.allocator.dupe(u8, "unknown") catch "unknown", + }; + + // Check conditions array for Synchronizing condition + const conditions = switch (status_obj.get("conditions") orelse return self.allocator.dupe(u8, "unknown") catch "unknown") { + .array => |arr| arr.items, + else => return self.allocator.dupe(u8, "unknown") catch "unknown", + }; + + for (conditions) |cond| { + const cond_obj = switch (cond) { + .object => |o| o, + else => continue, + }; + const cond_type = http.jsonStr(cond_obj, "type", ""); + if (std.mem.eql(u8, cond_type, "Synchronizing")) { + const cond_status = http.jsonStr(cond_obj, "status", "Unknown"); + if (std.mem.eql(u8, cond_status, "True")) { + return self.allocator.dupe(u8, "Syncing") catch "Syncing"; + } + return self.allocator.dupe(u8, "Idle") catch "Idle"; + } + } + + return self.allocator.dupe(u8, "unknown") catch "unknown"; + } + + fn parseVolsyncSchedule(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { + const spec = switch (obj.get("spec") orelse return self.allocator.dupe(u8, "-") catch "-") { + .object => |o| o, + else => return self.allocator.dupe(u8, "-") catch "-", + }; + const trigger = switch (spec.get("trigger") orelse return self.allocator.dupe(u8, "-") catch "-") { + .object => |o| o, + else => return self.allocator.dupe(u8, "-") catch "-", + }; + return self.allocator.dupe(u8, http.jsonStr(trigger, "schedule", "-")) catch "-"; + } + + fn parseVolsyncLastRun(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { + const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "-") catch "-") { + .object => |o| o, + else => return self.allocator.dupe(u8, "-") catch "-", + }; + return 
self.allocator.dupe(u8, http.jsonStr(status_obj, "lastSyncTime", "-")) catch "-"; + } + + fn parseVeleroStatus(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { + const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "unknown") catch "unknown") { + .object => |o| o, + else => return self.allocator.dupe(u8, "unknown") catch "unknown", + }; + return self.allocator.dupe(u8, http.jsonStr(status_obj, "phase", "unknown")) catch "unknown"; + } + + fn parseVeleroSchedule(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { + const spec = switch (obj.get("spec") orelse return self.allocator.dupe(u8, "-") catch "-") { + .object => |o| o, + else => return self.allocator.dupe(u8, "-") catch "-", + }; + return self.allocator.dupe(u8, http.jsonStr(spec, "scheduleName", "-")) catch "-"; + } + + fn parseVeleroLastRun(self: *KubeClient, obj: std.json.ObjectMap) []const u8 { + const status_obj = switch (obj.get("status") orelse return self.allocator.dupe(u8, "-") catch "-") { + .object => |o| o, + else => return self.allocator.dupe(u8, "-") catch "-", + }; + return self.allocator.dupe(u8, http.jsonStr(status_obj, "completionTimestamp", "-")) catch "-"; + } + + /// Run a kubectl command with standard flags and return stdout. + /// Returns null on any failure. 
+ fn runKubectl(self: *KubeClient, extra_args: []const []const u8) ?[]const u8 { + var argv: std.ArrayListUnmanaged([]const u8) = .empty; + defer argv.deinit(self.allocator); + + argv.append(self.allocator, "kubectl") catch return null; + argv.appendSlice(self.allocator, extra_args) catch return null; + argv.appendSlice(self.allocator, &.{ + "--kubeconfig", self.kubeconfig, + }) catch return null; + + const result = std.process.Child.run(.{ + .allocator = self.allocator, + .argv = argv.items, + .max_output_bytes = 512 * 1024, + }) catch return null; + defer self.allocator.free(result.stderr); + + const term = result.term; + if (term == .Exited and term.Exited == 0) { + return result.stdout; + } + + self.allocator.free(result.stdout); + return null; + } + + pub fn deinit(self: *KubeClient) void { + _ = self; + } +}; + +/// Derive the kubeconfig path from a talos config path. +/// Given "~/talos/apollo/talosconfig", returns "~/talos/apollo/kubeconfig". +pub fn deriveKubeconfig(allocator: Allocator, talos_config_path: []const u8) ?[]const u8 { + // Find the last path separator + const dir_end = std.mem.lastIndexOfScalar(u8, talos_config_path, '/') orelse return null; + return std.fmt.allocPrint(allocator, "{s}/kubeconfig", .{talos_config_path[0..dir_end]}) catch null; +} + +test "deriveKubeconfig" { + const alloc = std.testing.allocator; + const result = deriveKubeconfig(alloc, "~/talos/apollo/talosconfig") orelse unreachable; + defer alloc.free(result); + try std.testing.expectEqualStrings("~/talos/apollo/kubeconfig", result); +} diff --git a/tui/src/api/metrics.zig b/tui/src/api/metrics.zig new file mode 100644 index 0000000..d86ede3 --- /dev/null +++ b/tui/src/api/metrics.zig @@ -0,0 +1,265 @@ +const std = @import("std"); +const config = @import("../config.zig"); +const http = @import("http_client.zig"); +const Allocator = std.mem.Allocator; + +pub const PodMetrics = struct { + pod: []const u8, + namespace: []const u8, + cpu_cores: f64, // fractional cores + 
memory_bytes: f64, + net_rx_bytes_sec: f64, + net_tx_bytes_sec: f64, +}; + +pub const NodeMetrics = struct { + instance: []const u8, + cpu_usage: f64, // 0.0 - 1.0 + mem_used: f64, // bytes + mem_total: f64, // bytes +}; + +pub const MetricsClient = struct { + allocator: Allocator, + endpoint: []const u8, // e.g. "http://prometheus.monitoring.svc:9090" + available: bool = false, + + pub fn init(allocator: Allocator, kubeconfig: []const u8) MetricsClient { + // Autodetect metrics endpoint by querying known service names + const candidates = [_]struct { ns: []const u8, svc: []const u8, port: []const u8 }{ + .{ .ns = "monitoring", .svc = "vmsingle-victoria-metrics-victoria-metrics-single-server", .port = "8428" }, + .{ .ns = "monitoring", .svc = "vmselect", .port = "8481" }, + .{ .ns = "monitoring", .svc = "prometheus-server", .port = "9090" }, + .{ .ns = "monitoring", .svc = "prometheus-operated", .port = "9090" }, + .{ .ns = "observability", .svc = "prometheus-server", .port = "9090" }, + .{ .ns = "observability", .svc = "prometheus-operated", .port = "9090" }, + }; + + for (candidates) |c| { + const endpoint = detectEndpoint(allocator, kubeconfig, c.ns, c.svc, c.port); + if (endpoint) |ep| { + return .{ + .allocator = allocator, + .endpoint = ep, + .available = true, + }; + } + } + + return .{ + .allocator = allocator, + .endpoint = "", + .available = false, + }; + } + + fn detectEndpoint(allocator: Allocator, kubeconfig: []const u8, ns: []const u8, svc: []const u8, port: []const u8) ?[]const u8 { + // Use kubectl to check if the service exists + var argv: std.ArrayListUnmanaged([]const u8) = .empty; + defer argv.deinit(allocator); + argv.appendSlice(allocator, &.{ + "kubectl", "get", "svc", svc, "-n", ns, + "--kubeconfig", kubeconfig, + "--no-headers", "-o", "name", + }) catch return null; + + const result = std.process.Child.run(.{ + .allocator = allocator, + .argv = argv.items, + .max_output_bytes = 4096, + }) catch return null; + defer 
allocator.free(result.stderr); + defer allocator.free(result.stdout); + + const term = result.term; + if (term == .Exited and term.Exited == 0 and result.stdout.len > 0) { + return std.fmt.allocPrint(allocator, "http://{s}.{s}.svc:{s}", .{ svc, ns, port }) catch null; + } + return null; + } + + /// Query pod CPU usage via PromQL. + pub fn getPodCpu(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL( + "sum(rate(container_cpu_usage_seconds_total{container!=\"\",pod!=\"\"}[5m])) by (pod, namespace)", + ); + } + + /// Query pod memory usage via PromQL. + pub fn getPodMemory(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL( + "sum(container_memory_working_set_bytes{container!=\"\",pod!=\"\"}) by (pod, namespace)", + ); + } + + /// Query pod network rx via PromQL. + pub fn getPodNetRx(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL( + "sum(rate(container_network_receive_bytes_total{pod!=\"\"}[5m])) by (pod, namespace)", + ); + } + + /// Query pod network tx via PromQL. + pub fn getPodNetTx(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL( + "sum(rate(container_network_transmit_bytes_total{pod!=\"\"}[5m])) by (pod, namespace)", + ); + } + + /// Query node CPU usage via PromQL. + pub fn getNodeCpu(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL( + "1 - avg(rate(node_cpu_seconds_total{mode=\"idle\"}[5m])) by (instance)", + ); + } + + /// Query node memory usage via PromQL. + pub fn getNodeMemUsed(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL( + "node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes", + ); + } + + /// Query node total memory via PromQL. 
+ pub fn getNodeMemTotal(self: *MetricsClient) []PodMetricValue { + if (!self.available) return &.{}; + return self.queryPromQL("node_memory_MemTotal_bytes"); + } + + pub const PodMetricValue = struct { + labels: std.json.ObjectMap, + value: f64, + }; + + fn queryPromQL(self: *MetricsClient, query: []const u8) []PodMetricValue { + const alloc = self.allocator; + + // URL-encode the query + var encoded: std.ArrayListUnmanaged(u8) = .empty; + defer encoded.deinit(alloc); + for (query) |c| { + switch (c) { + ' ' => encoded.appendSlice(alloc, "%20") catch return &.{}, + '"' => encoded.appendSlice(alloc, "%22") catch return &.{}, + '{' => encoded.appendSlice(alloc, "%7B") catch return &.{}, + '}' => encoded.appendSlice(alloc, "%7D") catch return &.{}, + '!' => encoded.appendSlice(alloc, "%21") catch return &.{}, + '[' => encoded.appendSlice(alloc, "%5B") catch return &.{}, + ']' => encoded.appendSlice(alloc, "%5D") catch return &.{}, + '=' => encoded.appendSlice(alloc, "%3D") catch return &.{}, + else => encoded.append(alloc, c) catch return &.{}, + } + } + + const url = std.fmt.allocPrint(alloc, "{s}/api/v1/query?query={s}", .{ + self.endpoint, + encoded.items, + }) catch return &.{}; + defer alloc.free(url); + + // Use curl to query (unauthenticated, in-cluster) + var argv: std.ArrayListUnmanaged([]const u8) = .empty; + defer argv.deinit(alloc); + argv.appendSlice(alloc, &.{ + "curl", "-s", "-f", "--max-time", "5", url, + }) catch return &.{}; + + const result = std.process.Child.run(.{ + .allocator = alloc, + .argv = argv.items, + .max_output_bytes = 1024 * 1024, + }) catch return &.{}; + defer alloc.free(result.stderr); + + const term = result.term; + if (!(term == .Exited and term.Exited == 0)) { + alloc.free(result.stdout); + return &.{}; + } + defer alloc.free(result.stdout); + + return self.parsePromResponse(result.stdout); + } + + fn parsePromResponse(self: *MetricsClient, body: []const u8) []PodMetricValue { + const alloc = self.allocator; + var parsed = 
std.json.parseFromSlice(std.json.Value, alloc, body, .{ + .ignore_unknown_fields = true, + .allocate = .alloc_always, + }) catch return &.{}; + defer parsed.deinit(); + + const root = switch (parsed.value) { + .object => |obj| obj, + else => return &.{}, + }; + + const data = switch (root.get("data") orelse return &.{}) { + .object => |obj| obj, + else => return &.{}, + }; + + const results_arr = switch (data.get("result") orelse return &.{}) { + .array => |arr| arr.items, + else => return &.{}, + }; + + var out: std.ArrayListUnmanaged(PodMetricValue) = .empty; + for (results_arr) |item| { + const obj = switch (item) { + .object => |o| o, + else => continue, + }; + + // metric labels + const metric = switch (obj.get("metric") orelse continue) { + .object => |o| o, + else => continue, + }; + + // value is [timestamp, "value_string"] + const value_arr = switch (obj.get("value") orelse continue) { + .array => |arr| arr.items, + else => continue, + }; + if (value_arr.len < 2) continue; + + const val_str = switch (value_arr[1]) { + .string => |s| s, + else => continue, + }; + const val = std.fmt.parseFloat(f64, val_str) catch continue; + + // Clone metric labels so they survive parsed.deinit() + var cloned_labels = std.json.ObjectMap.init(alloc); + var it = metric.iterator(); + while (it.next()) |entry| { + const key = alloc.dupe(u8, entry.key_ptr.*) catch continue; + const label_val = switch (entry.value_ptr.*) { + .string => |s| std.json.Value{ .string = alloc.dupe(u8, s) catch continue }, + else => continue, + }; + cloned_labels.put(key, label_val) catch continue; + } + + out.append(alloc, .{ + .labels = cloned_labels, + .value = val, + }) catch continue; + } + + return out.toOwnedSlice(alloc) catch &.{}; + } + + pub fn deinit(self: *MetricsClient) void { + if (self.available and self.endpoint.len > 0) { + self.allocator.free(self.endpoint); + } + } +}; diff --git a/tui/src/api/proxmox.zig b/tui/src/api/proxmox.zig new file mode 100644 index 0000000..264f94b --- 
/dev/null +++ b/tui/src/api/proxmox.zig @@ -0,0 +1,246 @@ +const std = @import("std"); +const config = @import("../config.zig"); +const http = @import("http_client.zig"); +const Allocator = std.mem.Allocator; + +pub const VmStatus = struct { + vmid: i64, + name: []const u8, + status: []const u8, + node: []const u8, + maxdisk: i64, +}; + +pub const StoragePool = struct { + name: []const u8, + node: []const u8, + pool_type: []const u8, + status: []const u8, + disk: i64, + maxdisk: i64, +}; + +pub const NodeStatus = struct { + node: []const u8, + status: []const u8, + cpu: f64, + mem: i64, + maxmem: i64, + uptime: i64, +}; + +pub const BackupEntry = struct { + volid: []const u8, + node: []const u8, + storage: []const u8, + size: i64, + ctime: i64, + vmid: i64, + format: []const u8, +}; + +pub const ProxmoxClient = struct { + client: http.HttpClient, + allocator: Allocator, + + pub fn init(allocator: Allocator, pve: config.ProxmoxCluster) ProxmoxClient { + return .{ + .client = http.HttpClient.init(allocator, pve), + .allocator = allocator, + }; + } + + /// Fetch all VM resources across the PVE cluster. 
+ pub fn getClusterResources(self: *ProxmoxClient) ![]VmStatus { + const body = self.client.get("/api2/json/cluster/resources?type=vm") catch { + return &.{}; + }; + defer self.allocator.free(body); + + var parsed = http.parseJsonResponse(self.allocator, body) catch { + return &.{}; + }; + defer parsed.deinit(); + + const data_val = switch (parsed.value) { + .object => |obj| obj.get("data") orelse return &.{}, + else => return &.{}, + }; + const items = switch (data_val) { + .array => |arr| arr.items, + else => return &.{}, + }; + + var results: std.ArrayListUnmanaged(VmStatus) = .empty; + for (items) |item| { + const obj = switch (item) { + .object => |o| o, + else => continue, + }; + + // Only include QEMU VMs (not LXC containers) + const res_type = http.jsonStr(obj, "type", ""); + if (!std.mem.eql(u8, res_type, "qemu")) continue; + + const name = try self.allocator.dupe(u8, http.jsonStr(obj, "name", "unknown")); + const status = try self.allocator.dupe(u8, http.jsonStr(obj, "status", "unknown")); + const node = try self.allocator.dupe(u8, http.jsonStr(obj, "node", "unknown")); + + try results.append(self.allocator, .{ + .vmid = http.jsonInt(obj, "vmid", 0), + .name = name, + .status = status, + .node = node, + .maxdisk = http.jsonInt(obj, "maxdisk", 0), + }); + } + + return results.toOwnedSlice(self.allocator); + } + + /// Fetch status for a specific PVE node. 
+ pub fn getNodeStatus(self: *ProxmoxClient, node: []const u8) !?NodeStatus { + const path = try std.fmt.allocPrint(self.allocator, "/api2/json/nodes/{s}/status", .{node}); + defer self.allocator.free(path); + + const body = self.client.get(path) catch return null; + defer self.allocator.free(body); + + var parsed = http.parseJsonResponse(self.allocator, body) catch return null; + defer parsed.deinit(); + + const data_val = switch (parsed.value) { + .object => |obj| obj.get("data") orelse return null, + else => return null, + }; + const obj = switch (data_val) { + .object => |o| o, + else => return null, + }; + + return .{ + .node = try self.allocator.dupe(u8, node), + .status = try self.allocator.dupe(u8, http.jsonStr(obj, "status", "unknown")), + .cpu = http.jsonFloat(obj, "cpu", 0), + .mem = http.jsonInt(obj, "mem", 0), + .maxmem = http.jsonInt(obj, "maxmem", 0), + .uptime = http.jsonInt(obj, "uptime", 0), + }; + } + + /// Fetch all storage pools across the PVE cluster. + pub fn getStoragePools(self: *ProxmoxClient) ![]StoragePool { + const body = self.client.get("/api2/json/cluster/resources?type=storage") catch { + return &.{}; + }; + defer self.allocator.free(body); + + var parsed = http.parseJsonResponse(self.allocator, body) catch { + return &.{}; + }; + defer parsed.deinit(); + + const data_val = switch (parsed.value) { + .object => |obj| obj.get("data") orelse return &.{}, + else => return &.{}, + }; + const items = switch (data_val) { + .array => |arr| arr.items, + else => return &.{}, + }; + + var results: std.ArrayListUnmanaged(StoragePool) = .empty; + for (items) |item| { + const obj = switch (item) { + .object => |o| o, + else => continue, + }; + + const name = try self.allocator.dupe(u8, http.jsonStr(obj, "storage", "unknown")); + const node = try self.allocator.dupe(u8, http.jsonStr(obj, "node", "unknown")); + const pool_type = try self.allocator.dupe(u8, http.jsonStr(obj, "plugintype", http.jsonStr(obj, "type", "unknown"))); + const status = try 
self.allocator.dupe(u8, http.jsonStr(obj, "status", "unknown")); + + try results.append(self.allocator, .{ + .name = name, + .node = node, + .pool_type = pool_type, + .status = status, + .disk = http.jsonInt(obj, "disk", 0), + .maxdisk = http.jsonInt(obj, "maxdisk", 0), + }); + } + + return results.toOwnedSlice(self.allocator); + } + + /// List vzdump backups from a specific storage pool on a node. + pub fn listBackups(self: *ProxmoxClient, node: []const u8, storage: []const u8) ![]BackupEntry { + const path = try std.fmt.allocPrint(self.allocator, "/api2/json/nodes/{s}/storage/{s}/content?content=backup", .{ node, storage }); + defer self.allocator.free(path); + + const body = self.client.get(path) catch return &.{}; + defer self.allocator.free(body); + + var parsed = http.parseJsonResponse(self.allocator, body) catch return &.{}; + defer parsed.deinit(); + + const data_val = switch (parsed.value) { + .object => |obj| obj.get("data") orelse return &.{}, + else => return &.{}, + }; + const items = switch (data_val) { + .array => |arr| arr.items, + else => return &.{}, + }; + + var results: std.ArrayListUnmanaged(BackupEntry) = .empty; + for (items) |item| { + const obj = switch (item) { + .object => |o| o, + else => continue, + }; + + const volid = try self.allocator.dupe(u8, http.jsonStr(obj, "volid", "")); + const format = try self.allocator.dupe(u8, http.jsonStr(obj, "format", "unknown")); + + try results.append(self.allocator, .{ + .volid = volid, + .node = try self.allocator.dupe(u8, node), + .storage = try self.allocator.dupe(u8, storage), + .size = http.jsonInt(obj, "size", 0), + .ctime = http.jsonInt(obj, "ctime", 0), + .vmid = http.jsonInt(obj, "vmid", 0), + .format = format, + }); + } + + return results.toOwnedSlice(self.allocator); + } + + /// Delete a backup by volume ID. 
    /// Delete a backup by volume ID.
    /// `volid` is a PVE volume identifier such as "local:backup/vzdump-...".
    /// NOTE(review): only ':' is percent-encoded below, but vzdump volids
    /// also contain '/'. Confirm the PVE API accepts a raw '/' inside this
    /// path segment; otherwise it needs encoding as %2F too.
    pub fn deleteBackup(self: *ProxmoxClient, node: []const u8, storage: []const u8, volid: []const u8) !void {
        // URL-encode the volid (colons → %3A)
        var encoded: std.ArrayListUnmanaged(u8) = .empty;
        defer encoded.deinit(self.allocator);
        for (volid) |c| {
            if (c == ':') {
                try encoded.appendSlice(self.allocator, "%3A");
            } else {
                try encoded.append(self.allocator, c);
            }
        }

        const path = try std.fmt.allocPrint(self.allocator, "/api2/json/nodes/{s}/storage/{s}/content/{s}", .{
            node, storage, encoded.items,
        });
        defer self.allocator.free(path);

        // The response body is discarded: a non-error return from the HTTP
        // layer is treated as success.
        const body = try self.client.delete(path);
        self.allocator.free(body);
    }

    /// Release the underlying HTTP client. Must succeed (returns void).
    pub fn deinit(self: *ProxmoxClient) void {
        self.client.deinit();
    }
};

// ── file: tui/src/api/talos.zig ──────────────────────────────────────

const std = @import("std");
const config = @import("../config.zig");
const http = @import("http_client.zig");
const Allocator = std.mem.Allocator;

/// Talos/Kubernetes version pair reported by one node.
/// All strings are allocator-owned copies made by getVersion().
pub const TalosVersion = struct {
    node: []const u8,
    talos_version: []const u8,
    kubernetes_version: []const u8,
};

/// One etcd cluster member as reported by `talosctl etcd members`.
pub const EtcdMember = struct {
    hostname: []const u8,
    id: u64,
    is_learner: bool,
};

/// Thin wrapper around the `talosctl` CLI, invoked as a subprocess.
/// config_path and context are borrowed from the parsed config (not owned).
pub const TalosClient = struct {
    allocator: Allocator,
    config_path: []const u8,
    context: []const u8,

    pub fn init(allocator: Allocator, talos_cfg: config.TalosConfig) TalosClient {
        return .{
            .allocator = allocator,
            .config_path = talos_cfg.config_path,
            .context = talos_cfg.context,
        };
    }

    /// Get Talos and Kubernetes version for a specific node.
    /// Returns null if the node is unreachable.
    /// NOTE(review): runTalosctl always appends "-o json"; confirm talosctl
    /// accepts "--short" combined with JSON output.
    pub fn getVersion(self: *TalosClient, node_ip: []const u8) ?TalosVersion {
        const output = self.runTalosctl(&.{
            "version", "--nodes", node_ip, "--short",
        }) orelse return null;
        defer self.allocator.free(output);

        // Parse the JSON output. talosctl version -o json outputs messages array.
        var parsed = std.json.parseFromSlice(std.json.Value, self.allocator, output, .{
            .ignore_unknown_fields = true,
            .allocate = .alloc_always,
        }) catch return null;
        defer parsed.deinit();

        // talosctl version -o json structure:
        // {"messages":[{"metadata":{"hostname":"..."},"version":{"tag":"v1.9.x","...":"..."}}]}
        const root = switch (parsed.value) {
            .object => |obj| obj,
            else => return null,
        };

        const messages = switch (root.get("messages") orelse return null) {
            .array => |arr| arr.items,
            else => return null,
        };
        if (messages.len == 0) return null;

        // Only the first message (node) is inspected; one node was queried.
        const msg = switch (messages[0]) {
            .object => |obj| obj,
            else => return null,
        };

        // Extract version info
        const version_obj = switch (msg.get("version") orelse return null) {
            .object => |obj| obj,
            else => return null,
        };

        const talos_ver = http.jsonStr(version_obj, "tag", "unknown");
        // Kubernetes version is typically in a separate field or needs a different query
        // For now extract what's available
        // NOTE(review): "kubernetes_version" presence in this payload is
        // assumed, with "-" as the fallback — confirm against talosctl output.
        const k8s_ver = http.jsonStr(version_obj, "kubernetes_version", "-");

        // NOTE(review): if the second or third dupe fails the earlier copies
        // leak (no errdefer); tolerable only because OOM is terminal here.
        return .{
            .node = self.allocator.dupe(u8, node_ip) catch return null,
            .talos_version = self.allocator.dupe(u8, talos_ver) catch return null,
            .kubernetes_version = self.allocator.dupe(u8, k8s_ver) catch return null,
        };
    }
+ pub fn getEtcdMembers(self: *TalosClient) []EtcdMember { + const output = self.runTalosctl(&.{"etcd", "members"}) orelse return &.{}; + defer self.allocator.free(output); + + var parsed = std.json.parseFromSlice(std.json.Value, self.allocator, output, .{ + .ignore_unknown_fields = true, + .allocate = .alloc_always, + }) catch return &.{}; + defer parsed.deinit(); + + const root = switch (parsed.value) { + .object => |obj| obj, + else => return &.{}, + }; + + const messages = switch (root.get("messages") orelse return &.{}) { + .array => |arr| arr.items, + else => return &.{}, + }; + if (messages.len == 0) return &.{}; + + const msg = switch (messages[0]) { + .object => |obj| obj, + else => return &.{}, + }; + + const members = switch (msg.get("members") orelse return &.{}) { + .array => |arr| arr.items, + else => return &.{}, + }; + + var results: std.ArrayListUnmanaged(EtcdMember) = .empty; + for (members) |item| { + const obj = switch (item) { + .object => |o| o, + else => continue, + }; + results.append(self.allocator, .{ + .hostname = self.allocator.dupe(u8, http.jsonStr(obj, "hostname", "unknown")) catch continue, + .id = @intCast(http.jsonInt(obj, "id", 0)), + .is_learner = blk: { + const val = obj.get("is_learner") orelse break :blk false; + break :blk switch (val) { + .bool => |b| b, + else => false, + }; + }, + }) catch continue; + } + + return results.toOwnedSlice(self.allocator) catch &.{}; + } + + /// Run a talosctl command with standard flags and return stdout. + /// Returns null on any failure. 
+ fn runTalosctl(self: *TalosClient, extra_args: []const []const u8) ?[]const u8 { + var argv: std.ArrayListUnmanaged([]const u8) = .empty; + defer argv.deinit(self.allocator); + + argv.append(self.allocator, "talosctl") catch return null; + argv.appendSlice(self.allocator, extra_args) catch return null; + argv.appendSlice(self.allocator, &.{ + "--talosconfig", self.config_path, + "--context", self.context, + "-o", "json", + }) catch return null; + + const result = std.process.Child.run(.{ + .allocator = self.allocator, + .argv = argv.items, + .max_output_bytes = 512 * 1024, + }) catch return null; + defer self.allocator.free(result.stderr); + + const term = result.term; + if (term == .Exited and term.Exited == 0) { + return result.stdout; + } + + self.allocator.free(result.stdout); + return null; + } + + pub fn deinit(self: *TalosClient) void { + _ = self; + } +}; diff --git a/tui/src/app.zig b/tui/src/app.zig new file mode 100644 index 0000000..9af3149 --- /dev/null +++ b/tui/src/app.zig @@ -0,0 +1,485 @@ +const std = @import("std"); +const vaxis = @import("vaxis"); +const config = @import("config.zig"); +const poll = @import("poll.zig"); +const ClusterView = @import("views/cluster.zig").ClusterView; +const StorageView = @import("views/storage.zig").StorageView; +const backups_view = @import("views/backups.zig"); +const BackupView = backups_view.BackupView; +const DeleteAction = backups_view.DeleteAction; +const PerformanceView = @import("views/performance.zig").PerformanceView; +const proxmox_api = @import("api/proxmox.zig"); + +pub const Event = union(enum) { + key_press: vaxis.Key, + key_release: vaxis.Key, + mouse: vaxis.Mouse, + mouse_leave, + focus_in, + focus_out, + paste_start, + paste_end, + paste: []const u8, + color_report: vaxis.Color.Report, + color_scheme: vaxis.Color.Scheme, + winsize: vaxis.Winsize, + cap_kitty_keyboard, + cap_kitty_graphics, + cap_rgb, + cap_sgr_pixels, + cap_unicode, + cap_da1, + cap_color_scheme_updates, + cap_multi_cursor, + 
/// Events delivered by the vaxis loop plus one custom app event.
pub const Event = union(enum) {
    key_press: vaxis.Key,
    key_release: vaxis.Key,
    mouse: vaxis.Mouse,
    mouse_leave,
    focus_in,
    focus_out,
    paste_start,
    paste_end,
    paste: []const u8,
    color_report: vaxis.Color.Report,
    color_scheme: vaxis.Color.Scheme,
    winsize: vaxis.Winsize,
    cap_kitty_keyboard,
    cap_kitty_graphics,
    cap_rgb,
    cap_sgr_pixels,
    cap_unicode,
    cap_da1,
    cap_color_scheme_updates,
    cap_multi_cursor,
    // Custom events
    data_refresh,
};

/// The four top-level views, numbered to match the 1-4 hotkeys.
pub const View = enum(u8) {
    cluster = 0,
    storage = 1,
    backups = 2,
    performance = 3,

    /// Tab label shown in the top bar.
    pub fn label(self: View) []const u8 {
        return switch (self) {
            .cluster => " 1:Cluster ",
            .storage => " 2:Storage ",
            .backups => " 3:Backups ",
            .performance => " 4:Perf ",
        };
    }
};

// Below these dimensions the UI refuses to render (see drawMinSizeMessage).
const min_width: u16 = 80;
const min_height: u16 = 24;

/// Top-level application: owns the terminal, the event loop, all view
/// widgets, and the heap-allocated shared poll states the background
/// poller writes into.
pub const App = struct {
    vx: vaxis.Vaxis,
    tty: vaxis.Tty,
    loop: vaxis.Loop(Event),
    cfg: config.Config,
    active_view: View = .cluster,
    show_help: bool = false,
    should_quit: bool = false,
    alloc: std.mem.Allocator,

    // Cluster view + polling
    cluster_view: ClusterView,
    cluster_state: *poll.ClusterState,
    // Storage view
    storage_view: StorageView,
    storage_state: *poll.StorageState,
    // Backup view
    backup_view: BackupView,
    backup_state: *poll.BackupState,
    // Performance view
    perf_view: PerformanceView,
    perf_state: *poll.PerfState,
    // Poller (shared)
    poller: *poll.Poller,

    // Backing buffer for the Tty writer.
    tty_buf: [4096]u8 = undefined,

    /// Allocate the shared poll states and construct the app.
    /// NOTE(review): `&app.tty_buf`, `&app.tty` and `&app.vx` are captured
    /// while `app` is a local; returning App by value copies the struct, so
    /// these pointers are only valid if the compiler places `app` directly
    /// in the caller's result location. The poller.cfg wiring in run() shows
    /// awareness of this class of issue — confirm tty/loop are safe too.
    /// NOTE(review): the create()d states leak if a later `try` here fails
    /// (no errdefer destroy).
    pub fn init(alloc: std.mem.Allocator, cfg: config.Config) !App {
        const state = try alloc.create(poll.ClusterState);
        state.* = poll.ClusterState.init(alloc);

        const storage_state = try alloc.create(poll.StorageState);
        storage_state.* = poll.StorageState.init(alloc);

        const backup_state = try alloc.create(poll.BackupState);
        backup_state.* = poll.BackupState.init(alloc);

        const perf_state = try alloc.create(poll.PerfState);
        perf_state.* = poll.PerfState.init(alloc);

        const poller = try alloc.create(poll.Poller);
        // cfg pointer set in run() after App is at its final address
        poller.* = poll.Poller.init(alloc, state, storage_state, backup_state, perf_state, undefined, cfg.tui_settings.refresh_interval_ms);

        var app: App = .{
            .vx = try vaxis.init(alloc, .{}),
            .tty = undefined,
            .loop = undefined,
            .cfg = cfg,
            .alloc = alloc,
            .cluster_view = ClusterView.init(),
            .cluster_state = state,
            .storage_view = StorageView.init(cfg.tui_settings.warn_threshold, cfg.tui_settings.crit_threshold),
            .storage_state = storage_state,
            .backup_view = BackupView.init(cfg.tui_settings.stale_days),
            .backup_state = backup_state,
            .perf_view = PerformanceView.init(),
            .perf_state = perf_state,
            .poller = poller,
        };
        app.tty = try vaxis.Tty.init(&app.tty_buf);
        app.loop = .{ .tty = &app.tty, .vaxis = &app.vx };
        return app;
    }

    /// Stop the poller and loop, tear down the terminal, free all states.
    pub fn deinit(self: *App, alloc: std.mem.Allocator) void {
        self.poller.stop();
        self.loop.stop();
        self.vx.deinit(alloc, self.tty.writer());
        self.tty.deinit();
        self.cluster_state.deinit();
        self.storage_state.deinit();
        self.backup_state.deinit();
        self.perf_state.deinit();
        alloc.destroy(self.cluster_state);
        alloc.destroy(self.storage_state);
        alloc.destroy(self.backup_state);
        alloc.destroy(self.perf_state);
        alloc.destroy(self.poller);
    }

    /// Main loop: start polling, enter the alt screen, then block on events
    /// and redraw after each one until quit.
    pub fn run(self: *App, alloc: std.mem.Allocator) !void {
        // Now that self is at its final address, wire up the config pointer
        self.poller.cfg = &self.cfg;

        try self.loop.init();
        try self.loop.start();

        // Start background polling
        try self.poller.start();

        try self.vx.enterAltScreen(self.tty.writer());
        try self.vx.queryTerminal(self.tty.writer(), 1_000_000_000);

        while (!self.should_quit) {
            const event = self.loop.nextEvent();
            try self.handleEvent(alloc, event);
            try self.draw();
            try self.vx.render(self.tty.writer());
        }
    }

    fn handleEvent(self: *App, alloc: std.mem.Allocator, event: Event) !void {
        switch (event) {
            .key_press => |key| self.handleKey(key),
            .winsize => |ws| try self.vx.resize(alloc, self.tty.writer(), ws),
            .data_refresh => {}, // Just triggers redraw
            else => {},
        }
    }

    /// Key dispatch: help overlay > global keys > view switching > active view.
    fn handleKey(self: *App, key: vaxis.Key) void {
        // Help overlay dismissal
        if (self.show_help) {
            if (key.matches('?', .{}) or key.matches(vaxis.Key.escape, .{})) {
                self.show_help = false;
            }
            return;
        }

        // Global keys
        if (key.matches('q', .{})) {
            self.should_quit = true;
            return;
        }
        if (key.matches('?', .{})) {
            self.show_help = true;
            return;
        }
        if (key.matches('r', .{})) {
            self.poller.triggerRefresh();
            return;
        }

        // View switching: 1-4
        // NOTE(review): if plain-tab `matches` also accepts a shifted tab in
        // this vaxis version, the shift-tab branch below is unreachable —
        // confirm, and check whether shift-tab arrives as a distinct keycode.
        if (key.matches('1', .{})) {
            self.active_view = .cluster;
        } else if (key.matches('2', .{})) {
            self.active_view = .storage;
        } else if (key.matches('3', .{})) {
            self.active_view = .backups;
        } else if (key.matches('4', .{})) {
            self.active_view = .performance;
        } else if (key.matches(vaxis.Key.tab, .{})) {
            self.cycleView();
        } else if (key.matches(vaxis.Key.tab, .{ .shift = true })) {
            self.cycleViewBack();
        } else {
            // Delegate to active view
            switch (self.active_view) {
                .cluster => self.cluster_view.handleKey(key),
                .storage => self.storage_view.handleKey(key),
                .backups => self.backup_view.handleKey(key),
                .performance => self.perf_view.handleKey(key),
            }
        }
    }

    // Advance to the next view, wrapping after the fourth.
    fn cycleView(self: *App) void {
        const cur = @intFromEnum(self.active_view);
        self.active_view = @enumFromInt((cur + 1) % 4);
    }

    // Go to the previous view; +3 mod 4 avoids unsigned underflow at 0.
    fn cycleViewBack(self: *App) void {
        const cur = @intFromEnum(self.active_view);
        self.active_view = @enumFromInt((cur + 3) % 4);
    }

    /// Compose the frame: top bar, content area, status bar, help overlay.
    fn draw(self: *App) !void {
        const win = self.vx.window();
        win.clear();

        if (win.width < min_width or win.height < min_height) {
            self.drawMinSizeMessage(win);
            return;
        }

        // Top bar (row 0)
        const top_bar = win.child(.{ .height = 1 });
        self.drawTopBar(top_bar);

        // Status bar (last row)
        const status_bar = win.child(.{
            .y_off = @intCast(win.height -| 1),
            .height = 1,
        });
        self.drawStatusBar(status_bar);

        // Content area
        const content = win.child(.{
            .y_off = 1,
            .height = win.height -| 2,
        });
        self.drawContent(content);

        // Help overlay on top
        if (self.show_help) {
            self.drawHelpOverlay(win);
        }
    }

    // Centered red warning when the terminal is below the minimum size.
    fn drawMinSizeMessage(self: *App, win: vaxis.Window) void {
        _ = self;
        const msg = "Terminal too small (min 80x24)";
        const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0;
        const row: u16 = win.height / 2;
        _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 1 } } }}, .{
            .col_offset = col,
            .row_offset = row,
        });
    }

    // View tabs on the left, app title on the right; active tab highlighted.
    fn drawTopBar(self: *App, win: vaxis.Window) void {
        win.fill(.{ .style = .{ .bg = .{ .index = 8 } } });

        var col: u16 = 0;
        const views = [_]View{ .cluster, .storage, .backups, .performance };
        for (views) |view| {
            const lbl = view.label();
            const is_active = (view == self.active_view);
            const style: vaxis.Style = if (is_active)
                .{ .fg = .{ .index = 0 }, .bg = .{ .index = 4 }, .bold = true }
            else
                .{ .fg = .{ .index = 7 }, .bg = .{ .index = 8 } };

            _ = win.print(&.{.{ .text = lbl, .style = style }}, .{
                .col_offset = col,
                .wrap = .none,
            });
            col += @intCast(lbl.len);
        }

        const title = " vitui ";
        if (win.width > title.len + col) {
            const title_col: u16 = win.width - @as(u16, @intCast(title.len));
            _ = win.print(&.{.{ .text = title, .style = .{
                .fg = .{ .index = 6 },
                .bg = .{ .index = 8 },
                .bold = true,
            } }}, .{
                .col_offset = title_col,
                .wrap = .none,
            });
        }
    }

    // Keybinding hints on the left, refresh age ("Ns ago") on the right.
    fn drawStatusBar(self: *App, win: vaxis.Window) void {
        win.fill(.{ .style = .{ .bg = .{ .index = 8 } } });

        const bar_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bg = .{ .index = 8 } };

        // Left: keybinding hints
        const hint = " q:quit ?:help 1-4:views r:refresh j/k:nav ";
        _ = win.print(&.{.{ .text = hint, .style = bar_style }}, .{ .wrap = .none });

        // Right: refresh status
        var buf: [64]u8 = undefined;
        const status_text = blk: {
            if (self.cluster_state.isLoading()) {
                break :blk "Loading...";
            }
            const last = self.cluster_state.getLastRefresh();
            if (last == 0) break :blk "";
            const now = std.time.timestamp();
            const ago = now - last;
            // Guard against clock skew producing a negative age.
            if (ago < 0) break :blk "";
            break :blk std.fmt.bufPrint(&buf, " {d}s ago ", .{ago}) catch "";
        };
        if (status_text.len > 0 and win.width > status_text.len + hint.len) {
            const status_col: u16 = win.width - @as(u16, @intCast(status_text.len));
            _ = win.print(&.{.{ .text = status_text, .style = bar_style }}, .{
                .col_offset = status_col,
                .wrap = .none,
            });
        }
    }

    // Render the active view, or a placeholder while its first poll runs.
    fn drawContent(self: *App, win: vaxis.Window) void {
        switch (self.active_view) {
            .cluster => {
                const rows = self.cluster_state.getRows();
                if (self.cluster_state.isLoading() and rows.len == 0) {
                    self.drawPlaceholder(win, "Loading cluster data...");
                } else {
                    self.cluster_view.draw(self.alloc, win, rows);
                }
            },
            .storage => {
                const pools = self.storage_state.getPools();
                const disks = self.storage_state.getVmDisks();
                if (self.storage_state.isLoading() and pools.len == 0) {
                    self.drawPlaceholder(win, "Loading storage data...");
                } else {
                    self.storage_view.draw(self.alloc, win, pools, disks);
                }
            },
            .backups => {
                const backups = self.backup_state.getBackups();
                const k8s_backups = self.backup_state.getK8sBackups();
                if (self.backup_state.isLoading() and backups.len == 0 and k8s_backups.len == 0) {
                    self.drawPlaceholder(win, "Loading backup data...");
                } else {
                    self.backup_view.draw(win, backups, k8s_backups);

                    // Check for pending delete action
                    if (self.backup_view.consumeDeleteAction(backups)) |action| {
                        self.executeDelete(action);
                    }
                }
            },
            .performance => {
                const hosts = self.perf_state.getHosts();
                const pods = self.perf_state.getPods();
                const available = self.perf_state.isMetricsAvailable();
                if (self.perf_state.isLoading() and hosts.len == 0) {
                    self.drawPlaceholder(win, "Loading performance data...");
                } else {
                    self.perf_view.draw(self.alloc, win, hosts, pods, available);
                }
            },
        }
    }

    // Delete a confirmed backup, trying each configured PVE cluster until
    // one succeeds, then trigger a refresh so the list updates.
    // NOTE(review): failures on every cluster are silent (catch continue) —
    // the user gets no feedback when the delete did not happen.
    fn executeDelete(self: *App, action: DeleteAction) void {
        // Find matching PVE cluster config for this node
        for (self.cfg.proxmox.clusters) |pc| {
            var client = proxmox_api.ProxmoxClient.init(self.alloc, pc);
            defer client.deinit();
            client.deleteBackup(action.node, action.storage, action.volid) catch continue;
            // Trigger refresh to show updated list
            self.poller.triggerRefresh();
            return;
        }
    }

    // Centered bold cyan message used for loading states.
    fn drawPlaceholder(self: *App, win: vaxis.Window, label: []const u8) void {
        _ = self;
        const col: u16 = if (win.width > label.len) (win.width - @as(u16, @intCast(label.len))) / 2 else 0;
        const row: u16 = win.height / 2;
        _ = win.print(&.{.{ .text = label, .style = .{
            .fg = .{ .index = 6 },
            .bold = true,
        } }}, .{
            .col_offset = col,
            .row_offset = row,
            .wrap = .none,
        });
    }

    // Centered bordered box listing global keys plus the active view's keys.
    fn drawHelpOverlay(self: *App, win: vaxis.Window) void {
        const box_w: u16 = 48;
        const box_h: u16 = 22;
        const x: i17 = @intCast(if (win.width > box_w) (win.width - box_w) / 2 else 0);
        const y: i17 = @intCast(if (win.height > box_h) (win.height - box_h) / 2 else 0);

        const help_win = win.child(.{
            .x_off = x,
            .y_off = y,
            .width = box_w,
            .height = box_h,
            .border = .{ .where = .all, .style = .{ .fg = .{ .index = 4 } } },
        });

        help_win.fill(.{ .style = .{ .bg = .{ .index = 0 } } });

        const title_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 0 }, .bold = true };
        const text_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bg = .{ .index = 0 } };
        const section_style: vaxis.Style = .{ .fg = .{ .index = 5 }, .bg = .{ .index = 0 }, .bold = true };

        var row: u16 = 0;
        const w = help_win;

        _ = w.print(&.{.{ .text = " Keybindings", .style = title_style }}, .{ .row_offset = row, .wrap = .none });
        row += 1;

        const global = [_][]const u8{
            " q Quit",
            " ? Toggle help",
            " 1-4 Switch view",
            " Tab/S-Tab Next/Prev view",
            " j/k Navigate down/up",
            " g/G Top/Bottom",
            " r Refresh all data",
            " Esc Close overlay",
        };
        for (global) |line| {
            row += 1;
            if (row >= w.height) break;
            _ = w.print(&.{.{ .text = line, .style = text_style }}, .{ .row_offset = row, .wrap = .none });
        }

        // View-specific hints
        row += 1;
        if (row < w.height) {
            const view_title = switch (self.active_view) {
                .cluster => " Cluster View",
                .storage => " Storage View",
                .backups => " Backups View",
                .performance => " Performance View",
            };
            _ = w.print(&.{.{ .text = view_title, .style = section_style }}, .{ .row_offset = row, .wrap = .none });
            row += 1;
        }

        const view_lines: []const []const u8 = switch (self.active_view) {
            .cluster => &.{
                " (no extra keys)",
            },
            .storage => &.{
                " Tab Switch pools/disks",
            },
            .backups => &.{
                " d Delete selected backup",
                " / Search/filter",
                " Esc Clear filter",
            },
            .performance => &.{
                " s Cycle sort column",
                " S Reverse sort direction",
                " n Cycle namespace filter",
            },
        };
        for (view_lines) |line| {
            if (row >= w.height) break;
            _ = w.print(&.{.{ .text = line, .style = text_style }}, .{ .row_offset = row, .wrap = .none });
        }
    }
};

// ── file: tui/src/config.zig ─────────────────────────────────────────

const std = @import("std");
const yaml = @import("yaml");
const Allocator = std.mem.Allocator;

const Value = yaml.Yaml.Value;
const Map = yaml.Yaml.Map;

// ── Config types ───────────────────────────────────────────────────

/// Fully parsed pvt.yaml. String fields alias the YAML document tree
/// owned by load() — see that function for lifetime notes.
pub const Config = struct {
    version: []const u8,
    proxmox: ProxmoxConfig,
    talos: TalosConfig,
    clusters: []const ClusterConfig,
    tui_settings: TuiSettings,
};

pub const ProxmoxConfig = struct {
    clusters: []const ProxmoxCluster,
};

/// Connection settings for one PVE cluster (API token auth).
pub const ProxmoxCluster = struct {
    name: []const u8,
    endpoint: []const u8,
    token_id: []const u8,
    token_secret: []const u8,
    tls_verify: bool,
};
pub const TalosConfig = struct {
    config_path: []const u8,
    context: []const u8,
};

/// One Kubernetes cluster and the Proxmox VMs backing its nodes.
pub const ClusterConfig = struct {
    name: []const u8,
    proxmox_cluster: []const u8,
    endpoint: []const u8,
    nodes: []const NodeConfig,
};

pub const NodeConfig = struct {
    name: []const u8,
    role: []const u8,
    proxmox_vmid: i64,
    proxmox_node: []const u8,
    ip: []const u8,
};

/// Optional `tui:` section tunables, with defaults.
pub const TuiSettings = struct {
    warn_threshold: u8 = 10, // free-space % below which storage warns
    crit_threshold: u8 = 5, // free-space % below which storage is critical
    stale_days: u32 = 30, // backups older than this are flagged stale
    refresh_interval_ms: u64 = 30_000, // background poll period
};

// ── Parsing helpers ────────────────────────────────────────────────

// Required scalar: error.ConfigParseFailed when missing or non-scalar.
fn getStr(map: Map, key: []const u8) ![]const u8 {
    const val = map.get(key) orelse return error.ConfigParseFailed;
    return val.asScalar() orelse return error.ConfigParseFailed;
}

// Optional scalar with a default.
fn getStrOr(map: Map, key: []const u8, default: []const u8) []const u8 {
    const val = map.get(key) orelse return default;
    return val.asScalar() orelse default;
}

// Boolean: accepts a YAML boolean or the scalars true/yes/false/no.
fn getBool(map: Map, key: []const u8, default: bool) bool {
    const val = map.get(key) orelse return default;
    if (val == .boolean) return val.boolean;
    const s = val.asScalar() orelse return default;
    if (std.mem.eql(u8, s, "true") or std.mem.eql(u8, s, "yes")) return true;
    if (std.mem.eql(u8, s, "false") or std.mem.eql(u8, s, "no")) return false;
    return default;
}

// Integer scalar (base 10) with a default on any failure.
fn getInt(map: Map, key: []const u8, default: i64) i64 {
    const val = map.get(key) orelse return default;
    const s = val.asScalar() orelse return default;
    return std.fmt.parseInt(i64, s, 10) catch default;
}

fn getList(map: Map, key: []const u8) ?[]Value {
    const val = map.get(key) orelse return null;
    return val.asList();
}

fn getMap(map: Map, key: []const u8) ?Map {
    const val = map.get(key) orelse return null;
    return val.asMap();
}

// ── Config loading ─────────────────────────────────────────────────

/// Read, env-expand, and parse the config file at `path`.
///
/// Lifetime fix: the parsed Config's strings alias `expanded` and the YAML
/// document tree (zig-yaml scalars are not copied), so both are kept alive
/// for the lifetime of the process instead of being freed with `defer` as
/// before — that left every string in the returned Config dangling. They
/// are released only on the error paths (errdefer).
pub fn load(alloc: Allocator, path: []const u8) !Config {
    const raw = std.fs.cwd().readFileAlloc(alloc, path, 1024 * 1024) catch |err| {
        std.log.err("failed to read config file '{s}': {}", .{ path, err });
        return error.ConfigReadFailed;
    };
    defer alloc.free(raw);

    const expanded = expandEnvVars(alloc, raw) catch |err| {
        std.log.err("failed to expand environment variables: {}", .{err});
        return err;
    };
    errdefer alloc.free(expanded);

    var y: yaml.Yaml = .{ .source = expanded };
    // Registered before load so a partially-built document is also freed
    // on failure. NOTE(review): assumes y.deinit is safe after a failed
    // load — confirm against the zig-yaml version in use.
    errdefer y.deinit(alloc);
    y.load(alloc) catch |err| {
        if (err == error.ParseFailure) {
            std.log.err("invalid YAML in config file", .{});
        }
        return error.ConfigParseFailed;
    };

    if (y.docs.items.len == 0) {
        std.log.err("empty config file", .{});
        return error.ConfigParseFailed;
    }

    const root_map = y.docs.items[0].asMap() orelse {
        std.log.err("config root must be a mapping", .{});
        return error.ConfigParseFailed;
    };

    return parseConfig(alloc, root_map);
}

/// Build a Config from the root mapping. Allocates the cluster/node arrays
/// (freed on error via errdefer — previously they leaked on mid-parse
/// failure); string fields alias the YAML tree (see load()).
fn parseConfig(alloc: Allocator, root: Map) !Config {
    const version = try getStr(root, "version");
    if (!std.mem.eql(u8, version, "1")) {
        std.log.err("unsupported config version: {s}", .{version});
        return error.UnsupportedVersion;
    }

    // Parse proxmox section
    const pve_map = getMap(root, "proxmox") orelse {
        std.log.err("missing 'proxmox' section", .{});
        return error.ConfigParseFailed;
    };
    const pve_clusters_list = getList(pve_map, "clusters") orelse {
        std.log.err("missing 'proxmox.clusters'", .{});
        return error.ConfigParseFailed;
    };
    const pve_clusters = try alloc.alloc(ProxmoxCluster, pve_clusters_list.len);
    errdefer alloc.free(pve_clusters);
    for (pve_clusters_list, 0..) |item, i| {
        const m = item.asMap() orelse return error.ConfigParseFailed;
        pve_clusters[i] = .{
            .name = try getStr(m, "name"),
            .endpoint = try getStr(m, "endpoint"),
            .token_id = try getStr(m, "token_id"),
            .token_secret = try getStr(m, "token_secret"),
            .tls_verify = getBool(m, "tls_verify", true),
        };
    }

    // Parse talos section
    const talos_map = getMap(root, "talos") orelse {
        std.log.err("missing 'talos' section", .{});
        return error.ConfigParseFailed;
    };
    const talos = TalosConfig{
        .config_path = try getStr(talos_map, "config_path"),
        .context = try getStr(talos_map, "context"),
    };

    // Parse clusters section
    const clusters_list = getList(root, "clusters") orelse {
        std.log.err("missing 'clusters' section", .{});
        return error.ConfigParseFailed;
    };
    const clusters = try alloc.alloc(ClusterConfig, clusters_list.len);
    // Track how many entries are fully built so the errdefer can free
    // exactly the node arrays that were allocated.
    var built: usize = 0;
    errdefer {
        for (clusters[0..built]) |c| alloc.free(c.nodes);
        alloc.free(clusters);
    }
    for (clusters_list, 0..) |item, i| {
        const m = item.asMap() orelse return error.ConfigParseFailed;
        const nodes_list = getList(m, "nodes") orelse return error.ConfigParseFailed;
        const nodes = try alloc.alloc(NodeConfig, nodes_list.len);
        errdefer alloc.free(nodes);
        for (nodes_list, 0..) |n, j| {
            const nm = n.asMap() orelse return error.ConfigParseFailed;
            nodes[j] = .{
                .name = try getStr(nm, "name"),
                .role = try getStr(nm, "role"),
                .proxmox_vmid = getInt(nm, "proxmox_vmid", 0),
                .proxmox_node = try getStr(nm, "proxmox_node"),
                .ip = try getStr(nm, "ip"),
            };
        }
        clusters[i] = .{
            .name = try getStr(m, "name"),
            .proxmox_cluster = try getStr(m, "proxmox_cluster"),
            .endpoint = try getStr(m, "endpoint"),
            .nodes = nodes,
        };
        built = i + 1;
    }

    // Parse optional tui section
    var tui_settings = TuiSettings{};
    if (getMap(root, "tui")) |tui_map| {
        if (getMap(tui_map, "storage")) |storage| {
            const w = getInt(storage, "warn_threshold", 10);
            const c = getInt(storage, "crit_threshold", 5);
            // Clamp to 0-100 before narrowing to u8 (avoids @intCast panic).
            tui_settings.warn_threshold = @intCast(@max(0, @min(100, w)));
            tui_settings.crit_threshold = @intCast(@max(0, @min(100, c)));
        }
        if (getMap(tui_map, "backups")) |backups| {
            const d = getInt(backups, "stale_days", 30);
            tui_settings.stale_days = @intCast(@max(0, d));
        }
        const ri = getStrOr(tui_map, "refresh_interval", "30s");
        tui_settings.refresh_interval_ms = parseDurationMs(ri);
    }

    return .{
        .version = version,
        .proxmox = .{ .clusters = pve_clusters },
        .talos = talos,
        .clusters = clusters,
        .tui_settings = tui_settings,
    };
}

// ── Utility functions ──────────────────────────────────────────────

/// Parse a duration string like "5m", "30s", "1h" into milliseconds.
/// Any malformed input falls back to 30s.
pub fn parseDurationMs(s: []const u8) u64 {
    if (s.len == 0) return 30_000;
    const suffix = s[s.len - 1];
    const num_str = s[0 .. s.len - 1];
    const num = std.fmt.parseInt(u64, num_str, 10) catch return 30_000;
    return switch (suffix) {
        's' => num * 1_000,
        'm' => num * 60_000,
        'h' => num * 3_600_000,
        else => 30_000,
    };
}
/// Expand `${VAR}` references in a string using environment variables.
/// Caller owns the returned slice. Errors: error.UnterminatedEnvVar when a
/// `${` has no closing `}`; error.EnvVarNotSet when a referenced variable
/// is missing from the environment.
pub fn expandEnvVars(alloc: Allocator, input: []const u8) ![]const u8 {
    var result: std.ArrayListUnmanaged(u8) = .empty;
    errdefer result.deinit(alloc);

    var i: usize = 0;
    while (i < input.len) {
        if (i + 1 < input.len and input[i] == '$' and input[i + 1] == '{') {
            const start = i + 2;
            const end = std.mem.indexOfScalarPos(u8, input, start, '}') orelse {
                return error.UnterminatedEnvVar;
            };
            const var_name = input[start..end];
            const val = std.posix.getenv(var_name) orelse {
                std.log.err("environment variable not set: {s}", .{var_name});
                return error.EnvVarNotSet;
            };
            try result.appendSlice(alloc, val);
            i = end + 1;
        } else {
            try result.append(alloc, input[i]);
            i += 1;
        }
    }
    return result.toOwnedSlice(alloc);
}

/// Discover the config file path: $PVT_CONFIG (must exist), then ./pvt.yaml.
pub fn discover() ![]const u8 {
    if (std.posix.getenv("PVT_CONFIG")) |p| {
        // Fix: the access() result used to be discarded with `catch {}`,
        // silently returning a path that might not exist. An explicitly
        // configured path that is missing is now a hard error.
        std.fs.cwd().access(p, .{}) catch return error.ConfigNotFound;
        return p;
    }
    std.fs.cwd().access("pvt.yaml", .{}) catch return error.ConfigNotFound;
    return "pvt.yaml";
}

// ── Tests ──────────────────────────────────────────────────────────

test "parseDurationMs" {
    const expect = std.testing.expect;
    try expect(parseDurationMs("30s") == 30_000);
    try expect(parseDurationMs("5m") == 300_000);
    try expect(parseDurationMs("1h") == 3_600_000);
    try expect(parseDurationMs("") == 30_000);
    try expect(parseDurationMs("bad") == 30_000);
}

test "expandEnvVars basic" {
    const alloc = std.testing.allocator;
    const result = try expandEnvVars(alloc, "hello world");
    defer alloc.free(result);
    try std.testing.expectEqualStrings("hello world", result);
}

test "TuiSettings defaults" {
    const s = TuiSettings{};
    try std.testing.expect(s.warn_threshold == 10);
    try std.testing.expect(s.crit_threshold == 5);
    try std.testing.expect(s.stale_days == 30);
    try std.testing.expect(s.refresh_interval_ms == 30_000);
}
// ── file: tui/src/main.zig ───────────────────────────────────────────

const std = @import("std");
const config = @import("config.zig");
const App = @import("app.zig").App;

/// Entry point: resolve the config path, load pvt.yaml, run the TUI.
/// Exits with status 1 on config or runtime errors (after logging).
pub fn main() !void {
    var gpa_impl: std.heap.GeneralPurposeAllocator(.{}) = .init;
    defer _ = gpa_impl.deinit();
    const alloc = gpa_impl.allocator();

    const config_path = parseArgs() catch |err| {
        // --help already printed its output; a clean exit, not an error.
        if (err == error.HelpRequested) return;
        return err;
    };

    const cfg = config.load(alloc, config_path) catch |err| {
        std.log.err("configuration error: {}", .{err});
        std.process.exit(1);
    };

    var app = App.init(alloc, cfg) catch |err| {
        std.log.err("failed to initialize TUI: {}", .{err});
        std.process.exit(1);
    };
    defer app.deinit(alloc);

    app.run(alloc) catch |err| {
        std.log.err("runtime error: {}", .{err});
        std.process.exit(1);
    };
}

/// Parse CLI arguments and return the config file path.
/// Errors: error.HelpRequested after printing usage; error.MissingArgument
/// when --config lacks a value or no config file can be discovered.
/// NOTE(review): the help text advertises a ~/.config/pvt/config.yaml
/// fallback that config.discover() does not implement — confirm intent and
/// align one side or the other.
fn parseArgs() ![]const u8 {
    var args = std.process.args();
    _ = args.skip(); // program name

    while (args.next()) |arg| {
        if (std.mem.eql(u8, arg, "--config") or std.mem.eql(u8, arg, "-c")) {
            return args.next() orelse {
                std.log.err("--config requires a path argument", .{});
                return error.MissingArgument;
            };
        }
        if (std.mem.eql(u8, arg, "--help") or std.mem.eql(u8, arg, "-h")) {
            // NOTE(review): a single posix.write may write partially; fine
            // in practice for a short help blob, but not guaranteed.
            _ = std.posix.write(std.posix.STDOUT_FILENO,
                \\vitui - TUI for pvt cluster management
                \\
                \\Usage: vitui [options]
                \\
                \\Options:
                \\ -c, --config Path to pvt.yaml config file
                \\ -h, --help Show this help message
                \\
                \\If --config is not specified, vitui searches:
                \\ $PVT_CONFIG, ./pvt.yaml, ~/.config/pvt/config.yaml
                \\
            ) catch {};
            return error.HelpRequested;
        }
    }

    // No --config flag: try to discover
    return config.discover() catch {
        std.log.err("no config file found (use --config or set $PVT_CONFIG)", .{});
        return error.MissingArgument;
    };
}

// Pull in config.zig's tests when running `zig build test`.
test {
    _ = config;
}
// ── file: tui/src/poll.zig ───────────────────────────────────────────

const std = @import("std");
const config = @import("config.zig");
const proxmox = @import("api/proxmox.zig");
const talos = @import("api/talos.zig");
const kubernetes = @import("api/kubernetes.zig");
const metrics_api = @import("api/metrics.zig");
const Allocator = std.mem.Allocator;

/// A single row in the cluster view table.
/// All fields are display-ready strings.
pub const NodeRow = struct {
    name: []const u8,
    role: []const u8,
    ip: []const u8,
    pve_node: []const u8,
    vmid: []const u8,
    talos_ver: []const u8,
    k8s_ver: []const u8,
    etcd: []const u8,
    health: []const u8,
};

/// A single row in the storage pools table.
pub const StoragePoolRow = struct {
    name: []const u8,
    node: []const u8,
    pool_type: []const u8,
    used_str: []const u8,
    total_str: []const u8,
    status: []const u8,
    usage_pct: f64,
};

/// A single row in the VM disks table.
pub const VmDiskRow = struct {
    vm_name: []const u8,
    vmid: []const u8,
    pool: []const u8,
    size_str: []const u8,
    size_bytes: i64,
};

/// Format bytes into a human-readable string (e.g., "42.1 GiB").
/// Negative inputs are clamped to 0. Caller owns the returned slice unless
/// allocation failed, in which case a static placeholder is returned.
pub fn formatBytes(alloc: Allocator, bytes: i64) []const u8 {
    const fb: f64 = @floatFromInt(@max(bytes, 0));
    const kib = 1024.0;
    const mib = kib * 1024.0;
    const gib = mib * 1024.0;
    const tib = gib * 1024.0;
    if (fb >= tib) {
        return std.fmt.allocPrint(alloc, "{d:.1} TiB", .{fb / tib}) catch "? TiB";
    } else if (fb >= gib) {
        return std.fmt.allocPrint(alloc, "{d:.1} GiB", .{fb / gib}) catch "? GiB";
    } else if (fb >= mib) {
        return std.fmt.allocPrint(alloc, "{d:.1} MiB", .{fb / mib}) catch "? MiB";
    } else if (fb >= kib) {
        return std.fmt.allocPrint(alloc, "{d:.0} KiB", .{fb / kib}) catch "? KiB";
    } else {
        // Fix: sub-KiB sizes previously fell into the KiB branch and
        // rendered as "0 KiB"/"1 KiB"; show the exact byte count instead.
        return std.fmt.allocPrint(alloc, "{d:.0} B", .{fb}) catch "? B";
    }
}
/// Written by the background poller thread and read by the UI thread; every
/// accessor takes `mutex`. All row strings are owned by `allocator` and are
/// freed on the next swapData (or on deinit).
pub const StorageState = struct {
    mutex: std.Thread.Mutex = .{},
    pools: []StoragePoolRow = &.{},
    vm_disks: []VmDiskRow = &.{},
    // True until the first successful swapData; lets the UI show a spinner.
    is_loading: bool = true,
    last_refresh_epoch: i64 = 0,
    allocator: Allocator,

    pub fn init(allocator: Allocator) StorageState {
        return .{ .allocator = allocator };
    }

    /// Take ownership of freshly fetched rows, freeing the previous ones.
    /// `new_pools`/`new_disks` (including every string they contain) must
    /// have been allocated with the same allocator passed to init.
    pub fn swapData(self: *StorageState, new_pools: []StoragePoolRow, new_disks: []VmDiskRow) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.freeDataInternal();
        self.pools = new_pools;
        self.vm_disks = new_disks;
        self.is_loading = false;
        self.last_refresh_epoch = std.time.timestamp();
    }

    /// Current pool rows. Caller must NOT free.
    /// NOTE(review): the slice is returned after the lock is released, so it
    /// is only valid until the poller's next swapData — confirm the UI never
    /// holds it across a poll interval.
    pub fn getPools(self: *StorageState) []StoragePoolRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.pools;
    }

    /// Current VM disk rows; same ownership/lifetime caveat as getPools.
    pub fn getVmDisks(self: *StorageState) []VmDiskRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.vm_disks;
    }

    pub fn isLoading(self: *StorageState) bool {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.is_loading;
    }

    // Free every string of every row, then the row slices themselves.
    // Must be called with the mutex held (or with exclusive ownership).
    fn freeDataInternal(self: *StorageState) void {
        for (self.pools) |row| {
            self.allocator.free(row.name);
            self.allocator.free(row.node);
            self.allocator.free(row.pool_type);
            self.allocator.free(row.used_str);
            self.allocator.free(row.total_str);
            self.allocator.free(row.status);
        }
        if (self.pools.len > 0) self.allocator.free(self.pools);
        for (self.vm_disks) |row| {
            self.allocator.free(row.vm_name);
            self.allocator.free(row.vmid);
            self.allocator.free(row.pool);
            self.allocator.free(row.size_str);
        }
        if (self.vm_disks.len > 0) self.allocator.free(self.vm_disks);
    }

    pub fn deinit(self: *StorageState) void {
        self.freeDataInternal();
    }
};

/// A single row in the backups table.
/// All fields are display-ready strings except age_days/is_stale.
pub const BackupRow = struct {
    volid: []const u8,
    node: []const u8,
    storage: []const u8,
    vm_name: []const u8,
    vmid: []const u8,
    size_str: []const u8,
    date_str: []const u8,
    // Whole days since the backup's ctime.
    age_days: u32,
    // True when age_days exceeds the configured stale threshold.
    is_stale: bool,
};

/// A single K8s backup row (VolSync/Velero).
pub const K8sBackupRow = struct {
    name: []const u8,
    namespace: []const u8,
    source_type: []const u8,
    status: []const u8,
    schedule: []const u8,
    last_run: []const u8,
};

/// Thread-safe shared state for backup view data.
/// Same ownership model as StorageState: the poller thread publishes rows
/// via swapData; all strings are owned by `allocator`.
pub const BackupState = struct {
    mutex: std.Thread.Mutex = .{},
    backups: []BackupRow = &.{},
    k8s_backups: []K8sBackupRow = &.{},
    // True until the first successful swapData.
    is_loading: bool = true,
    last_refresh_epoch: i64 = 0,
    allocator: Allocator,

    pub fn init(allocator: Allocator) BackupState {
        return .{ .allocator = allocator };
    }

    /// Take ownership of freshly fetched rows, freeing the previous ones.
    pub fn swapData(self: *BackupState, new_backups: []BackupRow, new_k8s: []K8sBackupRow) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.freeDataInternal();
        self.backups = new_backups;
        self.k8s_backups = new_k8s;
        self.is_loading = false;
        self.last_refresh_epoch = std.time.timestamp();
    }

    /// Current PVE backup rows. Caller must NOT free; valid only until the
    /// poller's next swapData (see note on StorageState.getPools).
    pub fn getBackups(self: *BackupState) []BackupRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.backups;
    }

    /// Current K8s backup rows; same ownership/lifetime caveat.
    pub fn getK8sBackups(self: *BackupState) []K8sBackupRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.k8s_backups;
    }

    pub fn isLoading(self: *BackupState) bool {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.is_loading;
    }

    // Free every string of every row, then the row slices themselves.
    // Must be called with the mutex held (or with exclusive ownership).
    fn freeDataInternal(self: *BackupState) void {
        for (self.backups) |row| {
            self.allocator.free(row.volid);
            self.allocator.free(row.node);
            self.allocator.free(row.storage);
            self.allocator.free(row.vm_name);
            self.allocator.free(row.vmid);
            self.allocator.free(row.size_str);
            self.allocator.free(row.date_str);
        }
        if (self.backups.len > 0) self.allocator.free(self.backups);
        for (self.k8s_backups) |row| {
            self.allocator.free(row.name);
            self.allocator.free(row.namespace);
            self.allocator.free(row.source_type);
            self.allocator.free(row.status);
            self.allocator.free(row.schedule);
            self.allocator.free(row.last_run);
        }
        if (self.k8s_backups.len > 0) self.allocator.free(self.k8s_backups);
    }

    pub fn deinit(self: *BackupState) void {
        self.freeDataInternal();
    }
};

/// A single row in the host overview (PVE node metrics).
pub const HostRow = struct {
    name: []const u8,
    cpu_pct: f64, // 0-100
    mem_used_str: []const u8,
    mem_total_str: []const u8,
    mem_pct: f64, // 0-100
};

/// A single row in the pod metrics table.
pub const PodMetricRow = struct {
    pod: []const u8,
    namespace: []const u8,
    cpu_str: []const u8, // e.g. "0.125"
    mem_str: []const u8, // e.g. "128.5 MiB"
    net_rx_str: []const u8, // e.g. "1.2 KiB/s"
    net_tx_str: []const u8, // e.g. "0.5 KiB/s"
    cpu_cores: f64, // for sorting
    mem_bytes: f64, // for sorting
};

/// Thread-safe shared state for performance view data.
/// Same ownership model as StorageState/BackupState: the poller thread
/// publishes rows via swapData; all strings are owned by `allocator`.
pub const PerfState = struct {
    mutex: std.Thread.Mutex = .{},
    hosts: []HostRow = &.{},
    pods: []PodMetricRow = &.{},
    // False when no Prometheus/VictoriaMetrics endpoint was reachable.
    metrics_available: bool = false,
    // True until the first successful swapData.
    is_loading: bool = true,
    last_refresh_epoch: i64 = 0,
    allocator: Allocator,

    pub fn init(allocator: Allocator) PerfState {
        return .{ .allocator = allocator };
    }

    /// Take ownership of freshly fetched rows, freeing the previous ones, and
    /// record whether a metrics backend was available for this cycle.
    pub fn swapData(self: *PerfState, new_hosts: []HostRow, new_pods: []PodMetricRow, available: bool) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.freeDataInternal();
        self.hosts = new_hosts;
        self.pods = new_pods;
        self.metrics_available = available;
        self.is_loading = false;
        self.last_refresh_epoch = std.time.timestamp();
    }

    /// Current host rows. Caller must NOT free; valid only until the poller's
    /// next swapData (see note on StorageState.getPools).
    pub fn getHosts(self: *PerfState) []HostRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.hosts;
    }

    /// Current pod rows; same ownership/lifetime caveat.
    pub fn getPods(self: *PerfState) []PodMetricRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.pods;
    }

    pub fn isMetricsAvailable(self: *PerfState) bool {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.metrics_available;
    }

    pub fn isLoading(self: *PerfState) bool {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.is_loading;
    }

    // Free every string of every row, then the row slices themselves.
    // Must be called with the mutex held (or with exclusive ownership).
    fn freeDataInternal(self: *PerfState) void {
        for (self.hosts) |row| {
            self.allocator.free(row.name);
            self.allocator.free(row.mem_used_str);
            self.allocator.free(row.mem_total_str);
        }
        if (self.hosts.len > 0) self.allocator.free(self.hosts);
        for (self.pods) |row| {
            self.allocator.free(row.pod);
            self.allocator.free(row.namespace);
            self.allocator.free(row.cpu_str);
            self.allocator.free(row.mem_str);
            self.allocator.free(row.net_rx_str);
            self.allocator.free(row.net_tx_str);
        }
        if (self.pods.len > 0) self.allocator.free(self.pods);
    }

    pub fn deinit(self: *PerfState) void {
        self.freeDataInternal();
    }
};

/// Format a rate in bytes/sec into a human-readable string.
/// Render a bytes-per-second rate as a short human string ("1.2 KiB/s").
/// Caller owns the returned slice; on allocation failure a static
/// placeholder literal is returned instead.
pub fn formatRate(alloc: Allocator, bytes_per_sec: f64) []const u8 {
    const one_kib = 1024.0;
    const one_mib = one_kib * 1024.0;
    if (bytes_per_sec >= one_mib) {
        return std.fmt.allocPrint(alloc, "{d:.1} MiB/s", .{bytes_per_sec / one_mib}) catch "? MiB/s";
    }
    if (bytes_per_sec >= one_kib) {
        return std.fmt.allocPrint(alloc, "{d:.1} KiB/s", .{bytes_per_sec / one_kib}) catch "? KiB/s";
    }
    return std.fmt.allocPrint(alloc, "{d:.0} B/s", .{bytes_per_sec}) catch "? B/s";
}

/// Render a Unix timestamp as "YYYY-MM-DD HH:MM" (UTC). Negative inputs are
/// clamped to the epoch. Caller owns the returned slice; a static "unknown"
/// is returned when formatting fails.
pub fn formatEpoch(alloc: Allocator, epoch: i64) []const u8 {
    const secs = std.time.epoch.EpochSeconds{ .secs = @intCast(@max(0, epoch)) };
    const year_day = secs.getEpochDay().calculateYearDay();
    const month_day = year_day.calculateMonthDay();
    const day_secs = secs.getDaySeconds();
    return std.fmt.allocPrint(alloc, "{d:0>4}-{d:0>2}-{d:0>2} {d:0>2}:{d:0>2}", .{
        year_day.year,
        month_day.month.numeric(),
        month_day.day_index + 1, // day_index is zero-based
        day_secs.getHoursIntoDay(),
        day_secs.getMinutesIntoHour(),
    }) catch "unknown";
}

/// Thread-safe shared state for cluster view data.
/// The poller thread publishes rows via swapRows; all row strings are owned
/// by `allocator` and freed on the next swap (or on deinit).
pub const ClusterState = struct {
    mutex: std.Thread.Mutex = .{},
    rows: []NodeRow = &.{},
    is_loading: bool = true,
    error_msg: ?[]const u8 = null,
    last_refresh_epoch: i64 = 0,
    allocator: Allocator,

    pub fn init(allocator: Allocator) ClusterState {
        return .{ .allocator = allocator };
    }

    /// Replace the current rows with new data, freeing the old rows under
    /// the mutex. Takes ownership of `new_rows` and every string inside.
    pub fn swapRows(self: *ClusterState, new_rows: []NodeRow) void {
        self.mutex.lock();
        defer self.mutex.unlock();
        self.releaseRows();
        self.rows = new_rows;
        self.is_loading = false;
        self.last_refresh_epoch = std.time.timestamp();
    }

    /// Snapshot of the current rows. Caller must NOT free; the slice is only
    /// guaranteed stable until the next swapRows call.
    pub fn getRows(self: *ClusterState) []NodeRow {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.rows;
    }

    pub fn getLastRefresh(self: *ClusterState) i64 {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.last_refresh_epoch;
    }

    pub fn isLoading(self: *ClusterState) bool {
        self.mutex.lock();
        defer self.mutex.unlock();
        return self.is_loading;
    }

    // Free every string of every row, then the row slice itself. Must be
    // called with the mutex held (or with exclusive ownership, as in deinit).
    fn releaseRows(self: *ClusterState) void {
        for (self.rows) |row| {
            // Every NodeRow field is an allocated []const u8.
            inline for (std.meta.fields(NodeRow)) |field| {
                self.allocator.free(@field(row, field.name));
            }
        }
        if (self.rows.len != 0) self.allocator.free(self.rows);
    }

    pub fn deinit(self: *ClusterState) void {
        self.releaseRows();
    }
};

/// Background poller that fetches data from Proxmox and Talos APIs.
/// Runs on its own thread (see start), fetches cluster/storage/backup/perf
/// data each interval, and publishes results into the shared *State structs.
pub const Poller = struct {
    // Shared sinks; owned by the caller, written through their own mutexes.
    state: *ClusterState,
    storage_state: *StorageState,
    backup_state: *BackupState,
    perf_state: *PerfState,
    cfg: *const config.Config,
    interval_ns: u64,
    // Cross-thread flags: stored by the UI thread, loaded by the poll thread.
    should_stop: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
    force_refresh: std.atomic.Value(bool) = std.atomic.Value(bool).init(false),
    thread: ?std.Thread = null,
    allocator: Allocator,

    /// Build a poller; does not spawn the thread (see start()).
    pub fn init(
        allocator: Allocator,
        state: *ClusterState,
        storage_state: *StorageState,
        backup_state: *BackupState,
        perf_state: *PerfState,
        cfg: *const config.Config,
        interval_ms: u64,
    ) Poller {
        return .{
            .state = state,
            .storage_state = storage_state,
            .backup_state = backup_state,
            .perf_state = perf_state,
            .cfg = cfg,
            .interval_ns = interval_ms * std.time.ns_per_ms,
            .allocator = allocator,
        };
    }

    /// Spawn the background polling thread.
    pub fn start(self: *Poller) !void {
        self.thread = try std.Thread.spawn(.{}, pollLoop, .{self});
    }

    /// Signal the thread to stop and join it. Safe to call more than once.
    pub fn stop(self: *Poller) void {
        self.should_stop.store(true, .release);
        if (self.thread) |t| {
            t.join();
            self.thread = null;
        }
    }

    /// Request an immediate refresh; picked up by the sleep loop within ~500ms.
    pub fn triggerRefresh(self: *Poller) void {
        self.force_refresh.store(true, .release);
    }

    // Thread entry point: fetch everything, then sleep interval_ns in 500ms
    // slices so stop/force-refresh requests stay responsive.
    fn pollLoop(self: *Poller) void {
        while (!self.should_stop.load(.acquire)) {
            self.fetchAll();

            // Sleep in small increments to allow responsive stopping/force-refresh
            var slept: u64 = 0;
            const step = 500 * std.time.ns_per_ms; // 500ms increments
            while (slept < self.interval_ns) {
                if (self.should_stop.load(.acquire)) return;
                if (self.force_refresh.load(.acquire)) {
                    self.force_refresh.store(false, .release);
                    break;
                }
                std.Thread.sleep(step);
                slept += step;
            }
        }
    }

    // One full refresh cycle: build cluster rows from PVE + Talos, publish
    // them, then refresh the storage, backup and performance views.
    // All transient API results are freed via per-iteration defers.
    fn fetchAll(self: *Poller) void {
        const alloc = self.allocator;

        // Collect all configured nodes
        var rows_list: std.ArrayListUnmanaged(NodeRow) = .empty;

        for (self.cfg.clusters) |cluster| {
            // Find the matching PVE cluster config
            var pve_cluster: ?config.ProxmoxCluster = null;
            for (self.cfg.proxmox.clusters) |pc| {
                if (std.mem.eql(u8, pc.name, cluster.proxmox_cluster)) {
                    pve_cluster = pc;
                    break;
                }
            }

            // Fetch PVE VM statuses
            var pve_vms: []proxmox.VmStatus = &.{};
            if (pve_cluster) |pc| {
                var pve_client = proxmox.ProxmoxClient.init(alloc, pc);
                defer pve_client.deinit();
                pve_vms = pve_client.getClusterResources() catch &.{};
            }
            // Per-iteration defer: frees this cluster's VM list before the next one.
            defer {
                for (pve_vms) |vm| {
                    alloc.free(vm.name);
                    alloc.free(vm.status);
                    alloc.free(vm.node);
                }
                if (pve_vms.len > 0) alloc.free(pve_vms);
            }

            // Fetch Talos etcd members
            var talos_client = talos.TalosClient.init(alloc, self.cfg.talos);
            defer talos_client.deinit();
            const etcd_members = talos_client.getEtcdMembers();
            defer {
                for (etcd_members) |m| alloc.free(m.hostname);
                if (etcd_members.len > 0) alloc.free(etcd_members);
            }

            // Build a row for each configured node
            for (cluster.nodes) |node| {
                // Match PVE VM status by VMID
                var vm_status: []const u8 = "unknown";
                for (pve_vms) |vm| {
                    if (vm.vmid == node.proxmox_vmid) {
                        vm_status = vm.status;
                        break;
                    }
                }

                // Match etcd membership by hostname
                var etcd_role: []const u8 = "-";
                for (etcd_members) |m| {
                    if (std.mem.eql(u8, m.hostname, node.name)) {
                        etcd_role = if (m.is_learner) "learner" else "member";
                        break;
                    }
                }

                // Fetch Talos version for this node
                var talos_ver: []const u8 = "-";
                var k8s_ver: []const u8 = "-";
                if (talos_client.getVersion(node.ip)) |ver| {
                    talos_ver = ver.talos_version;
                    k8s_ver = ver.kubernetes_version;
                    // Note: ver.node is freed by us since we'll dupe the strings below
                    alloc.free(ver.node);
                }

                // Determine health: running + version reachable => healthy;
                // running but Talos unreachable => degraded.
                const health: []const u8 = if (std.mem.eql(u8, vm_status, "running"))
                    (if (!std.mem.eql(u8, talos_ver, "-")) "healthy" else "degraded")
                else if (std.mem.eql(u8, vm_status, "stopped"))
                    "stopped"
                else
                    "unknown";

                const vmid_str = std.fmt.allocPrint(alloc, "{d}", .{node.proxmox_vmid}) catch continue;

                // NOTE(review): a `catch continue` inside this literal aborts the
                // whole row and leaks fields duped earlier in the literal (plus
                // vmid_str and the talos/k8s version strings). Only reachable on
                // OOM, but consider errdefer-style cleanup.
                rows_list.append(alloc, .{
                    .name = alloc.dupe(u8, node.name) catch continue,
                    .role = alloc.dupe(u8, node.role) catch continue,
                    .ip = alloc.dupe(u8, node.ip) catch continue,
                    .pve_node = alloc.dupe(u8, node.proxmox_node) catch continue,
                    .vmid = vmid_str,
                    .talos_ver = alloc.dupe(u8, talos_ver) catch continue,
                    .k8s_ver = alloc.dupe(u8, k8s_ver) catch continue,
                    .etcd = alloc.dupe(u8, etcd_role) catch continue,
                    .health = alloc.dupe(u8, health) catch continue,
                }) catch continue;

                // Free talos version strings if they were allocated
                if (!std.mem.eql(u8, talos_ver, "-")) alloc.free(talos_ver);
                if (!std.mem.eql(u8, k8s_ver, "-")) alloc.free(k8s_ver);
            }
        }

        const new_rows = rows_list.toOwnedSlice(alloc) catch return;
        self.state.swapRows(new_rows);

        // Fetch storage data
        self.fetchStorage();

        // Fetch backup data
        self.fetchBackups();

        // Fetch performance data
        self.fetchPerformance();
    }

    // Refresh the storage view: pool usage per PVE cluster plus a per-VM
    // disk table derived from cluster resources.
    fn fetchStorage(self: *Poller) void {
        const alloc = self.allocator;
        var pools_list: std.ArrayListUnmanaged(StoragePoolRow) = .empty;
        var disks_list: std.ArrayListUnmanaged(VmDiskRow) = .empty;

        for (self.cfg.proxmox.clusters) |pc| {
            var pve_client = proxmox.ProxmoxClient.init(alloc, pc);
            defer pve_client.deinit();

            // Fetch storage pools
            const storage_pools = pve_client.getStoragePools() catch &.{};
            defer {
                for (storage_pools) |sp| {
                    alloc.free(sp.name);
                    alloc.free(sp.node);
                    alloc.free(sp.pool_type);
                    alloc.free(sp.status);
                }
                if (storage_pools.len > 0) alloc.free(storage_pools);
            }

            for (storage_pools) |sp| {
                // Guard against division by zero on empty/unknown pools.
                const pct: f64 = if (sp.maxdisk > 0)
                    @as(f64, @floatFromInt(sp.disk)) / @as(f64, @floatFromInt(sp.maxdisk)) * 100.0
                else
                    0.0;

                pools_list.append(alloc, .{
                    .name = alloc.dupe(u8, sp.name) catch continue,
                    .node = alloc.dupe(u8, sp.node) catch continue,
                    .pool_type = alloc.dupe(u8, sp.pool_type) catch continue,
                    .used_str = formatBytes(alloc, sp.disk),
                    .total_str = formatBytes(alloc, sp.maxdisk),
                    .status = alloc.dupe(u8, sp.status) catch continue,
                    .usage_pct = pct,
                }) catch continue;
            }

            // Fetch VMs for disk info
            const vms = pve_client.getClusterResources() catch &.{};
            defer {
                for (vms) |vm| {
                    alloc.free(vm.name);
                    alloc.free(vm.status);
                    alloc.free(vm.node);
                }
                if (vms.len > 0) alloc.free(vms);
            }

            for (vms) |vm| {
                disks_list.append(alloc, .{
                    .vm_name = alloc.dupe(u8, vm.name) catch continue,
                    .vmid = std.fmt.allocPrint(alloc, "{d}", .{vm.vmid}) catch continue,
                    .pool = alloc.dupe(u8, vm.node) catch continue,
                    .size_str = formatBytes(alloc, vm.maxdisk),
                    .size_bytes = vm.maxdisk,
                }) catch continue;
            }
        }

        const new_pools = pools_list.toOwnedSlice(alloc) catch return;
        const new_disks = disks_list.toOwnedSlice(alloc) catch return;
        self.storage_state.swapData(new_pools, new_disks);
    }

    // Refresh the backups view: enumerate PVE backup volumes per storage
    // pool, annotate with VM names and staleness, then append K8s backups.
    fn fetchBackups(self: *Poller) void {
        const alloc = self.allocator;
        var backups_list: std.ArrayListUnmanaged(BackupRow) = .empty;

        for (self.cfg.proxmox.clusters) |pc| {
            var pve_client = proxmox.ProxmoxClient.init(alloc, pc);
            defer pve_client.deinit();

            // Get storage pools to know where to look for backups
            const pools = pve_client.getStoragePools() catch &.{};
            defer {
                for (pools) |sp| {
                    alloc.free(sp.name);
                    alloc.free(sp.node);
                    alloc.free(sp.pool_type);
                    alloc.free(sp.status);
                }
                if (pools.len > 0) alloc.free(pools);
            }

            // Get VMs for name lookup
            const vms = pve_client.getClusterResources() catch &.{};
            defer {
                for (vms) |vm| {
                    alloc.free(vm.name);
                    alloc.free(vm.status);
                    alloc.free(vm.node);
                }
                if (vms.len > 0) alloc.free(vms);
            }

            // For each storage pool, list backups
            for (pools) |sp| {
                const entries = pve_client.listBackups(sp.node, sp.name) catch &.{};
                defer {
                    for (entries) |e| {
                        alloc.free(e.volid);
                        alloc.free(e.node);
                        alloc.free(e.storage);
                        alloc.free(e.format);
                    }
                    if (entries.len > 0) alloc.free(entries);
                }

                for (entries) |entry| {
                    // Find VM name by VMID
                    var vm_name: []const u8 = "unknown";
                    for (vms) |vm| {
                        if (vm.vmid == entry.vmid) {
                            vm_name = vm.name;
                            break;
                        }
                    }

                    // Compute age
                    const now = std.time.timestamp();
                    const age_secs = now - entry.ctime;
                    const age_days: u32 = @intCast(@max(0, @divTrunc(age_secs, 86400)));
                    const is_stale = age_days > self.cfg.tui_settings.stale_days;

                    backups_list.append(alloc, .{
                        .volid = alloc.dupe(u8, entry.volid) catch continue,
                        .node = alloc.dupe(u8, entry.node) catch continue,
                        .storage = alloc.dupe(u8, entry.storage) catch continue,
                        .vm_name = alloc.dupe(u8, vm_name) catch continue,
                        .vmid = std.fmt.allocPrint(alloc, "{d}", .{entry.vmid}) catch continue,
                        .size_str = formatBytes(alloc, entry.size),
                        .date_str = formatEpoch(alloc, entry.ctime),
                        .age_days = age_days,
                        .is_stale = is_stale,
                    }) catch continue;
                }
            }
        }

        const new_backups = backups_list.toOwnedSlice(alloc) catch return;
        const new_k8s = self.fetchK8sBackups();
        self.backup_state.swapData(new_backups, new_k8s);
    }

    // Collect K8s backup rows from whichever providers (VolSync/Velero) are
    // detected via kubectl. Returns an empty slice when no kubeconfig can be
    // derived. Caller (fetchBackups) hands ownership to BackupState.
    fn fetchK8sBackups(self: *Poller) []K8sBackupRow {
        const alloc = self.allocator;
        const kubeconfig = kubernetes.deriveKubeconfig(alloc, self.cfg.talos.config_path) orelse return &.{};
        defer alloc.free(kubeconfig);

        var client = kubernetes.KubeClient.init(alloc, kubeconfig);
        defer client.deinit();

        const providers = client.detectProviders();
        var k8s_list: std.ArrayListUnmanaged(K8sBackupRow) = .empty;

        if (providers.volsync) {
            const entries = client.getVolsyncSources();
            defer {
                for (entries) |e| {
                    alloc.free(e.name);
                    alloc.free(e.namespace);
                    alloc.free(e.source_type);
                    alloc.free(e.status);
                    alloc.free(e.schedule);
                    alloc.free(e.last_run);
                }
                if (entries.len > 0) alloc.free(entries);
            }
            for (entries) |e| {
                k8s_list.append(alloc, .{
                    .name = alloc.dupe(u8, e.name) catch continue,
                    .namespace = alloc.dupe(u8, e.namespace) catch continue,
                    .source_type = alloc.dupe(u8, e.source_type) catch continue,
                    .status = alloc.dupe(u8, e.status) catch continue,
                    .schedule = alloc.dupe(u8, e.schedule) catch continue,
                    .last_run = alloc.dupe(u8, e.last_run) catch continue,
                }) catch continue;
            }
        }

        if (providers.velero) {
            const entries = client.getVeleroBackups();
            defer {
                for (entries) |e| {
                    alloc.free(e.name);
                    alloc.free(e.namespace);
                    alloc.free(e.source_type);
                    alloc.free(e.status);
                    alloc.free(e.schedule);
                    alloc.free(e.last_run);
                }
                if (entries.len > 0) alloc.free(entries);
            }
            for (entries) |e| {
                k8s_list.append(alloc, .{
                    .name = alloc.dupe(u8, e.name) catch continue,
                    .namespace = alloc.dupe(u8, e.namespace) catch continue,
                    .source_type = alloc.dupe(u8, e.source_type) catch continue,
                    .status = alloc.dupe(u8, e.status) catch continue,
                    .schedule = alloc.dupe(u8, e.schedule) catch continue,
                    .last_run = alloc.dupe(u8, e.last_run) catch continue,
                }) catch continue;
            }
        }

        return k8s_list.toOwnedSlice(alloc) catch &.{};
    }

    // Refresh the performance view: host CPU/mem from the PVE API and
    // per-pod metrics joined across four PromQL result sets.
    fn fetchPerformance(self: *Poller) void {
        const alloc = self.allocator;

        // Host metrics from PVE API
        var hosts_list: std.ArrayListUnmanaged(HostRow) = .empty;
        for (self.cfg.proxmox.clusters) |pc| {
            var pve_client = proxmox.ProxmoxClient.init(alloc, pc);
            defer pve_client.deinit();

            // Get distinct node names from cluster resources
            const vms = pve_client.getClusterResources() catch &.{};
            defer {
                for (vms) |vm| {
                    alloc.free(vm.name);
                    alloc.free(vm.status);
                    alloc.free(vm.node);
                }
                if (vms.len > 0) alloc.free(vms);
            }

            // Collect unique node names
            var seen_nodes: std.ArrayListUnmanaged([]const u8) = .empty;
            defer {
                for (seen_nodes.items) |n| alloc.free(n);
                seen_nodes.deinit(alloc);
            }

            for (vms) |vm| {
                var found = false;
                for (seen_nodes.items) |n| {
                    if (std.mem.eql(u8, n, vm.node)) {
                        found = true;
                        break;
                    }
                }
                if (!found) {
                    seen_nodes.append(alloc, alloc.dupe(u8, vm.node) catch continue) catch continue;
                }
            }

            for (seen_nodes.items) |node_name| {
                const ns = pve_client.getNodeStatus(node_name) catch continue orelse continue;
                const mem_pct: f64 = if (ns.maxmem > 0)
                    @as(f64, @floatFromInt(ns.mem)) / @as(f64, @floatFromInt(ns.maxmem)) * 100.0
                else
                    0.0;

                // NOTE(review): if the dupe or append below hits `catch continue`,
                // ns.node/ns.status are leaked (frees are after the append).
                hosts_list.append(alloc, .{
                    .name = alloc.dupe(u8, ns.node) catch continue,
                    .cpu_pct = ns.cpu * 100.0,
                    .mem_used_str = formatBytes(alloc, ns.mem),
                    .mem_total_str = formatBytes(alloc, ns.maxmem),
                    .mem_pct = mem_pct,
                }) catch continue;

                alloc.free(ns.node);
                alloc.free(ns.status);
            }
        }

        // Pod metrics from Prometheus/VictoriaMetrics
        var pods_list: std.ArrayListUnmanaged(PodMetricRow) = .empty;
        var metrics_available = false;

        const kubeconfig = kubernetes.deriveKubeconfig(alloc, self.cfg.talos.config_path);
        if (kubeconfig) |kc| {
            defer alloc.free(kc);

            var mc = metrics_api.MetricsClient.init(alloc, kc);
            defer mc.deinit();

            if (mc.available) {
                metrics_available = true;

                const cpu_data = mc.getPodCpu();
                defer self.freeMetricValues(cpu_data);

                const mem_data = mc.getPodMemory();
                defer self.freeMetricValues(mem_data);

                const rx_data = mc.getPodNetRx();
                defer self.freeMetricValues(rx_data);

                const tx_data = mc.getPodNetTx();
                defer self.freeMetricValues(tx_data);

                // Build pod map from CPU data (most common metric).
                // Linear scans per pod => O(pods^2) joins; acceptable at
                // homelab scale.
                for (cpu_data) |cpu| {
                    const pod_name = getLabelStr(cpu.labels, "pod");
                    const ns_name = getLabelStr(cpu.labels, "namespace");

                    // Find matching memory
                    var mem_val: f64 = 0;
                    for (mem_data) |m| {
                        if (std.mem.eql(u8, getLabelStr(m.labels, "pod"), pod_name) and
                            std.mem.eql(u8, getLabelStr(m.labels, "namespace"), ns_name))
                        {
                            mem_val = m.value;
                            break;
                        }
                    }

                    // Find matching network
                    var rx_val: f64 = 0;
                    var tx_val: f64 = 0;
                    for (rx_data) |r| {
                        if (std.mem.eql(u8, getLabelStr(r.labels, "pod"), pod_name) and
                            std.mem.eql(u8, getLabelStr(r.labels, "namespace"), ns_name))
                        {
                            rx_val = r.value;
                            break;
                        }
                    }
                    for (tx_data) |t| {
                        if (std.mem.eql(u8, getLabelStr(t.labels, "pod"), pod_name) and
                            std.mem.eql(u8, getLabelStr(t.labels, "namespace"), ns_name))
                        {
                            tx_val = t.value;
                            break;
                        }
                    }

                    pods_list.append(alloc, .{
                        .pod = alloc.dupe(u8, pod_name) catch continue,
                        .namespace = alloc.dupe(u8, ns_name) catch continue,
                        .cpu_str = std.fmt.allocPrint(alloc, "{d:.3}", .{cpu.value}) catch continue,
                        .mem_str = formatBytes(alloc, @intFromFloat(@max(0, mem_val))),
                        .net_rx_str = formatRate(alloc, rx_val),
                        .net_tx_str = formatRate(alloc, tx_val),
                        .cpu_cores = cpu.value,
                        .mem_bytes = mem_val,
                    }) catch continue;
                }
            }
        }

        const new_hosts = hosts_list.toOwnedSlice(alloc) catch return;
        const new_pods = pods_list.toOwnedSlice(alloc) catch return;
        self.perf_state.swapData(new_hosts, new_pods, metrics_available);
    }

    // Free a PromQL result set: each entry's label keys/string values, the
    // label map's own storage, then the slice itself.
    fn freeMetricValues(self: *Poller, values: []metrics_api.MetricsClient.PodMetricValue) void {
        for (values) |v| {
            var it = v.labels.iterator();
            while (it.next()) |entry| {
                self.allocator.free(entry.key_ptr.*);
                switch (entry.value_ptr.*) {
                    .string => |s| self.allocator.free(s),
                    else => {},
                }
            }
            // `v` is a loop capture (const), so deinit through a mutable copy;
            // presumably the map's backing storage is heap-owned so the copy's
            // deinit frees it — TODO confirm against ObjectMap semantics.
            var labels_copy = v.labels;
            labels_copy.deinit();
        }
        if (values.len > 0) self.allocator.free(values);
    }

    /// Stop the polling thread (joins it). Shared state deinit is the
    /// owner's responsibility.
    pub fn deinit(self: *Poller) void {
        self.stop();
    }
};

// Look up a label value in a PromQL result's label map; returns "" when the
// key is absent or the value is not a JSON string.
fn getLabelStr(labels: std.json.ObjectMap, key: []const u8) []const u8 {
    const val = labels.get(key) orelse return "";
    return switch (val) {
        .string => |s| s,
        else => "",
    };
}
diff --git a/tui/src/views/backups.zig b/tui/src/views/backups.zig
new file mode 100644
index 0000000..1fe9643
--- /dev/null
+++ b/tui/src/views/backups.zig
@@ -0,0 +1,442 @@
const std = @import("std");
const vaxis = @import("vaxis");
const poll = @import("../poll.zig");

/// Identifies a PVE backup volume to delete (filled in by the app layer).
pub const DeleteAction = struct {
    node: []const u8,
    storage: []const u8,
    volid: []const u8,
};

/// Backups view: PVE backups on top, K8s backups below, with vim-style
/// navigation, a filter overlay, and a delete-confirmation dialog.
pub const BackupView = struct {
    // Logical row index across both sections (filtered rows only).
    selected: u16 = 0,
    // NOTE(review): `scroll` is never updated in the visible code, and draw()
    // simply stops at the window edge — rows below the fold appear
    // unreachable. Verify intended behavior.
    scroll: u16 = 0,
    // Number of filtered PVE rows; indices below this are PVE backups.
    num_backups: u16 =
0, + stale_days: u32, + + // Total row count across both sections (for navigation) + total_rows: u16 = 0, + + // Confirmation dialog state + show_confirm: bool = false, + pending_idx: ?u16 = null, + + // Set by handleKey when user confirms deletion + delete_action: ?DeleteAction = null, + + // Search/filter state + filter_active: bool = false, + filter_buf: [64]u8 = undefined, + filter_len: u8 = 0, + + const pve_col_header = " VM Name VMID Date Size Storage Age"; + const k8s_col_header = " Name Namespace Source Status Schedule Last Run"; + + pub fn init(stale_days: u32) BackupView { + return .{ .stale_days = stale_days }; + } + + pub fn handleKey(self: *BackupView, key: vaxis.Key) void { + // Confirmation dialog intercepts all input + if (self.show_confirm) { + if (key.matches('y', .{})) { + self.show_confirm = false; + } else if (key.matches('n', .{}) or key.matches(vaxis.Key.escape, .{})) { + self.show_confirm = false; + self.pending_idx = null; + self.delete_action = null; + } + return; + } + + // Filter input mode intercepts all input + if (self.filter_active) { + if (key.matches(vaxis.Key.escape, .{}) or key.matches(vaxis.Key.enter, .{})) { + if (key.matches(vaxis.Key.escape, .{})) { + self.filter_len = 0; // Clear filter on Esc + } + self.filter_active = false; + } else if (key.matches(vaxis.Key.backspace, .{})) { + if (self.filter_len > 0) self.filter_len -= 1; + } else if (key.text) |text| { + for (text) |c| { + if (self.filter_len < self.filter_buf.len) { + self.filter_buf[self.filter_len] = c; + self.filter_len += 1; + } + } + } + return; + } + + if (key.matches('/', .{})) { + self.filter_active = true; + return; + } + + // Esc clears active filter when not in input mode + if (key.matches(vaxis.Key.escape, .{})) { + if (self.filter_len > 0) { + self.filter_len = 0; + self.selected = 0; + } + return; + } + + if (self.total_rows == 0) return; + + if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { + if (self.selected < self.total_rows - 1) 
self.selected += 1; + } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { + if (self.selected > 0) self.selected -= 1; + } else if (key.matches('g', .{})) { + self.selected = 0; + } else if (key.matches('G', .{ .shift = true })) { + if (self.total_rows > 0) self.selected = self.total_rows - 1; + } else if (key.matches('d', .{})) { + // Only allow deletion on PVE backup rows + if (self.selected < self.num_backups) { + self.pending_idx = self.selected; + self.show_confirm = true; + self.delete_action = null; + } + } + } + + pub fn draw( + self: *BackupView, + win: vaxis.Window, + backups: []const poll.BackupRow, + k8s_backups: []const poll.K8sBackupRow, + ) void { + // Apply filter + const filter = if (self.filter_len > 0) self.filter_buf[0..self.filter_len] else ""; + + // Count filtered rows + var pve_count: u16 = 0; + for (backups) |b| { + if (self.matchesFilter(b, filter)) pve_count += 1; + } + var k8s_count: u16 = 0; + for (k8s_backups) |b| { + if (self.matchesK8sFilter(b, filter)) k8s_count += 1; + } + + self.num_backups = pve_count; + self.total_rows = pve_count + k8s_count; + + if (self.total_rows == 0) { + if (filter.len > 0) { + drawCentered(win, "No backups matching filter"); + } else { + drawCentered(win, "No backups found"); + } + self.drawFilterBar(win); + return; + } + + // Clamp selection + if (self.selected >= self.total_rows) self.selected = self.total_rows - 1; + + var current_row: u16 = 0; + + // PVE Backups section + if (pve_count > 0) { + var pve_header_buf: [48]u8 = undefined; + const pve_header = std.fmt.bufPrint(&pve_header_buf, " PVE Backups ({d})", .{pve_count}) catch " PVE Backups"; + const hdr_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 8 }, .bold = true }; + _ = win.print(&.{.{ .text = pve_header, .style = hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + + const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; + _ = win.print(&.{.{ 
.text = pve_col_header, .style = col_hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + + var pve_idx: u16 = 0; + for (backups) |b| { + if (!self.matchesFilter(b, filter)) continue; + if (current_row >= win.height -| 1) break; + const is_selected = (pve_idx == self.selected); + drawBackupRow(win, current_row, b, is_selected, self.stale_days); + current_row += 1; + pve_idx += 1; + } + } + + // K8s Backups section + if (k8s_count > 0) { + if (pve_count > 0 and current_row < win.height -| 3) { + // Separator + current_row += 1; + } + + var k8s_header_buf: [48]u8 = undefined; + const k8s_header = std.fmt.bufPrint(&k8s_header_buf, " K8s Backups ({d})", .{k8s_count}) catch " K8s Backups"; + if (current_row < win.height -| 1) { + const hdr_style: vaxis.Style = .{ .fg = .{ .index = 5 }, .bg = .{ .index = 8 }, .bold = true }; + _ = win.print(&.{.{ .text = k8s_header, .style = hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + } + + if (current_row < win.height -| 1) { + const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; + _ = win.print(&.{.{ .text = k8s_col_header, .style = col_hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + } + + var k8s_idx: u16 = 0; + for (k8s_backups) |b| { + if (!self.matchesK8sFilter(b, filter)) continue; + if (current_row >= win.height -| 1) break; + const logical_idx = pve_count + k8s_idx; + const is_selected = (logical_idx == self.selected); + drawK8sRow(win, current_row, b, is_selected); + current_row += 1; + k8s_idx += 1; + } + } else if (pve_count > 0 and current_row < win.height -| 2) { + // Show "no K8s providers" hint + current_row += 1; + const hint_style: vaxis.Style = .{ .fg = .{ .index = 8 } }; + _ = win.print(&.{.{ .text = " K8s Backups: No providers detected", .style = hint_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + } + + // Filter bar at bottom + 
self.drawFilterBar(win); + + // Confirmation dialog overlay + if (self.show_confirm) { + if (self.pending_idx) |idx| { + // Map filtered idx back to actual backup + var actual_idx: u16 = 0; + var filtered_count: u16 = 0; + for (backups, 0..) |_, i| { + if (self.matchesFilter(backups[i], filter)) { + if (filtered_count == idx) { + actual_idx = @intCast(i); + break; + } + filtered_count += 1; + } + } + if (actual_idx < backups.len) { + self.drawConfirmDialog(win, backups[actual_idx]); + } + } + } + } + + fn drawFilterBar(self: *BackupView, win: vaxis.Window) void { + if (!self.filter_active and self.filter_len == 0) return; + + const bar_row = win.height -| 1; + const filter_text = self.filter_buf[0..self.filter_len]; + + if (self.filter_active) { + var buf: [80]u8 = undefined; + const line = std.fmt.bufPrint(&buf, " / filter: {s}_", .{filter_text}) catch " / filter: "; + _ = win.print(&.{.{ .text = line, .style = .{ + .fg = .{ .index = 6 }, + .bg = .{ .index = 8 }, + .bold = true, + } }}, .{ + .row_offset = bar_row, + .wrap = .none, + }); + } else if (self.filter_len > 0) { + var buf: [80]u8 = undefined; + const line = std.fmt.bufPrint(&buf, " filter: {s} (/ to edit, Esc to clear)", .{filter_text}) catch ""; + _ = win.print(&.{.{ .text = line, .style = .{ + .fg = .{ .index = 8 }, + } }}, .{ + .row_offset = bar_row, + .wrap = .none, + }); + } + } + + fn matchesFilter(self: *BackupView, b: poll.BackupRow, filter: []const u8) bool { + _ = self; + if (filter.len == 0) return true; + if (containsInsensitive(b.vm_name, filter)) return true; + if (containsInsensitive(b.vmid, filter)) return true; + if (containsInsensitive(b.storage, filter)) return true; + if (containsInsensitive(b.date_str, filter)) return true; + return false; + } + + fn matchesK8sFilter(self: *BackupView, b: poll.K8sBackupRow, filter: []const u8) bool { + _ = self; + if (filter.len == 0) return true; + if (containsInsensitive(b.name, filter)) return true; + if (containsInsensitive(b.namespace, filter)) 
return true;
+        if (containsInsensitive(b.source_type, filter)) return true;
+        if (containsInsensitive(b.status, filter)) return true;
+        return false;
+    }
+
+    /// Render one PVE backup row at `row`. Row color encodes staleness:
+    /// red when `age_days` exceeds twice `stale_days`, yellow when the
+    /// `is_stale` flag is set, default otherwise; a selected row is drawn
+    /// inverted on the highlight background instead.
+    fn drawBackupRow(win: vaxis.Window, row: u16, b: poll.BackupRow, selected: bool, stale_days: u32) void {
+        const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default;
+        const base_fg: vaxis.Color = if (selected)
+            .{ .index = 0 }
+        else if (b.age_days > stale_days * 2)
+            .{ .index = 1 } // red: very stale
+        else if (b.is_stale)
+            .{ .index = 3 } // yellow: stale
+        else
+            .{ .index = 7 }; // normal
+
+        const style: vaxis.Style = .{ .fg = base_fg, .bg = bg };
+
+        var age_buf: [16]u8 = undefined;
+        const age_str = std.fmt.bufPrint(&age_buf, "{d}d", .{b.age_days}) catch "?d";
+
+        // Fixed-width columns; bufPrint failure (line wider than 256 bytes)
+        // silently skips the row.
+        var buf: [256]u8 = undefined;
+        const line = std.fmt.bufPrint(&buf, " {s:<16} {s:<7} {s:<17} {s:<12} {s:<13} {s}", .{
+            truncate(b.vm_name, 16),
+            truncate(b.vmid, 7),
+            truncate(b.date_str, 17),
+            truncate(b.size_str, 12),
+            truncate(b.storage, 13),
+            age_str,
+        }) catch return;
+
+        _ = win.print(&.{.{ .text = line, .style = style }}, .{
+            .row_offset = row,
+            .wrap = .none,
+        });
+    }
+
+    /// Render one Kubernetes backup row (per HEAD: VolSync/Velero entries)
+    /// at `row`; selected rows are drawn inverted on the highlight color.
+    fn drawK8sRow(win: vaxis.Window, row: u16, b: poll.K8sBackupRow, selected: bool) void {
+        const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default;
+        const fg: vaxis.Color = if (selected) .{ .index = 0 } else .{ .index = 7 };
+        const style: vaxis.Style = .{ .fg = fg, .bg = bg };
+
+        var buf: [256]u8 = undefined;
+        const line = std.fmt.bufPrint(&buf, " {s:<24} {s:<14} {s:<9} {s:<12} {s:<16} {s}", .{
+            truncate(b.name, 24),
+            truncate(b.namespace, 14),
+            truncate(b.source_type, 9),
+            truncate(b.status, 12),
+            truncate(b.schedule, 16),
+            truncate(b.last_run, 20),
+        }) catch return;
+
+        _ = win.print(&.{.{ .text = line, .style = style }}, .{
+            .row_offset = row,
+            .wrap = .none,
+        });
+    }
+
+    /// Draw the centered red-bordered delete-confirmation dialog for `backup`.
+    fn drawConfirmDialog(self: *BackupView, win: vaxis.Window, backup: poll.BackupRow) void {
+        _ = self;
+        const box_w: u16 = 52;
+        const box_h: u16 =
7; + const x: i17 = @intCast(if (win.width > box_w) (win.width - box_w) / 2 else 0); + const y: i17 = @intCast(if (win.height > box_h) (win.height - box_h) / 2 else 0); + + const dialog = win.child(.{ + .x_off = x, + .y_off = y, + .width = box_w, + .height = box_h, + .border = .{ .where = .all, .style = .{ .fg = .{ .index = 1 } } }, + }); + + dialog.fill(.{ .style = .{ .bg = .{ .index = 0 } } }); + + const title_style: vaxis.Style = .{ .fg = .{ .index = 1 }, .bg = .{ .index = 0 }, .bold = true }; + const text_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bg = .{ .index = 0 } }; + const hint_style: vaxis.Style = .{ .fg = .{ .index = 8 }, .bg = .{ .index = 0 } }; + + _ = dialog.print(&.{.{ .text = " Delete Backup?", .style = title_style }}, .{ + .row_offset = 0, + .wrap = .none, + }); + + var name_buf: [48]u8 = undefined; + const name_line = std.fmt.bufPrint(&name_buf, " {s}", .{truncate(backup.volid, 46)}) catch " ?"; + _ = dialog.print(&.{.{ .text = name_line, .style = text_style }}, .{ + .row_offset = 2, + .wrap = .none, + }); + + _ = dialog.print(&.{.{ .text = " y: confirm n/Esc: cancel", .style = hint_style }}, .{ + .row_offset = 4, + .wrap = .none, + }); + } + + /// Check if there's a pending delete action and consume it. 
+    /// Returns the identity (node/storage/volid) of a backup whose deletion
+    /// was just confirmed via the dialog, or null when nothing is pending.
+    /// NOTE(review): the first branch clears a non-null `delete_action` and
+    /// returns null rather than returning the stored action — presumably the
+    /// cancel/reset path; confirm against the key handler that sets it.
+    pub fn consumeDeleteAction(self: *BackupView, backups: []const poll.BackupRow) ?DeleteAction {
+        if (self.delete_action != null) {
+            self.delete_action = null;
+            self.pending_idx = null;
+            return null;
+        }
+        // Check if confirm was just accepted
+        if (!self.show_confirm and self.pending_idx != null) {
+            const idx = self.pending_idx.?;
+            // NOTE(review): in draw(), `pending_idx` indexes the *filtered*
+            // backup list, but here it indexes `backups` directly — TODO
+            // confirm callers pass an unfiltered slice, or remap the index
+            // the same way draw() does.
+            if (idx < backups.len) {
+                const b = backups[idx];
+                self.pending_idx = null;
+                // NOTE(review): returned slices alias the backup row's
+                // strings — presumably only valid until the next poll
+                // refresh; verify the lifetime at the call site.
+                return .{
+                    .node = b.node,
+                    .storage = b.storage,
+                    .volid = b.volid,
+                };
+            }
+        }
+        return null;
+    }
+
+    /// Print `msg` horizontally centered on the middle row of `win`.
+    fn drawCentered(win: vaxis.Window, msg: []const u8) void {
+        const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0;
+        _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{
+            .col_offset = col,
+            .row_offset = win.height / 2,
+            .wrap = .none,
+        });
+    }
+
+    /// Clip `s` to at most `max` bytes (byte-wise; may split a multi-byte
+    /// UTF-8 sequence at the cut point).
+    fn truncate(s: []const u8, max: usize) []const u8 {
+        return if (s.len > max) s[0..max] else s;
+    }
+};
+
+/// Case-insensitive substring check (ASCII only).
+fn containsInsensitive(haystack: []const u8, needle: []const u8) bool { + if (needle.len == 0) return true; + if (needle.len > haystack.len) return false; + const limit = haystack.len - needle.len + 1; + for (0..limit) |i| { + var match = true; + for (0..needle.len) |j| { + if (toLower(haystack[i + j]) != toLower(needle[j])) { + match = false; + break; + } + } + if (match) return true; + } + return false; +} + +fn toLower(c: u8) u8 { + return if (c >= 'A' and c <= 'Z') c + 32 else c; +} diff --git a/tui/src/views/cluster.zig b/tui/src/views/cluster.zig new file mode 100644 index 0000000..d6f5658 --- /dev/null +++ b/tui/src/views/cluster.zig @@ -0,0 +1,82 @@ +const std = @import("std"); +const vaxis = @import("vaxis"); +const poll = @import("../poll.zig"); + +const Table = vaxis.widgets.Table; +const Cell = vaxis.Cell; + +pub const ClusterView = struct { + table_ctx: Table.TableContext, + num_rows: u16 = 0, + + pub fn init() ClusterView { + return .{ + .table_ctx = .{ + .active = true, + .selected_bg = .{ .index = 4 }, + .selected_fg = .{ .index = 0 }, + .active_bg = .{ .index = 4 }, + .active_fg = .{ .index = 0 }, + .hdr_bg_1 = .{ .index = 8 }, + .hdr_bg_2 = .{ .index = 8 }, + .row_bg_1 = .default, + .row_bg_2 = .default, + .col_width = .dynamic_fill, + .header_names = .{ .custom = &.{ + "Name", "Role", "IP", "PVE Node", "VMID", "Talos Ver", "K8s Ver", "Etcd", "Health", + } }, + }, + }; + } + + pub fn handleKey(self: *ClusterView, key: vaxis.Key) void { + if (self.num_rows == 0) return; + + if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { + if (self.table_ctx.row < self.num_rows - 1) { + self.table_ctx.row += 1; + } + } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { + if (self.table_ctx.row > 0) { + self.table_ctx.row -= 1; + } + } else if (key.matches('g', .{})) { + // gg: go to top (single g for now) + self.table_ctx.row = 0; + } else if (key.matches('G', .{ .shift = true })) { + // G: go to bottom + if (self.num_rows > 0) { 
+ self.table_ctx.row = self.num_rows - 1; + } + } + } + + pub fn draw(self: *ClusterView, alloc: std.mem.Allocator, win: vaxis.Window, rows: []const poll.NodeRow) void { + self.num_rows = @intCast(rows.len); + if (rows.len == 0) { + self.drawEmpty(win); + return; + } + + // Clamp selected row + if (self.table_ctx.row >= self.num_rows) { + self.table_ctx.row = self.num_rows - 1; + } + + Table.drawTable(alloc, win, rows, &self.table_ctx) catch { + self.drawEmpty(win); + }; + } + + fn drawEmpty(self: *ClusterView, win: vaxis.Window) void { + _ = self; + const msg = "No cluster data available"; + const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; + const row: u16 = win.height / 2; + _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ + .col_offset = col, + .row_offset = row, + .wrap = .none, + }); + } +}; diff --git a/tui/src/views/performance.zig b/tui/src/views/performance.zig new file mode 100644 index 0000000..7861f58 --- /dev/null +++ b/tui/src/views/performance.zig @@ -0,0 +1,380 @@ +const std = @import("std"); +const vaxis = @import("vaxis"); +const poll = @import("../poll.zig"); + +const SortColumn = enum { pod, namespace, cpu, memory, net_rx, net_tx }; + +pub const PerformanceView = struct { + // Pod table navigation + selected: u16 = 0, + scroll: u16 = 0, + num_pods: u16 = 0, + + // Sorting + sort_col: SortColumn = .cpu, + sort_asc: bool = false, // descending by default (highest first) + + // Namespace filter + ns_filter: ?[]const u8 = null, // null = all namespaces + ns_index: u16 = 0, // index into discovered namespaces (0 = all) + + const host_header = " Host Overview"; + const host_col_header = " Node CPU Memory"; + const pod_col_header = " Pod Namespace CPU Memory Net RX Net TX"; + + pub fn init() PerformanceView { + return .{}; + } + + pub fn handleKey(self: *PerformanceView, key: vaxis.Key) void { + if (key.matches('s', .{})) { + self.cycleSortCol(); + } else if (key.matches('S', .{ 
.shift = true })) { + self.sort_asc = !self.sort_asc; + } else if (key.matches('n', .{})) { + self.ns_index +%= 1; // wraps, clamped in draw + } else if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { + if (self.num_pods > 0 and self.selected < self.num_pods - 1) self.selected += 1; + } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { + if (self.selected > 0) self.selected -= 1; + } else if (key.matches('g', .{})) { + self.selected = 0; + } else if (key.matches('G', .{ .shift = true })) { + if (self.num_pods > 0) self.selected = self.num_pods - 1; + } + } + + fn cycleSortCol(self: *PerformanceView) void { + self.sort_col = switch (self.sort_col) { + .pod => .namespace, + .namespace => .cpu, + .cpu => .memory, + .memory => .net_rx, + .net_rx => .net_tx, + .net_tx => .pod, + }; + } + + pub fn draw( + self: *PerformanceView, + alloc: std.mem.Allocator, + win: vaxis.Window, + hosts: []const poll.HostRow, + pods: []const poll.PodMetricRow, + metrics_available: bool, + ) void { + if (!metrics_available and hosts.len == 0) { + drawCentered(win, "No metrics backend detected"); + return; + } + + var current_row: u16 = 0; + + // Host overview section + if (hosts.len > 0) { + const hdr_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 8 }, .bold = true }; + _ = win.print(&.{.{ .text = host_header, .style = hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + + const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; + _ = win.print(&.{.{ .text = host_col_header, .style = col_hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + + for (hosts) |h| { + if (current_row >= win.height -| 4) break; + self.drawHostRow(win, current_row, h); + current_row += 1; + } + current_row += 1; // spacing + } + + // Pod metrics section + if (!metrics_available) { + if (current_row < win.height -| 2) { + const hint: vaxis.Style = .{ .fg = .{ .index = 8 } 
}; + _ = win.print(&.{.{ .text = " Pod Metrics: No Prometheus/VictoriaMetrics detected", .style = hint }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + } + return; + } + + // Discover namespaces and apply filter + var namespaces: std.ArrayListUnmanaged([]const u8) = .empty; + defer namespaces.deinit(alloc); + for (pods) |p| { + var found = false; + for (namespaces.items) |ns| { + if (std.mem.eql(u8, ns, p.namespace)) { + found = true; + break; + } + } + if (!found) { + namespaces.append(alloc, p.namespace) catch continue; + } + } + + // Sort namespaces alphabetically + std.mem.sort([]const u8, namespaces.items, {}, struct { + fn cmp(_: void, a: []const u8, b: []const u8) bool { + return std.mem.order(u8, a, b) == .lt; + } + }.cmp); + + // Clamp namespace index (0 = all, 1..N = specific) + const total_ns = namespaces.items.len; + if (total_ns > 0 and self.ns_index > total_ns) { + self.ns_index = 0; + } + + const active_ns: ?[]const u8 = if (self.ns_index > 0 and self.ns_index <= total_ns) + namespaces.items[self.ns_index - 1] + else + null; + + // Filter pods by namespace + var filtered: std.ArrayListUnmanaged(poll.PodMetricRow) = .empty; + defer filtered.deinit(alloc); + for (pods) |p| { + if (active_ns) |ns| { + if (!std.mem.eql(u8, p.namespace, ns)) continue; + } + filtered.append(alloc, p) catch continue; + } + + // Sort filtered pods + self.sortPods(filtered.items); + + self.num_pods = @intCast(filtered.items.len); + if (self.selected >= self.num_pods and self.num_pods > 0) self.selected = self.num_pods - 1; + + // Pod header + { + var hdr_buf: [64]u8 = undefined; + const ns_label = if (active_ns) |ns| ns else "all"; + const pod_header = std.fmt.bufPrint(&hdr_buf, " Pod Metrics ({d}) [ns: {s}]", .{ + filtered.items.len, ns_label, + }) catch " Pod Metrics"; + const hdr_style: vaxis.Style = .{ .fg = .{ .index = 5 }, .bg = .{ .index = 8 }, .bold = true }; + if (current_row < win.height -| 2) { + _ = win.print(&.{.{ .text = pod_header, .style = 
hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + } + } + + // Sort indicator in column headers + { + const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; + if (current_row < win.height -| 1) { + _ = win.print(&.{.{ .text = pod_col_header, .style = col_hdr_style }}, .{ + .row_offset = current_row, + .wrap = .none, + }); + current_row += 1; + } + } + + // Scrolling + const visible = win.height -| current_row -| 1; + if (self.selected < self.scroll) { + self.scroll = self.selected; + } else if (self.selected >= self.scroll + visible) { + self.scroll = self.selected - visible + 1; + } + + // Pod rows + const start = self.scroll; + const end: u16 = @intCast(@min(filtered.items.len, start + visible)); + var idx: u16 = 0; + for (filtered.items[start..end]) |p| { + if (current_row >= win.height -| 1) break; + const is_selected = (start + idx == self.selected); + drawPodRow(win, current_row, p, is_selected); + current_row += 1; + idx += 1; + } + + // Status hints at bottom + if (win.height > 1) { + const sort_name = switch (self.sort_col) { + .pod => "pod", + .namespace => "ns", + .cpu => "cpu", + .memory => "mem", + .net_rx => "rx", + .net_tx => "tx", + }; + const dir = if (self.sort_asc) "asc" else "desc"; + var hint_buf: [64]u8 = undefined; + const hint = std.fmt.bufPrint(&hint_buf, " sort: {s} ({s}) s:cycle S:reverse n:namespace", .{ sort_name, dir }) catch ""; + _ = win.print(&.{.{ .text = hint, .style = .{ .fg = .{ .index = 8 } } }}, .{ + .row_offset = win.height - 1, + .wrap = .none, + }); + } + } + + fn drawHostRow(self: *PerformanceView, win: vaxis.Window, row: u16, h: poll.HostRow) void { + _ = self; + const style: vaxis.Style = .{ .fg = .{ .index = 7 } }; + + var buf: [128]u8 = undefined; + const text = std.fmt.bufPrint(&buf, " {s:<18}", .{truncate(h.name, 18)}) catch return; + _ = win.print(&.{.{ .text = text, .style = style }}, .{ + .row_offset = row, + .wrap = .none, + }); + + // CPU bar at col 
20 + drawBar(win, row, 20, h.cpu_pct, 15); + + // Memory bar at col 52 + var mem_buf: [32]u8 = undefined; + const mem_label = std.fmt.bufPrint(&mem_buf, " {s}/{s}", .{ + truncate(h.mem_used_str, 12), + truncate(h.mem_total_str, 12), + }) catch ""; + drawBar(win, row, 52, h.mem_pct, 15); + + _ = win.print(&.{.{ .text = mem_label, .style = .{ .fg = .{ .index = 8 } } }}, .{ + .col_offset = 74, + .row_offset = row, + .wrap = .none, + }); + } + + fn drawBar(win: vaxis.Window, row: u16, col: u16, pct: f64, width: u16) void { + const bar_color: vaxis.Color = if (pct > 90) + .{ .index = 1 } // red + else if (pct > 70) + .{ .index = 3 } // yellow + else + .{ .index = 2 }; // green + + const filled: u16 = @intFromFloat(@min( + @as(f64, @floatFromInt(width)), + @round(pct / 100.0 * @as(f64, @floatFromInt(width))), + )); + const empty_count = width - filled; + + var fill_buf: [60]u8 = undefined; + var fill_len: usize = 0; + for (0..filled) |_| { + const ch = "\u{2588}"; + @memcpy(fill_buf[fill_len..][0..ch.len], ch); + fill_len += ch.len; + } + + var empty_buf: [60]u8 = undefined; + var empty_len: usize = 0; + for (0..empty_count) |_| { + const ch = "\u{2591}"; + @memcpy(empty_buf[empty_len..][0..ch.len], ch); + empty_len += ch.len; + } + + var pct_buf: [8]u8 = undefined; + const pct_str = std.fmt.bufPrint(&pct_buf, "] {d:>3.0}%", .{pct}) catch "] ?%"; + + _ = win.print(&.{ + .{ .text = "[", .style = .{ .fg = .{ .index = 7 } } }, + .{ .text = fill_buf[0..fill_len], .style = .{ .fg = bar_color } }, + .{ .text = empty_buf[0..empty_len], .style = .{ .fg = .{ .index = 8 } } }, + .{ .text = pct_str, .style = .{ .fg = .{ .index = 7 } } }, + }, .{ + .col_offset = col, + .row_offset = row, + .wrap = .none, + }); + } + + fn drawPodRow(win: vaxis.Window, row: u16, p: poll.PodMetricRow, selected: bool) void { + const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default; + const fg: vaxis.Color = if (selected) .{ .index = 0 } else .{ .index = 7 }; + const style: vaxis.Style = .{ 
.fg = fg, .bg = bg };
+
+        var buf: [256]u8 = undefined;
+        const line = std.fmt.bufPrint(&buf, " {s:<33} {s:<16} {s:<9} {s:<12} {s:<12} {s}", .{
+            truncate(p.pod, 33),
+            truncate(p.namespace, 16),
+            truncate(p.cpu_str, 9),
+            truncate(p.mem_str, 12),
+            truncate(p.net_rx_str, 12),
+            truncate(p.net_tx_str, 12),
+        }) catch return;
+
+        _ = win.print(&.{.{ .text = line, .style = style }}, .{
+            .row_offset = row,
+            .wrap = .none,
+        });
+    }
+
+    /// Sort the filtered pod rows in place by the current sort column and
+    /// direction. `.cpu` and `.memory` compare the stored numeric fields;
+    /// the remaining columns compare strings byte-wise.
+    fn sortPods(self: *PerformanceView, items: []poll.PodMetricRow) void {
+        const asc = self.sort_asc;
+        switch (self.sort_col) {
+            .pod => std.mem.sort(poll.PodMetricRow, items, asc, struct {
+                fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool {
+                    const ord = std.mem.order(u8, a.pod, b.pod);
+                    return if (ascending) ord == .lt else ord == .gt;
+                }
+            }.cmp),
+            .namespace => std.mem.sort(poll.PodMetricRow, items, asc, struct {
+                fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool {
+                    const ord = std.mem.order(u8, a.namespace, b.namespace);
+                    return if (ascending) ord == .lt else ord == .gt;
+                }
+            }.cmp),
+            .cpu => std.mem.sort(poll.PodMetricRow, items, asc, struct {
+                fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool {
+                    return if (ascending) a.cpu_cores < b.cpu_cores else a.cpu_cores > b.cpu_cores;
+                }
+            }.cmp),
+            .memory => std.mem.sort(poll.PodMetricRow, items, asc, struct {
+                fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool {
+                    return if (ascending) a.mem_bytes < b.mem_bytes else a.mem_bytes > b.mem_bytes;
+                }
+            }.cmp),
+            .net_rx => {
+                // PodMetricRow keeps no raw rx counter, so compare the
+                // human-formatted string instead. NOTE(review): this is
+                // lexicographic, not numeric ("9MB" sorts after "10MB") —
+                // consider storing the raw bytes value for a true sort.
+                // (Previous comment claimed this falls back to pod name;
+                // it actually compares net_rx_str.)
+                std.mem.sort(poll.PodMetricRow, items, asc, struct {
+                    fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool {
+                        const ord = std.mem.order(u8, a.net_rx_str, b.net_rx_str);
+                        return if (ascending) ord == .lt else ord == .gt;
+                    }
+                }.cmp);
+            },
+            .net_tx => {
+                // Same lexicographic-string caveat applies to net_tx_str.
+                std.mem.sort(poll.PodMetricRow, items, asc, struct {
+ fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { + const ord = std.mem.order(u8, a.net_tx_str, b.net_tx_str); + return if (ascending) ord == .lt else ord == .gt; + } + }.cmp); + }, + } + } + + fn drawCentered(win: vaxis.Window, msg: []const u8) void { + const col: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; + _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ + .col_offset = col, + .row_offset = win.height / 2, + .wrap = .none, + }); + } + + fn truncate(s: []const u8, max: usize) []const u8 { + return if (s.len > max) s[0..max] else s; + } +}; diff --git a/tui/src/views/storage.zig b/tui/src/views/storage.zig new file mode 100644 index 0000000..643a4c2 --- /dev/null +++ b/tui/src/views/storage.zig @@ -0,0 +1,287 @@ +const std = @import("std"); +const vaxis = @import("vaxis"); +const poll = @import("../poll.zig"); + +const Table = vaxis.widgets.Table; + +const Section = enum { pools, disks }; + +pub const StorageView = struct { + active_section: Section = .pools, + + // Pool section state + pool_selected: u16 = 0, + pool_scroll: u16 = 0, + num_pools: u16 = 0, + + // Disk section state + disk_table_ctx: Table.TableContext, + num_disks: u16 = 0, + + // Thresholds + warn_threshold: u8, + crit_threshold: u8, + + const pool_header = " Pool Name Node Type Used Total Usage Status"; + const pool_header_sep = " ─────────────────────────────────────────────────────────────────────────────────────────────"; + + pub fn init(warn: u8, crit: u8) StorageView { + return .{ + .warn_threshold = warn, + .crit_threshold = crit, + .disk_table_ctx = .{ + .active = false, + .selected_bg = .{ .index = 4 }, + .selected_fg = .{ .index = 0 }, + .active_bg = .{ .index = 4 }, + .active_fg = .{ .index = 0 }, + .hdr_bg_1 = .{ .index = 8 }, + .hdr_bg_2 = .{ .index = 8 }, + .row_bg_1 = .default, + .row_bg_2 = .default, + .col_width = .dynamic_fill, + .header_names = .{ .custom = &.{ "VM Name", "VMID", 
"Node", "Size" } }, + }, + }; + } + + pub fn handleKey(self: *StorageView, key: vaxis.Key) void { + if (key.matches(vaxis.Key.tab, .{})) { + self.toggleSection(); + return; + } + + switch (self.active_section) { + .pools => self.handlePoolKey(key), + .disks => self.handleDiskKey(key), + } + } + + fn toggleSection(self: *StorageView) void { + self.active_section = if (self.active_section == .pools) .disks else .pools; + self.disk_table_ctx.active = (self.active_section == .disks); + } + + fn handlePoolKey(self: *StorageView, key: vaxis.Key) void { + if (self.num_pools == 0) return; + if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { + if (self.pool_selected < self.num_pools - 1) self.pool_selected += 1; + } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { + if (self.pool_selected > 0) self.pool_selected -= 1; + } else if (key.matches('g', .{})) { + self.pool_selected = 0; + } else if (key.matches('G', .{ .shift = true })) { + if (self.num_pools > 0) self.pool_selected = self.num_pools - 1; + } + } + + fn handleDiskKey(self: *StorageView, key: vaxis.Key) void { + if (self.num_disks == 0) return; + if (key.matches('j', .{}) or key.matches(vaxis.Key.down, .{})) { + if (self.disk_table_ctx.row < self.num_disks - 1) self.disk_table_ctx.row += 1; + } else if (key.matches('k', .{}) or key.matches(vaxis.Key.up, .{})) { + if (self.disk_table_ctx.row > 0) self.disk_table_ctx.row -= 1; + } else if (key.matches('g', .{})) { + self.disk_table_ctx.row = 0; + } else if (key.matches('G', .{ .shift = true })) { + if (self.num_disks > 0) self.disk_table_ctx.row = self.num_disks - 1; + } + } + + pub fn draw( + self: *StorageView, + alloc: std.mem.Allocator, + win: vaxis.Window, + pools: []const poll.StoragePoolRow, + disks: []const poll.VmDiskRow, + ) void { + self.num_pools = @intCast(pools.len); + self.num_disks = @intCast(disks.len); + + if (pools.len == 0 and disks.len == 0) { + drawEmpty(win); + return; + } + + // Clamp selections + if 
(self.pool_selected >= self.num_pools and self.num_pools > 0) + self.pool_selected = self.num_pools - 1; + if (self.disk_table_ctx.row >= self.num_disks and self.num_disks > 0) + self.disk_table_ctx.row = self.num_disks - 1; + + // Split layout: pools get top portion, disks get bottom + const sep_row: u16 = @intCast(@max(4, @min(win.height -| 6, (win.height * 55) / 100))); + const pools_win = win.child(.{ .height = sep_row }); + const disks_win = win.child(.{ .y_off = @intCast(sep_row + 1), .height = win.height -| sep_row -| 1 }); + + // Separator line + self.drawSeparator(win, sep_row); + + // Draw sections + self.drawPools(pools_win, pools); + self.drawDisks(alloc, disks_win, disks); + } + + fn drawSeparator(self: *StorageView, win: vaxis.Window, row: u16) void { + const label = if (self.active_section == .disks) " VM Disks (active) " else " VM Disks "; + const style: vaxis.Style = .{ .fg = .{ .index = 8 } }; + const active_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bold = true }; + _ = win.print(&.{.{ + .text = label, + .style = if (self.active_section == .disks) active_style else style, + }}, .{ .row_offset = row, .wrap = .none }); + } + + fn drawPools(self: *StorageView, win: vaxis.Window, pools: []const poll.StoragePoolRow) void { + if (pools.len == 0) return; + + const is_active = (self.active_section == .pools); + const hdr_style: vaxis.Style = .{ .fg = .{ .index = 6 }, .bg = .{ .index = 8 }, .bold = true }; + const hdr_label = if (is_active) " Storage Pools (active)" else " Storage Pools"; + + // Header + _ = win.print(&.{.{ .text = hdr_label, .style = hdr_style }}, .{ .wrap = .none }); + + // Column headers (row 1) + const col_hdr_style: vaxis.Style = .{ .fg = .{ .index = 7 }, .bold = true }; + _ = win.print(&.{.{ .text = pool_header, .style = col_hdr_style }}, .{ + .row_offset = 1, + .wrap = .none, + }); + + // Scrolling + const visible_rows = win.height -| 2; + if (self.pool_selected < self.pool_scroll) { + self.pool_scroll = self.pool_selected; + 
} else if (self.pool_selected >= self.pool_scroll + visible_rows) {
+            self.pool_scroll = self.pool_selected - visible_rows + 1;
+        }
+
+        // Rows
+        var row_idx: u16 = 0;
+        const start = self.pool_scroll;
+        const end: u16 = @intCast(@min(pools.len, start + visible_rows));
+        for (pools[start..end]) |p| {
+            const display_row = row_idx + 2; // after header + col headers
+            const is_selected = is_active and (start + row_idx == self.pool_selected);
+
+            self.drawPoolRow(win, display_row, p, is_selected);
+            row_idx += 1;
+        }
+    }
+
+    /// Render one storage-pool row: fixed-width text columns, a usage bar at
+    /// column 66, and a colored status string at column 85 (green when the
+    /// status is exactly "available", yellow otherwise).
+    fn drawPoolRow(self: *StorageView, win: vaxis.Window, row: u16, p: poll.StoragePoolRow, selected: bool) void {
+        const bg: vaxis.Color = if (selected) .{ .index = 4 } else .default;
+        const fg: vaxis.Color = if (selected) .{ .index = 0 } else .{ .index = 7 };
+        const style: vaxis.Style = .{ .fg = fg, .bg = bg };
+
+        // Format: " name node type used total [bar] pct% status"
+        var buf: [256]u8 = undefined;
+        const line = std.fmt.bufPrint(&buf, " {s:<16} {s:<12} {s:<10} {s:<12} {s:<12}", .{
+            truncate(p.name, 16),
+            truncate(p.node, 12),
+            truncate(p.pool_type, 10),
+            truncate(p.used_str, 12),
+            truncate(p.total_str, 12),
+        }) catch return;
+
+        _ = win.print(&.{.{ .text = line, .style = style }}, .{
+            .row_offset = row,
+            .wrap = .none,
+        });
+
+        // Usage bar at column 66
+        self.drawUsageBar(win, row, 66, p.usage_pct, bg);
+
+        // Status after bar (col ~84)
+        const status_style: vaxis.Style = .{
+            .fg = if (std.mem.eql(u8, p.status, "available")) .{ .index = 2 } else .{ .index = 3 },
+            .bg = bg,
+        };
+        _ = win.print(&.{.{ .text = truncate(p.status, 10), .style = status_style }}, .{
+            .col_offset = 85,
+            .row_offset = row,
+            .wrap = .none,
+        });
+    }
+
+    /// Draw a 10-cell usage bar plus a "] NN%" suffix at (row, col).
+    /// Bar color is chosen from the *free* space remaining (100 - pct):
+    /// red when remaining < crit_threshold, yellow when remaining <
+    /// warn_threshold, else green — i.e. the configured thresholds are
+    /// "percent free" limits, not "percent used" limits.
+    fn drawUsageBar(self: *StorageView, win: vaxis.Window, row: u16, col: u16, pct: f64, bg: vaxis.Color) void {
+        const bar_width: u16 = 10;
+        const remaining = 100.0 - pct;
+        const bar_color: vaxis.Color = if (remaining < @as(f64, @floatFromInt(self.crit_threshold)))
+            .{ .index = 1 } // red
+        else if (remaining < @as(f64, @floatFromInt(self.warn_threshold)))
+            .{ .index = 3 } // yellow
+        else
+            .{ .index = 2 }; // green
+
+        // Filled cell count, clamped to bar_width.
+        const filled: u16 = @intFromFloat(@min(
+            @as(f64, @floatFromInt(bar_width)),
+            @round(pct / 100.0 * @as(f64, @floatFromInt(bar_width))),
+        ));
+        const empty_count = bar_width - filled;
+
+        // Build fill/empty strings from Unicode blocks
+        var fill_buf: [30]u8 = undefined;
+        var fill_len: usize = 0;
+        for (0..filled) |_| {
+            const ch = "\u{2588}";
+            @memcpy(fill_buf[fill_len..][0..ch.len], ch);
+            fill_len += ch.len;
+        }
+
+        var empty_buf: [30]u8 = undefined;
+        var empty_len: usize = 0;
+        for (0..empty_count) |_| {
+            const ch = "\u{2591}";
+            @memcpy(empty_buf[empty_len..][0..ch.len], ch);
+            empty_len += ch.len;
+        }
+
+        var pct_buf: [8]u8 = undefined;
+        const pct_str = std.fmt.bufPrint(&pct_buf, "] {d:>3.0}%", .{pct}) catch "] ?%";
+
+        _ = win.print(&.{
+            .{ .text = "[", .style = .{ .fg = .{ .index = 7 }, .bg = bg } },
+            .{ .text = fill_buf[0..fill_len], .style = .{ .fg = bar_color, .bg = bg } },
+            .{ .text = empty_buf[0..empty_len], .style = .{ .fg = .{ .index = 8 }, .bg = bg } },
+            .{ .text = pct_str, .style = .{ .fg = .{ .index = 7 }, .bg = bg } },
+        }, .{
+            .col_offset = col,
+            .row_offset = row,
+            .wrap = .none,
+        });
+    }
+
+    /// Lower pane: VM disk table rendered via the vaxis Table widget; shows
+    /// a centered placeholder when there is no disk data.
+    fn drawDisks(self: *StorageView, alloc: std.mem.Allocator, win: vaxis.Window, disks: []const poll.VmDiskRow) void {
+        if (disks.len == 0) {
+            const msg = "No VM disk data";
+            const c: u16 = if (win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0;
+            _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{
+                .col_offset = c,
+                .row_offset = win.height / 2,
+                .wrap = .none,
+            });
+            return;
+        }
+
+        if (self.disk_table_ctx.row >= self.num_disks)
+            self.disk_table_ctx.row = self.num_disks - 1;
+
+        // Draw errors are intentionally swallowed: rendering is best-effort
+        // per frame and retried on the next draw.
+        Table.drawTable(alloc, win, disks, &self.disk_table_ctx) catch {};
+    }
+
+    fn drawEmpty(win: vaxis.Window) void {
+        const msg = "No storage data available";
+        const col: u16 = if
(win.width > msg.len) (win.width - @as(u16, @intCast(msg.len))) / 2 else 0; + _ = win.print(&.{.{ .text = msg, .style = .{ .fg = .{ .index = 8 } } }}, .{ + .col_offset = col, + .row_offset = win.height / 2, + .wrap = .none, + }); + } + + fn truncate(s: []const u8, max: usize) []const u8 { + return if (s.len > max) s[0..max] else s; + } +}; From f1eb588e1e76f1d2e8efe2a457ee3ee5291d2606 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Wed, 18 Mar 2026 23:09:01 +0100 Subject: [PATCH 03/14] feat: add pvt tui subcommand to launch vitui Searches for the vitui binary adjacent to pvt, in tui/zig-out/bin/, or in $PATH. Forwards --config flag and inherits stdio for seamless terminal passthrough. --- cmd/tui.go | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 cmd/tui.go diff --git a/cmd/tui.go b/cmd/tui.go new file mode 100644 index 0000000..7c19935 --- /dev/null +++ b/cmd/tui.go @@ -0,0 +1,66 @@ +package cmd + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/spf13/cobra" +) + +var tuiCmd = &cobra.Command{ + Use: "tui", + Short: "Launch the interactive TUI (vitui)", + Long: `Launches vitui, the interactive terminal UI for monitoring and managing your Talos-on-Proxmox cluster.`, + RunE: runTUI, +} + +func init() { + rootCmd.AddCommand(tuiCmd) +} + +func runTUI(cmd *cobra.Command, args []string) error { + binary, err := findVitui() + if err != nil { + return fmt.Errorf("vitui binary not found: %w\n\nInstall vitui by running: cd tui && zig build -Doptimize=ReleaseSafe", err) + } + + // Build vitui args, forwarding --config if set + var vituiArgs []string + if cfgFile != "" { + vituiArgs = append(vituiArgs, "--config", cfgFile) + } + + proc := exec.Command(binary, vituiArgs...) + proc.Stdin = os.Stdin + proc.Stdout = os.Stdout + proc.Stderr = os.Stderr + + return proc.Run() +} + +// findVitui searches for the vitui binary in standard locations. +func findVitui() (string, error) { + // 1. 
Adjacent to the pvt binary + self, err := os.Executable() + if err == nil { + adjacent := filepath.Join(filepath.Dir(self), "vitui") + if _, err := os.Stat(adjacent); err == nil { + return adjacent, nil + } + } + + // 2. In the tui/zig-out/bin/ directory relative to working dir + local := filepath.Join("tui", "zig-out", "bin", "vitui") + if _, err := os.Stat(local); err == nil { + return local, nil + } + + // 3. In $PATH + if p, err := exec.LookPath("vitui"); err == nil { + return p, nil + } + + return "", fmt.Errorf("not in PATH, not adjacent to pvt binary, and not in tui/zig-out/bin/") +} From 890987282bed4bfaa367f96d163688ae9385f93a Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 09:32:28 +0100 Subject: [PATCH 04/14] fix(tui): fix use-after-free in config parsing and PVE auth header Config strings from the YAML parser were slices into the source buffer which was freed at the end of load(). All string fields now duped via allocator so they survive yaml.deinit(). Also fix PVE API auth header to include token_id in the format PVEAPIToken== as required by the Proxmox API. 
--- tui/src/api/http_client.zig | 8 +++++--- tui/src/config.zig | 33 +++++++++++++++++++-------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/tui/src/api/http_client.zig b/tui/src/api/http_client.zig index 929d087..278d8aa 100644 --- a/tui/src/api/http_client.zig +++ b/tui/src/api/http_client.zig @@ -7,14 +7,16 @@ const Allocator = std.mem.Allocator; pub const HttpClient = struct { allocator: Allocator, endpoint: []const u8, - auth_header: []const u8, + token_id: []const u8, + token_secret: []const u8, tls_verify: bool, pub fn init(allocator: Allocator, pve: config.ProxmoxCluster) HttpClient { return .{ .allocator = allocator, .endpoint = pve.endpoint, - .auth_header = pve.token_secret, + .token_id = pve.token_id, + .token_secret = pve.token_secret, .tls_verify = pve.tls_verify, }; } @@ -33,7 +35,7 @@ pub const HttpClient = struct { const url = try std.fmt.allocPrint(self.allocator, "{s}{s}", .{ self.endpoint, path }); defer self.allocator.free(url); - const auth = try std.fmt.allocPrint(self.allocator, "Authorization: PVEAPIToken={s}", .{self.auth_header}); + const auth = try std.fmt.allocPrint(self.allocator, "Authorization: PVEAPIToken={s}={s}", .{ self.token_id, self.token_secret }); defer self.allocator.free(auth); var argv_list: std.ArrayListUnmanaged([]const u8) = .empty; diff --git a/tui/src/config.zig b/tui/src/config.zig index d5f006f..48f2114 100644 --- a/tui/src/config.zig +++ b/tui/src/config.zig @@ -128,6 +128,11 @@ pub fn load(alloc: Allocator, path: []const u8) !Config { return parseConfig(alloc, root_map); } +/// Dupe a string from the YAML tree so it outlives y.deinit(). 
+fn dupeStr(alloc: Allocator, map: Map, key: []const u8) ![]const u8 { + return alloc.dupe(u8, try getStr(map, key)); +} + fn parseConfig(alloc: Allocator, root: Map) !Config { const version = try getStr(root, "version"); if (!std.mem.eql(u8, version, "1")) { @@ -148,10 +153,10 @@ fn parseConfig(alloc: Allocator, root: Map) !Config { for (pve_clusters_list, 0..) |item, i| { const m = item.asMap() orelse return error.ConfigParseFailed; pve_clusters[i] = .{ - .name = try getStr(m, "name"), - .endpoint = try getStr(m, "endpoint"), - .token_id = try getStr(m, "token_id"), - .token_secret = try getStr(m, "token_secret"), + .name = try dupeStr(alloc, m, "name"), + .endpoint = try dupeStr(alloc, m, "endpoint"), + .token_id = try dupeStr(alloc, m, "token_id"), + .token_secret = try dupeStr(alloc, m, "token_secret"), .tls_verify = getBool(m, "tls_verify", true), }; } @@ -162,8 +167,8 @@ fn parseConfig(alloc: Allocator, root: Map) !Config { return error.ConfigParseFailed; }; const talos = TalosConfig{ - .config_path = try getStr(talos_map, "config_path"), - .context = try getStr(talos_map, "context"), + .config_path = try dupeStr(alloc, talos_map, "config_path"), + .context = try dupeStr(alloc, talos_map, "context"), }; // Parse clusters section @@ -179,17 +184,17 @@ fn parseConfig(alloc: Allocator, root: Map) !Config { for (nodes_list, 0..) 
|n, j| { const nm = n.asMap() orelse return error.ConfigParseFailed; nodes[j] = .{ - .name = try getStr(nm, "name"), - .role = try getStr(nm, "role"), + .name = try dupeStr(alloc, nm, "name"), + .role = try dupeStr(alloc, nm, "role"), .proxmox_vmid = getInt(nm, "proxmox_vmid", 0), - .proxmox_node = try getStr(nm, "proxmox_node"), - .ip = try getStr(nm, "ip"), + .proxmox_node = try dupeStr(alloc, nm, "proxmox_node"), + .ip = try dupeStr(alloc, nm, "ip"), }; } clusters[i] = .{ - .name = try getStr(m, "name"), - .proxmox_cluster = try getStr(m, "proxmox_cluster"), - .endpoint = try getStr(m, "endpoint"), + .name = try dupeStr(alloc, m, "name"), + .proxmox_cluster = try dupeStr(alloc, m, "proxmox_cluster"), + .endpoint = try dupeStr(alloc, m, "endpoint"), .nodes = nodes, }; } @@ -212,7 +217,7 @@ fn parseConfig(alloc: Allocator, root: Map) !Config { } return .{ - .version = version, + .version = try alloc.dupe(u8, version), .proxmox = .{ .clusters = pve_clusters }, .talos = talos, .clusters = clusters, From 050b54cd48645279275e080675c36ce834f10ec3 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 10:00:47 +0100 Subject: [PATCH 05/14] fix(tui): forward viper-resolved config path to vitui When --config isn't explicitly set, pvt tui now passes viper's resolved config file path to vitui so it can find pvt.yaml without the user having to specify it manually. 
--- cmd/tui.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/cmd/tui.go b/cmd/tui.go index 7c19935..895cf58 100644 --- a/cmd/tui.go +++ b/cmd/tui.go @@ -7,6 +7,7 @@ import ( "path/filepath" "github.com/spf13/cobra" + "github.com/spf13/viper" ) var tuiCmd = &cobra.Command{ @@ -26,10 +27,17 @@ func runTUI(cmd *cobra.Command, args []string) error { return fmt.Errorf("vitui binary not found: %w\n\nInstall vitui by running: cd tui && zig build -Doptimize=ReleaseSafe", err) } - // Build vitui args, forwarding --config if set + // Build vitui args, forwarding the resolved config path var vituiArgs []string - if cfgFile != "" { - vituiArgs = append(vituiArgs, "--config", cfgFile) + configPath := cfgFile + if configPath == "" { + // Use viper's resolved config path if available + if f := viper.ConfigFileUsed(); f != "" { + configPath = f + } + } + if configPath != "" { + vituiArgs = append(vituiArgs, "--config", configPath) } proc := exec.Command(binary, vituiArgs...) 
From fa396f5fe7207862fc71749ec4480a7a0b6dde07 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 10:09:56 +0100 Subject: [PATCH 06/14] fix(tui): fix tilde expansion, quit, and stderr TUI corruption - Expand ~ to $HOME in config string values so paths like ~/talos/apollo/talosconfig resolve correctly - Remove std.log.err() calls from HTTP client that wrote to stderr and corrupted the vaxis TUI rendering - Fix q key quit by calling loop.stop() and checking should_quit immediately after event handling --- tui/src/api/http_client.zig | 4 +--- tui/src/app.zig | 4 +++- tui/src/config.zig | 8 +++++++- 3 files changed, 11 insertions(+), 5 deletions(-) diff --git a/tui/src/api/http_client.zig b/tui/src/api/http_client.zig index 278d8aa..2504f8b 100644 --- a/tui/src/api/http_client.zig +++ b/tui/src/api/http_client.zig @@ -55,8 +55,7 @@ pub const HttpClient = struct { .allocator = self.allocator, .argv = argv_list.items, .max_output_bytes = 1024 * 1024, - }) catch |err| { - std.log.err("failed to run curl: {}", .{err}); + }) catch { return error.HttpRequestFailed; }; defer self.allocator.free(result.stderr); @@ -67,7 +66,6 @@ pub const HttpClient = struct { } self.allocator.free(result.stdout); - std.log.err("curl {s} failed (exit {}): {s}", .{ method, term, result.stderr }); return error.HttpRequestFailed; } diff --git a/tui/src/app.zig b/tui/src/app.zig index 9af3149..58cd783 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -151,6 +151,7 @@ pub const App = struct { while (!self.should_quit) { const event = self.loop.nextEvent(); try self.handleEvent(alloc, event); + if (self.should_quit) break; try self.draw(); try self.vx.render(self.tty.writer()); } @@ -175,8 +176,9 @@ pub const App = struct { } // Global keys - if (key.matches('q', .{})) { + if (key.matches('q', .{}) or key.matches('q', .{ .ctrl = true })) { self.should_quit = true; + self.loop.stop(); return; } if (key.matches('?', .{})) { diff --git a/tui/src/config.zig b/tui/src/config.zig index 
48f2114..edf3707 100644 --- a/tui/src/config.zig +++ b/tui/src/config.zig @@ -129,8 +129,14 @@ pub fn load(alloc: Allocator, path: []const u8) !Config { } /// Dupe a string from the YAML tree so it outlives y.deinit(). +/// Also expands leading ~ to $HOME. fn dupeStr(alloc: Allocator, map: Map, key: []const u8) ![]const u8 { - return alloc.dupe(u8, try getStr(map, key)); + const raw = try getStr(map, key); + if (raw.len > 0 and raw[0] == '~') { + const home = std.posix.getenv("HOME") orelse return alloc.dupe(u8, raw); + return std.fmt.allocPrint(alloc, "{s}{s}", .{ home, raw[1..] }); + } + return alloc.dupe(u8, raw); } fn parseConfig(alloc: Allocator, root: Map) !Config { From c500ddcb9679064a4fbacf65a2998c08bb2dc3d8 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 10:13:58 +0100 Subject: [PATCH 07/14] fix(tui): fix segfault on quit by removing loop.stop() from key handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't call loop.stop() from inside the event handler — it corrupts the loop state causing a NULL pointer memcpy crash. Instead just set should_quit and break from the event loop, then call exitAltScreen before deinit for clean terminal restoration. 
--- tui/src/app.zig | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tui/src/app.zig b/tui/src/app.zig index 58cd783..6b93e18 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -155,6 +155,9 @@ pub const App = struct { try self.draw(); try self.vx.render(self.tty.writer()); } + + // Leave alt screen cleanly before deinit + try self.vx.exitAltScreen(self.tty.writer()); } fn handleEvent(self: *App, alloc: std.mem.Allocator, event: Event) !void { @@ -178,7 +181,6 @@ pub const App = struct { // Global keys if (key.matches('q', .{}) or key.matches('q', .{ .ctrl = true })) { self.should_quit = true; - self.loop.stop(); return; } if (key.matches('?', .{})) { From d5c3e30c076ff560b30116fc6f00168df7111af9 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 10:15:59 +0100 Subject: [PATCH 08/14] fix(tui): restore terminal before joining poller thread on quit The poller thread can be blocked for up to 10s on a curl timeout or 30s on its sleep interval. Previously deinit() called poller.stop() (which joins the thread) before restoring the terminal, leaving the user staring at a frozen screen. Now: signal stop, restore terminal, then join. 
--- tui/src/app.zig | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/tui/src/app.zig b/tui/src/app.zig index 6b93e18..b8d9de2 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -120,10 +120,21 @@ pub const App = struct { } pub fn deinit(self: *App, alloc: std.mem.Allocator) void { - self.poller.stop(); + // Signal poller to stop (non-blocking) so it can begin winding down + self.poller.should_stop.store(true, .release); + + // Restore terminal FIRST so the user isn't staring at a frozen screen + // while we wait for background threads to finish self.loop.stop(); self.vx.deinit(alloc, self.tty.writer()); self.tty.deinit(); + + // Now wait for the poller thread to actually finish + if (self.poller.thread) |t| { + t.join(); + self.poller.thread = null; + } + self.cluster_state.deinit(); self.storage_state.deinit(); self.backup_state.deinit(); @@ -155,9 +166,6 @@ pub const App = struct { try self.draw(); try self.vx.render(self.tty.writer()); } - - // Leave alt screen cleanly before deinit - try self.vx.exitAltScreen(self.tty.writer()); } fn handleEvent(self: *App, alloc: std.mem.Allocator, event: Event) !void { From 04d44f2fd4a61aaf400117e95b0faf84f59b61f4 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 10:17:28 +0100 Subject: [PATCH 09/14] fix: quit pvt tui cleanly --- tui/src/app.zig | 21 +++++++++++++++------ tui/src/main.zig | 5 ++++- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/tui/src/app.zig b/tui/src/app.zig index b8d9de2..7cc9312 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -115,19 +115,27 @@ pub const App = struct { .poller = poller, }; app.tty = try vaxis.Tty.init(&app.tty_buf); - app.loop = .{ .tty = &app.tty, .vaxis = &app.vx }; + // `App` is returned by value, so pointer-bearing runtime fields must be + // wired after the caller has the app at its final address. 
return app; } - pub fn deinit(self: *App, alloc: std.mem.Allocator) void { + pub fn restoreTerminal(self: *App, alloc: std.mem.Allocator) void { // Signal poller to stop (non-blocking) so it can begin winding down self.poller.should_stop.store(true, .release); - // Restore terminal FIRST so the user isn't staring at a frozen screen - // while we wait for background threads to finish - self.loop.stop(); + // `vaxis.Loop.stop()` wakes the reader by writing a device-status + // query, which can hang shutdown if the terminal never answers it. + // Mark the loop as quitting, restore the screen, then close the TTY. + // The normal quit path exits the process immediately after this, so we + // intentionally do not wait for background threads here. + self.loop.should_quit = true; self.vx.deinit(alloc, self.tty.writer()); self.tty.deinit(); + } + + pub fn deinit(self: *App, alloc: std.mem.Allocator) void { + self.restoreTerminal(alloc); // Now wait for the poller thread to actually finish if (self.poller.thread) |t| { @@ -147,8 +155,9 @@ pub const App = struct { } pub fn run(self: *App, alloc: std.mem.Allocator) !void { - // Now that self is at its final address, wire up the config pointer + // Now that self is at its final address, wire up runtime pointers. 
self.poller.cfg = &self.cfg; + self.loop = .{ .tty = &self.tty, .vaxis = &self.vx }; try self.loop.init(); try self.loop.start(); diff --git a/tui/src/main.zig b/tui/src/main.zig index 1e4832b..ab40e19 100644 --- a/tui/src/main.zig +++ b/tui/src/main.zig @@ -21,12 +21,15 @@ pub fn main() !void { std.log.err("failed to initialize TUI: {}", .{err}); std.process.exit(1); }; - defer app.deinit(alloc); app.run(alloc) catch |err| { + app.restoreTerminal(alloc); std.log.err("runtime error: {}", .{err}); std.process.exit(1); }; + + app.restoreTerminal(alloc); + std.process.exit(0); } fn parseArgs() ![]const u8 { From a7ff553b1de495607e50e7cb5d8ebf1942b08781 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 10:47:18 +0100 Subject: [PATCH 10/14] fix: address PR review feedback --- tui/src/app.zig | 81 ++++++++++++++++++++++++++++----------- tui/src/config.zig | 13 ++++++- tui/src/poll.zig | 41 +++++++------------- tui/src/views/backups.zig | 14 ++++++- 4 files changed, 96 insertions(+), 53 deletions(-) diff --git a/tui/src/app.zig b/tui/src/app.zig index 7cc9312..fdbd82c 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -355,44 +355,81 @@ pub const App = struct { fn drawContent(self: *App, win: vaxis.Window) void { switch (self.active_view) { .cluster => { - const rows = self.cluster_state.getRows(); - if (self.cluster_state.isLoading() and rows.len == 0) { + self.cluster_state.lock(); + defer self.cluster_state.unlock(); + + if (self.cluster_state.is_loading and self.cluster_state.rows.len == 0) { self.drawPlaceholder(win, "Loading cluster data..."); } else { - self.cluster_view.draw(self.alloc, win, rows); + self.cluster_view.draw(self.alloc, win, self.cluster_state.rows); } }, .storage => { - const pools = self.storage_state.getPools(); - const disks = self.storage_state.getVmDisks(); - if (self.storage_state.isLoading() and pools.len == 0) { + self.storage_state.lock(); + defer self.storage_state.unlock(); + + if (self.storage_state.is_loading and 
self.storage_state.pools.len == 0) { self.drawPlaceholder(win, "Loading storage data..."); } else { - self.storage_view.draw(self.alloc, win, pools, disks); + self.storage_view.draw(self.alloc, win, self.storage_state.pools, self.storage_state.vm_disks); } }, .backups => { - const backups = self.backup_state.getBackups(); - const k8s_backups = self.backup_state.getK8sBackups(); - if (self.backup_state.isLoading() and backups.len == 0 and k8s_backups.len == 0) { - self.drawPlaceholder(win, "Loading backup data..."); - } else { - self.backup_view.draw(win, backups, k8s_backups); - - // Check for pending delete action - if (self.backup_view.consumeDeleteAction(backups)) |action| { - self.executeDelete(action); + var action: ?DeleteAction = null; + self.backup_state.lock(); + { + defer self.backup_state.unlock(); + + if (self.backup_state.is_loading and + self.backup_state.backups.len == 0 and + self.backup_state.k8s_backups.len == 0) + { + self.drawPlaceholder(win, "Loading backup data..."); + } else { + self.backup_view.draw(win, self.backup_state.backups, self.backup_state.k8s_backups); + + // Copy action data while the backing rows are still locked. 
+ if (self.backup_view.consumeDeleteAction(self.backup_state.backups)) |pending| { + const node = self.alloc.dupe(u8, pending.node) catch return; + const storage = self.alloc.dupe(u8, pending.storage) catch { + self.alloc.free(node); + return; + }; + const volid = self.alloc.dupe(u8, pending.volid) catch { + self.alloc.free(storage); + self.alloc.free(node); + return; + }; + action = .{ + .node = node, + .storage = storage, + .volid = volid, + }; + } } } + + if (action) |owned_action| { + defer self.alloc.free(owned_action.node); + defer self.alloc.free(owned_action.storage); + defer self.alloc.free(owned_action.volid); + self.executeDelete(owned_action); + } }, .performance => { - const hosts = self.perf_state.getHosts(); - const pods = self.perf_state.getPods(); - const available = self.perf_state.isMetricsAvailable(); - if (self.perf_state.isLoading() and hosts.len == 0) { + self.perf_state.lock(); + defer self.perf_state.unlock(); + + if (self.perf_state.is_loading and self.perf_state.hosts.len == 0) { self.drawPlaceholder(win, "Loading performance data..."); } else { - self.perf_view.draw(self.alloc, win, hosts, pods, available); + self.perf_view.draw( + self.alloc, + win, + self.perf_state.hosts, + self.perf_state.pods, + self.perf_state.metrics_available, + ); } }, } diff --git a/tui/src/config.zig b/tui/src/config.zig index edf3707..e47b91d 100644 --- a/tui/src/config.zig +++ b/tui/src/config.zig @@ -5,6 +5,8 @@ const Allocator = std.mem.Allocator; const Value = yaml.Yaml.Value; const Map = yaml.Yaml.Map; +var discover_path_buf: [std.fs.max_path_bytes]u8 = undefined; + // ── Config types ───────────────────────────────────────────────────── pub const Config = struct { @@ -277,10 +279,17 @@ pub fn expandEnvVars(alloc: Allocator, input: []const u8) ![]const u8 { /// Discover the config file path using standard search order. 
pub fn discover() ![]const u8 { if (std.posix.getenv("PVT_CONFIG")) |p| { - std.fs.cwd().access(p, .{}) catch {}; + std.fs.cwd().access(p, .{}) catch return error.ConfigNotFound; return p; } - std.fs.cwd().access("pvt.yaml", .{}) catch return error.ConfigNotFound; + std.fs.cwd().access("pvt.yaml", .{}) catch { + const home = std.posix.getenv("HOME") orelse return error.ConfigNotFound; + const fallback = std.fmt.bufPrint(&discover_path_buf, "{s}/.config/pvt/config.yaml", .{home}) catch { + return error.ConfigNotFound; + }; + std.fs.cwd().access(fallback, .{}) catch return error.ConfigNotFound; + return fallback; + }; return "pvt.yaml"; } diff --git a/tui/src/poll.zig b/tui/src/poll.zig index 2adbcb2..67abd94 100644 --- a/tui/src/poll.zig +++ b/tui/src/poll.zig @@ -77,16 +77,12 @@ pub const StorageState = struct { self.last_refresh_epoch = std.time.timestamp(); } - pub fn getPools(self: *StorageState) []StoragePoolRow { + pub fn lock(self: *StorageState) void { self.mutex.lock(); - defer self.mutex.unlock(); - return self.pools; } - pub fn getVmDisks(self: *StorageState) []VmDiskRow { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.vm_disks; + pub fn unlock(self: *StorageState) void { + self.mutex.unlock(); } pub fn isLoading(self: *StorageState) bool { @@ -165,16 +161,12 @@ pub const BackupState = struct { self.last_refresh_epoch = std.time.timestamp(); } - pub fn getBackups(self: *BackupState) []BackupRow { + pub fn lock(self: *BackupState) void { self.mutex.lock(); - defer self.mutex.unlock(); - return self.backups; } - pub fn getK8sBackups(self: *BackupState) []K8sBackupRow { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.k8s_backups; + pub fn unlock(self: *BackupState) void { + self.mutex.unlock(); } pub fn isLoading(self: *BackupState) bool { @@ -256,16 +248,12 @@ pub const PerfState = struct { self.last_refresh_epoch = std.time.timestamp(); } - pub fn getHosts(self: *PerfState) []HostRow { + pub fn lock(self: *PerfState) void { 
self.mutex.lock(); - defer self.mutex.unlock(); - return self.hosts; } - pub fn getPods(self: *PerfState) []PodMetricRow { - self.mutex.lock(); - defer self.mutex.unlock(); - return self.pods; + pub fn unlock(self: *PerfState) void { + self.mutex.unlock(); } pub fn isMetricsAvailable(self: *PerfState) bool { @@ -354,13 +342,12 @@ pub const ClusterState = struct { self.last_refresh_epoch = std.time.timestamp(); } - /// Get a snapshot of the current rows. Caller must NOT free. - /// Safe to read while mutex is not held — the pointer is stable - /// until the next swapRows call. - pub fn getRows(self: *ClusterState) []NodeRow { + pub fn lock(self: *ClusterState) void { self.mutex.lock(); - defer self.mutex.unlock(); - return self.rows; + } + + pub fn unlock(self: *ClusterState) void { + self.mutex.unlock(); } pub fn getLastRefresh(self: *ClusterState) i64 { diff --git a/tui/src/views/backups.zig b/tui/src/views/backups.zig index 1fe9643..a4fad57 100644 --- a/tui/src/views/backups.zig +++ b/tui/src/views/backups.zig @@ -391,8 +391,7 @@ pub const BackupView = struct { } // Check if confirm was just accepted if (!self.show_confirm and self.pending_idx != null) { - const idx = self.pending_idx.?; - if (idx < backups.len) { + if (self.filteredBackupIndex(backups, self.pending_idx.?)) |idx| { const b = backups[idx]; self.pending_idx = null; return .{ @@ -417,6 +416,17 @@ pub const BackupView = struct { fn truncate(s: []const u8, max: usize) []const u8 { return if (s.len > max) s[0..max] else s; } + + fn filteredBackupIndex(self: *BackupView, backups: []const poll.BackupRow, filtered_idx: u16) ?u16 { + const filter = if (self.filter_len > 0) self.filter_buf[0..self.filter_len] else ""; + var matched: u16 = 0; + for (backups, 0..) |b, i| { + if (!self.matchesFilter(b, filter)) continue; + if (matched == filtered_idx) return @intCast(i); + matched += 1; + } + return null; + } }; /// Case-insensitive substring check (ASCII only). 
From d8bc4174490b08cea4b12b73e40246f9bfcd6cd3 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 11:29:25 +0100 Subject: [PATCH 11/14] fix: address additional PR review feedback --- tui/src/api/metrics.zig | 19 +++++++++++++++++++ tui/src/api/proxmox.zig | 15 +++++++++++---- tui/src/app.zig | 6 ++++++ tui/src/poll.zig | 17 +++++++++++++++++ 4 files changed, 53 insertions(+), 4 deletions(-) diff --git a/tui/src/api/metrics.zig b/tui/src/api/metrics.zig index d86ede3..70cf272 100644 --- a/tui/src/api/metrics.zig +++ b/tui/src/api/metrics.zig @@ -38,6 +38,10 @@ pub const MetricsClient = struct { for (candidates) |c| { const endpoint = detectEndpoint(allocator, kubeconfig, c.ns, c.svc, c.port); if (endpoint) |ep| { + if (!probeEndpoint(allocator, ep)) { + allocator.free(ep); + continue; + } return .{ .allocator = allocator, .endpoint = ep, @@ -78,6 +82,21 @@ pub const MetricsClient = struct { return null; } + fn probeEndpoint(allocator: Allocator, endpoint: []const u8) bool { + const url = std.fmt.allocPrint(allocator, "{s}/api/v1/query?query=up", .{endpoint}) catch return false; + defer allocator.free(url); + + const result = std.process.Child.run(.{ + .allocator = allocator, + .argv = &.{ "curl", "-s", "-f", "--max-time", "5", url }, + .max_output_bytes = 16 * 1024, + }) catch return false; + defer allocator.free(result.stderr); + defer allocator.free(result.stdout); + + return result.term == .Exited and result.term.Exited == 0; + } + /// Query pod CPU usage via PromQL. pub fn getPodCpu(self: *MetricsClient) []PodMetricValue { if (!self.available) return &.{}; diff --git a/tui/src/api/proxmox.zig b/tui/src/api/proxmox.zig index 264f94b..deccde2 100644 --- a/tui/src/api/proxmox.zig +++ b/tui/src/api/proxmox.zig @@ -220,14 +220,14 @@ pub const ProxmoxClient = struct { /// Delete a backup by volume ID. 
pub fn deleteBackup(self: *ProxmoxClient, node: []const u8, storage: []const u8, volid: []const u8) !void { - // URL-encode the volid (colons → %3A) + // Percent-encode the volid as a single path segment. var encoded: std.ArrayListUnmanaged(u8) = .empty; defer encoded.deinit(self.allocator); for (volid) |c| { - if (c == ':') { - try encoded.appendSlice(self.allocator, "%3A"); - } else { + if (isUnreserved(c)) { try encoded.append(self.allocator, c); + } else { + try std.fmt.format(encoded.writer(self.allocator), "%{X:0>2}", .{c}); } } @@ -244,3 +244,10 @@ pub const ProxmoxClient = struct { self.client.deinit(); } }; + +fn isUnreserved(c: u8) bool { + return switch (c) { + 'A'...'Z', 'a'...'z', '0'...'9', '-', '.', '_', '~' => true, + else => false, + }; +} diff --git a/tui/src/app.zig b/tui/src/app.zig index fdbd82c..d7ab155 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -158,6 +158,7 @@ pub const App = struct { // Now that self is at its final address, wire up runtime pointers. self.poller.cfg = &self.cfg; self.loop = .{ .tty = &self.tty, .vaxis = &self.vx }; + self.poller.setRefreshNotifier(self, postRefreshEvent); try self.loop.init(); try self.loop.start(); @@ -177,6 +178,11 @@ pub const App = struct { } } + fn postRefreshEvent(context: *anyopaque) void { + const self: *App = @ptrCast(@alignCast(context)); + _ = self.loop.tryPostEvent(.data_refresh); + } + fn handleEvent(self: *App, alloc: std.mem.Allocator, event: Event) !void { switch (event) { .key_press => |key| self.handleKey(key), diff --git a/tui/src/poll.zig b/tui/src/poll.zig index 67abd94..2713383 100644 --- a/tui/src/poll.zig +++ b/tui/src/poll.zig @@ -395,6 +395,8 @@ pub const Poller = struct { should_stop: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), force_refresh: std.atomic.Value(bool) = std.atomic.Value(bool).init(false), thread: ?std.Thread = null, + refresh_context: ?*anyopaque = null, + refresh_callback: ?*const fn (*anyopaque) void = null, allocator: Allocator, pub fn 
init( @@ -433,6 +435,15 @@ pub const Poller = struct { self.force_refresh.store(true, .release); } + pub fn setRefreshNotifier( + self: *Poller, + context: *anyopaque, + callback: *const fn (*anyopaque) void, + ) void { + self.refresh_context = context; + self.refresh_callback = callback; + } + fn pollLoop(self: *Poller) void { while (!self.should_stop.load(.acquire)) { self.fetchAll(); @@ -562,6 +573,12 @@ pub const Poller = struct { // Fetch performance data self.fetchPerformance(); + + if (self.refresh_callback) |callback| { + if (self.refresh_context) |context| { + callback(context); + } + } } fn fetchStorage(self: *Poller) void { From 5b5dcd6e67d146b2db8744f33b2b9f092a957ce1 Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 12:10:22 +0100 Subject: [PATCH 12/14] fix: address latest PR review feedback --- tui/src/poll.zig | 13 +++++++------ tui/src/views/performance.zig | 8 +++++++- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/tui/src/poll.zig b/tui/src/poll.zig index 2713383..553df72 100644 --- a/tui/src/poll.zig +++ b/tui/src/poll.zig @@ -527,12 +527,17 @@ pub const Poller = struct { // Fetch Talos version for this node var talos_ver: []const u8 = "-"; var k8s_ver: []const u8 = "-"; + var version_result: ?talos.TalosVersion = null; if (talos_client.getVersion(node.ip)) |ver| { + version_result = ver; talos_ver = ver.talos_version; k8s_ver = ver.kubernetes_version; - // Note: ver.node is freed by us since we'll dupe the strings below - alloc.free(ver.node); } + defer if (version_result) |ver| { + alloc.free(ver.node); + alloc.free(ver.talos_version); + alloc.free(ver.kubernetes_version); + }; // Determine health const health: []const u8 = if (std.mem.eql(u8, vm_status, "running")) @@ -555,10 +560,6 @@ pub const Poller = struct { .etcd = alloc.dupe(u8, etcd_role) catch continue, .health = alloc.dupe(u8, health) catch continue, }) catch continue; - - // Free talos version strings if they were allocated - if (!std.mem.eql(u8, 
talos_ver, "-")) alloc.free(talos_ver); - if (!std.mem.eql(u8, k8s_ver, "-")) alloc.free(k8s_ver); } } diff --git a/tui/src/views/performance.zig b/tui/src/views/performance.zig index 7861f58..09c951f 100644 --- a/tui/src/views/performance.zig +++ b/tui/src/views/performance.zig @@ -154,7 +154,13 @@ pub const PerformanceView = struct { self.sortPods(filtered.items); self.num_pods = @intCast(filtered.items.len); - if (self.selected >= self.num_pods and self.num_pods > 0) self.selected = self.num_pods - 1; + if (self.num_pods == 0) { + self.selected = 0; + self.scroll = 0; + } else { + if (self.selected >= self.num_pods) self.selected = self.num_pods - 1; + if (self.scroll >= self.num_pods) self.scroll = self.num_pods - 1; + } // Pod header { From 76f8f54ead3433857fa6863dfdff16968c7a474d Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 12:19:20 +0100 Subject: [PATCH 13/14] fix: scope backup deletes to stable backup identity --- tui/src/app.zig | 25 +++-------- tui/src/poll.zig | 3 ++ tui/src/views/backups.zig | 89 ++++++++++++++++++++++++--------------- 3 files changed, 63 insertions(+), 54 deletions(-) diff --git a/tui/src/app.zig b/tui/src/app.zig index d7ab155..3d312ff 100644 --- a/tui/src/app.zig +++ b/tui/src/app.zig @@ -108,7 +108,7 @@ pub const App = struct { .cluster_state = state, .storage_view = StorageView.init(cfg.tui_settings.warn_threshold, cfg.tui_settings.crit_threshold), .storage_state = storage_state, - .backup_view = BackupView.init(cfg.tui_settings.stale_days), + .backup_view = BackupView.init(alloc, cfg.tui_settings.stale_days), .backup_state = backup_state, .perf_view = PerformanceView.init(), .perf_state = perf_state, @@ -395,27 +395,12 @@ pub const App = struct { self.backup_view.draw(win, self.backup_state.backups, self.backup_state.k8s_backups); // Copy action data while the backing rows are still locked. 
- if (self.backup_view.consumeDeleteAction(self.backup_state.backups)) |pending| { - const node = self.alloc.dupe(u8, pending.node) catch return; - const storage = self.alloc.dupe(u8, pending.storage) catch { - self.alloc.free(node); - return; - }; - const volid = self.alloc.dupe(u8, pending.volid) catch { - self.alloc.free(storage); - self.alloc.free(node); - return; - }; - action = .{ - .node = node, - .storage = storage, - .volid = volid, - }; - } + action = self.backup_view.consumeDeleteAction(); } } if (action) |owned_action| { + defer self.alloc.free(owned_action.proxmox_cluster); defer self.alloc.free(owned_action.node); defer self.alloc.free(owned_action.storage); defer self.alloc.free(owned_action.volid); @@ -442,11 +427,11 @@ pub const App = struct { } fn executeDelete(self: *App, action: DeleteAction) void { - // Find matching PVE cluster config for this node for (self.cfg.proxmox.clusters) |pc| { + if (!std.mem.eql(u8, pc.name, action.proxmox_cluster)) continue; var client = proxmox_api.ProxmoxClient.init(self.alloc, pc); defer client.deinit(); - client.deleteBackup(action.node, action.storage, action.volid) catch continue; + client.deleteBackup(action.node, action.storage, action.volid) catch return; // Trigger refresh to show updated list self.poller.triggerRefresh(); return; diff --git a/tui/src/poll.zig b/tui/src/poll.zig index 553df72..f1c568a 100644 --- a/tui/src/poll.zig +++ b/tui/src/poll.zig @@ -117,6 +117,7 @@ pub const StorageState = struct { /// A single row in the backups table. 
pub const BackupRow = struct { + proxmox_cluster: []const u8, volid: []const u8, node: []const u8, storage: []const u8, @@ -177,6 +178,7 @@ pub const BackupState = struct { fn freeDataInternal(self: *BackupState) void { for (self.backups) |row| { + self.allocator.free(row.proxmox_cluster); self.allocator.free(row.volid); self.allocator.free(row.node); self.allocator.free(row.storage); @@ -708,6 +710,7 @@ pub const Poller = struct { const is_stale = age_days > self.cfg.tui_settings.stale_days; backups_list.append(alloc, .{ + .proxmox_cluster = alloc.dupe(u8, pc.name) catch continue, .volid = alloc.dupe(u8, entry.volid) catch continue, .node = alloc.dupe(u8, entry.node) catch continue, .storage = alloc.dupe(u8, entry.storage) catch continue, diff --git a/tui/src/views/backups.zig b/tui/src/views/backups.zig index a4fad57..e3ea0ac 100644 --- a/tui/src/views/backups.zig +++ b/tui/src/views/backups.zig @@ -3,6 +3,7 @@ const vaxis = @import("vaxis"); const poll = @import("../poll.zig"); pub const DeleteAction = struct { + proxmox_cluster: []const u8, node: []const u8, storage: []const u8, volid: []const u8, @@ -13,6 +14,7 @@ pub const BackupView = struct { scroll: u16 = 0, num_backups: u16 = 0, stale_days: u32, + allocator: std.mem.Allocator, // Total row count across both sections (for navigation) total_rows: u16 = 0, @@ -20,6 +22,7 @@ pub const BackupView = struct { // Confirmation dialog state show_confirm: bool = false, pending_idx: ?u16 = null, + pending_delete: ?DeleteAction = null, // Set by handleKey when user confirms deletion delete_action: ?DeleteAction = null, @@ -32,19 +35,25 @@ pub const BackupView = struct { const pve_col_header = " VM Name VMID Date Size Storage Age"; const k8s_col_header = " Name Namespace Source Status Schedule Last Run"; - pub fn init(stale_days: u32) BackupView { - return .{ .stale_days = stale_days }; + pub fn init(allocator: std.mem.Allocator, stale_days: u32) BackupView { + return .{ + .stale_days = stale_days, + .allocator = 
allocator, + }; } pub fn handleKey(self: *BackupView, key: vaxis.Key) void { // Confirmation dialog intercepts all input if (self.show_confirm) { if (key.matches('y', .{})) { + self.delete_action = self.pending_delete; + self.pending_delete = null; + self.pending_idx = null; self.show_confirm = false; } else if (key.matches('n', .{}) or key.matches(vaxis.Key.escape, .{})) { self.show_confirm = false; + self.clearPendingDelete(); self.pending_idx = null; - self.delete_action = null; } return; } @@ -96,6 +105,7 @@ pub const BackupView = struct { } else if (key.matches('d', .{})) { // Only allow deletion on PVE backup rows if (self.selected < self.num_backups) { + self.clearPendingDelete(); self.pending_idx = self.selected; self.show_confirm = true; self.delete_action = null; @@ -221,22 +231,18 @@ pub const BackupView = struct { // Confirmation dialog overlay if (self.show_confirm) { - if (self.pending_idx) |idx| { - // Map filtered idx back to actual backup - var actual_idx: u16 = 0; - var filtered_count: u16 = 0; - for (backups, 0..) 
|_, i| { - if (self.matchesFilter(backups[i], filter)) { - if (filtered_count == idx) { - actual_idx = @intCast(i); - break; - } - filtered_count += 1; + if (self.pending_delete == null) { + if (self.pending_idx) |idx| { + if (self.filteredBackupIndex(backups, idx)) |actual_idx| { + self.pending_delete = self.actionFromBackup(backups[actual_idx]) catch null; + } else { + self.show_confirm = false; + self.pending_idx = null; } } - if (actual_idx < backups.len) { - self.drawConfirmDialog(win, backups[actual_idx]); - } + } + if (self.pending_delete) |action| { + self.drawConfirmDialog(win, action.volid); } } } @@ -343,7 +349,7 @@ pub const BackupView = struct { }); } - fn drawConfirmDialog(self: *BackupView, win: vaxis.Window, backup: poll.BackupRow) void { + fn drawConfirmDialog(self: *BackupView, win: vaxis.Window, volid: []const u8) void { _ = self; const box_w: u16 = 52; const box_h: u16 = 7; @@ -370,7 +376,7 @@ pub const BackupView = struct { }); var name_buf: [48]u8 = undefined; - const name_line = std.fmt.bufPrint(&name_buf, " {s}", .{truncate(backup.volid, 46)}) catch " ?"; + const name_line = std.fmt.bufPrint(&name_buf, " {s}", .{truncate(volid, 46)}) catch " ?"; _ = dialog.print(&.{.{ .text = name_line, .style = text_style }}, .{ .row_offset = 2, .wrap = .none, @@ -383,23 +389,12 @@ pub const BackupView = struct { } /// Check if there's a pending delete action and consume it. 
- pub fn consumeDeleteAction(self: *BackupView, backups: []const poll.BackupRow) ?DeleteAction { + pub fn consumeDeleteAction(self: *BackupView) ?DeleteAction { if (self.delete_action != null) { - self.delete_action = null; self.pending_idx = null; - return null; - } - // Check if confirm was just accepted - if (!self.show_confirm and self.pending_idx != null) { - if (self.filteredBackupIndex(backups, self.pending_idx.?)) |idx| { - const b = backups[idx]; - self.pending_idx = null; - return .{ - .node = b.node, - .storage = b.storage, - .volid = b.volid, - }; - } + const action = self.delete_action.?; + self.delete_action = null; + return action; } return null; } @@ -427,6 +422,32 @@ pub const BackupView = struct { } return null; } + + fn actionFromBackup(self: *BackupView, backup: poll.BackupRow) !DeleteAction { + const proxmox_cluster = try self.allocator.dupe(u8, backup.proxmox_cluster); + errdefer self.allocator.free(proxmox_cluster); + const node = try self.allocator.dupe(u8, backup.node); + errdefer self.allocator.free(node); + const storage = try self.allocator.dupe(u8, backup.storage); + errdefer self.allocator.free(storage); + const volid = try self.allocator.dupe(u8, backup.volid); + return .{ + .proxmox_cluster = proxmox_cluster, + .node = node, + .storage = storage, + .volid = volid, + }; + } + + fn clearPendingDelete(self: *BackupView) void { + if (self.pending_delete) |action| { + self.allocator.free(action.proxmox_cluster); + self.allocator.free(action.node); + self.allocator.free(action.storage); + self.allocator.free(action.volid); + self.pending_delete = null; + } + } }; /// Case-insensitive substring check (ASCII only). 
From ccba27fd439d680e2e0d4422b827ac7ac193dfbd Mon Sep 17 00:00:00 2001 From: OneNoted Date: Thu, 19 Mar 2026 12:30:22 +0100 Subject: [PATCH 14/14] fix: harden performance table sorting and viewport handling --- tui/src/poll.zig | 4 ++++ tui/src/views/performance.zig | 17 ++++++++++++----- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/tui/src/poll.zig b/tui/src/poll.zig index f1c568a..cbdf1c7 100644 --- a/tui/src/poll.zig +++ b/tui/src/poll.zig @@ -223,6 +223,8 @@ pub const PodMetricRow = struct { net_tx_str: []const u8, // e.g. "0.5 KiB/s" cpu_cores: f64, // for sorting mem_bytes: f64, // for sorting + net_rx_bytes_sec: f64, // for sorting + net_tx_bytes_sec: f64, // for sorting }; /// Thread-safe shared state for performance view data. @@ -925,6 +927,8 @@ pub const Poller = struct { .net_tx_str = formatRate(alloc, tx_val), .cpu_cores = cpu.value, .mem_bytes = mem_val, + .net_rx_bytes_sec = rx_val, + .net_tx_bytes_sec = tx_val, }) catch continue; } } diff --git a/tui/src/views/performance.zig b/tui/src/views/performance.zig index 09c951f..01152bc 100644 --- a/tui/src/views/performance.zig +++ b/tui/src/views/performance.zig @@ -193,6 +193,10 @@ pub const PerformanceView = struct { // Scrolling const visible = win.height -| current_row -| 1; + if (visible == 0) { + self.scroll = 0; + return; + } if (self.selected < self.scroll) { self.scroll = self.selected; } else if (self.selected >= self.scroll + visible) { @@ -352,19 +356,22 @@ pub const PerformanceView = struct { } }.cmp), .net_rx => { - // Sort by pod name as fallback since we don't store raw rx value in PodMetricRow std.mem.sort(poll.PodMetricRow, items, asc, struct { fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - const ord = std.mem.order(u8, a.net_rx_str, b.net_rx_str); - return if (ascending) ord == .lt else ord == .gt; + return if (ascending) + a.net_rx_bytes_sec < b.net_rx_bytes_sec + else + a.net_rx_bytes_sec > b.net_rx_bytes_sec; } }.cmp); }, .net_tx => { 
std.mem.sort(poll.PodMetricRow, items, asc, struct { fn cmp(ascending: bool, a: poll.PodMetricRow, b: poll.PodMetricRow) bool { - const ord = std.mem.order(u8, a.net_tx_str, b.net_tx_str); - return if (ascending) ord == .lt else ord == .gt; + return if (ascending) + a.net_tx_bytes_sec < b.net_tx_bytes_sec + else + a.net_tx_bytes_sec > b.net_tx_bytes_sec; } }.cmp); },