From 1d61739a41cdce8bcb1b50be93f990cc34ad510c Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 12:21:05 -0800 Subject: [PATCH 01/20] Fix clippy warnings Mostly mechanical fixes: removing needless borrows, adding lifetime annotations, using modern numeric constants (i32::MAX), and suppressing warnings for dropshot-generated dead code. Notable equivalences: - .as_bytes().len() to .len() on String: both return byte count - .trim().split_whitespace() to .split_whitespace(): split_whitespace already skips leading/trailing whitespace - .into_iter() to .iter() on slices: both yield &T --- agent/src/control/mod.rs | 10 +++++----- agent/src/exec.rs | 9 +++------ agent/src/main.rs | 2 +- bin/src/main.rs | 8 ++++---- download/src/kinds/url.rs | 1 + download/src/lib.rs | 2 +- factory/aws/src/aws.rs | 9 ++++----- factory/aws/src/main.rs | 2 +- factory/gimlet/src/cleanup.rs | 16 ++++++++-------- factory/gimlet/src/disks.rs | 8 ++++---- factory/gimlet/src/host.rs | 10 +++++----- factory/gimlet/src/humility.rs | 3 ++- factory/gimlet/src/instance.rs | 2 +- factory/lab/src/main.rs | 2 ++ factory/lab/src/minder.rs | 4 ++-- factory/propolis/src/config.rs | 2 +- factory/propolis/src/main.rs | 2 +- factory/propolis/src/vm.rs | 4 ++-- github/database/src/tables/check_suite.rs | 2 +- github/dbtool/src/main.rs | 16 ++++++++-------- github/server/src/http.rs | 6 +++--- github/server/src/main.rs | 3 ++- github/server/src/variety/basic.rs | 10 +++++----- jobsh/src/variety/basic.rs | 2 +- server/src/db/mod.rs | 2 +- server/src/main.rs | 1 + server/src/workers.rs | 6 ++---- sse/src/lib.rs | 2 +- 28 files changed, 73 insertions(+), 73 deletions(-) diff --git a/agent/src/control/mod.rs b/agent/src/control/mod.rs index b26493a..832f4de 100644 --- a/agent/src/control/mod.rs +++ b/agent/src/control/mod.rs @@ -178,13 +178,13 @@ async fn cmd_address_list(mut l: Level) -> Result<()> { let mut r = Row::default(); r.add_str("name", &addr.name); - r.add_str("cidr", &net.to_string()); - r.add_str("first", &first.to_string()); - r.add_str("last", &last.to_string()); + r.add_str("cidr", net.to_string()); + r.add_str("first", first.to_string()); + r.add_str("last", last.to_string()); r.add_u64("count", addr.count.into()); r.add_str("family", "inet"); - r.add_str("network", &net.network().to_string()); - r.add_str("mask", &net.netmask().to_string()); + r.add_str("network", net.network().to_string()); + r.add_str("mask", net.netmask().to_string()); r.add_str("routed", if addr.routed { "yes" } else { "no" }); r.add_str("gateway", addr.gateway.as_deref().unwrap_or("-")); diff --git a/agent/src/exec.rs b/agent/src/exec.rs index 6a82677..e7c8184 100644 --- a/agent/src/exec.rs +++ b/agent/src/exec.rs @@ -23,10 +23,7 @@ fn spawn_reader( where T: Read + Send + 'static, { - let stream = match stream { - Some(stream) => stream, - None => return None, - }; + let stream = stream?; Some(std::thread::spawn(move || { let mut r = BufReader::new(stream); @@ -297,7 +294,7 @@ fn run_common( * process. 
*/ if ab.bgproc.is_none() { - tx.blocking_send(ab.exit(&start, &end, std::i32::MAX)) + tx.blocking_send(ab.exit(&start, &end, i32::MAX)) .unwrap(); } @@ -332,7 +329,7 @@ fn run_common( let code = if let Some(code) = es.code() { code } else { - std::i32::MAX + i32::MAX }; tx.blocking_send(ab.exit(&start, &end, code)).unwrap(); stdio_warning diff --git a/agent/src/main.rs b/agent/src/main.rs index 4705440..c32b02a 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -1093,7 +1093,7 @@ async fn cmd_install(mut l: Level) -> Result<()> { let status = Command::new("/sbin/zfs") .arg("create") .arg("-o") - .arg(&format!("mountpoint={}", INPUT_PATH)) + .arg(format!("mountpoint={}", INPUT_PATH)) .arg(INPUT_DATASET) .env_clear() .current_dir("/") diff --git a/bin/src/main.rs b/bin/src/main.rs index 71b340c..ad7a5d5 100644 --- a/bin/src/main.rs +++ b/bin/src/main.rs @@ -26,11 +26,11 @@ mod config; trait FlagsExt { #[must_use] - fn add_flags(&mut self, name: &'static str) -> Flags; + fn add_flags(&mut self, name: &'static str) -> Flags<'_>; } impl FlagsExt for Row { - fn add_flags(&mut self, name: &'static str) -> Flags { + fn add_flags(&mut self, name: &'static str) -> Flags<'_> { Flags { row: self, name, out: String::new() } } } @@ -1226,7 +1226,7 @@ async fn do_job_store_list(mut l: Level) -> Result<()> { ); r.add_str( "updated", - &ent.time_update.to_rfc3339_opts(chrono::SecondsFormat::Secs, true), + ent.time_update.to_rfc3339_opts(chrono::SecondsFormat::Secs, true), ); t.add_row(r); @@ -1351,7 +1351,7 @@ async fn do_user_list(mut l: Level) -> Result<()> { r.add_str("name", &u.name); r.add_str( "creation", - &u.time_create.to_rfc3339_opts(chrono::SecondsFormat::Secs, true), + u.time_create.to_rfc3339_opts(chrono::SecondsFormat::Secs, true), ); r.add_age("age", u.time_create.age()); t.add_row(r); diff --git a/download/src/kinds/url.rs b/download/src/kinds/url.rs index 83c4131..c027d93 100644 --- a/download/src/kinds/url.rs +++ b/download/src/kinds/url.rs @@ -27,6 +27,7 @@ const BUF_COUNT: usize = 10; * assessed against the actual file size and a partial (206) response will be * returned, for both GET and HEAD. */ +#[allow(clippy::too_many_arguments)] pub async fn stream_from_url( log: &Logger, info: String, diff --git a/download/src/lib.rs b/download/src/lib.rs index 5a5ec4b..03c3e55 100644 --- a/download/src/lib.rs +++ b/download/src/lib.rs @@ -43,7 +43,7 @@ fn make_get_response( rx: mpsc::Receiver, E>>, ) -> Result> where - E: Into> + E: Into> + Send + Sync + 'static, diff --git a/factory/aws/src/aws.rs b/factory/aws/src/aws.rs index 237d34f..f455dcd 100644 --- a/factory/aws/src/aws.rs +++ b/factory/aws/src/aws.rs @@ -70,7 +70,7 @@ impl Instance { .try_into() .unwrap(); - (if when <= now { now - when } else { 0 }) / 1000 + now.saturating_sub(when) / 1000 }) .unwrap_or(0) } @@ -188,7 +188,7 @@ async fn create_instance( let mut instances = res .instances() - .into_iter() + .iter() .map(|i| Instance::from((i, config.aws.tag.as_str()))) .collect::>(); @@ -218,13 +218,12 @@ async fn instances( Ok(res .reservations() .iter() - .map(|r| { + .flat_map(|r| { r.instances().iter().map(|i| { let i = Instance::from((i, tag)); (i.id.to_string(), i) }) }) - .flatten() .collect()) } @@ -421,7 +420,7 @@ async fn aws_worker_one( * There is a record of a particular instance ID for this worker. * Check to see if that instance exists. */ - if let Some(i) = insts.get(&instance_id.to_string()) { + if let Some(i) = insts.get(instance_id) { if i.state == "terminated" { /* * The instance exists, but is terminated. 
Delete the diff --git a/factory/aws/src/main.rs b/factory/aws/src/main.rs index 4c7c96c..f3a510d 100644 --- a/factory/aws/src/main.rs +++ b/factory/aws/src/main.rs @@ -72,7 +72,7 @@ impl Central { * Allow the per-target diagnostic configuration to override the base * diagnostic configuration. */ - Ok(self.config.diag.apply_overrides(&t.diag).build()?) + self.config.diag.apply_overrides(&t.diag).build() } } diff --git a/factory/gimlet/src/cleanup.rs b/factory/gimlet/src/cleanup.rs index 2919a57..9eccb3c 100644 --- a/factory/gimlet/src/cleanup.rs +++ b/factory/gimlet/src/cleanup.rs @@ -110,7 +110,7 @@ pub fn setup() -> Result<()> { bail!("giving up after {MAX_TIME} seconds"); } - if let Ok(st) = svcs(&fmri) { + if let Ok(st) = svcs(fmri) { if st.next.is_none() && st.current == "ON" { println!(" * {fmri} now online!"); break; @@ -125,7 +125,7 @@ pub fn setup() -> Result<()> { * only be one! */ let pools = zpool_unimported_list()?; - if pools.len() == 0 { + if pools.is_empty() { bail!("no unimported pool found!"); } else if pools.len() > 1 { bail!("more than one unimported pool found!"); @@ -137,14 +137,14 @@ pub fn setup() -> Result<()> { }; println!(" * importing pool {pool:?}..."); - zpool_import(&pool)?; + zpool_import(pool)?; /* * Update the BSU symlink: */ std::fs::create_dir_all("/pool/bsu")?; std::fs::remove_file("/pool/bsu/0").ok(); - std::os::unix::fs::symlink(&format!("../int/{pool_id}"), "/pool/bsu/0")?; + std::os::unix::fs::symlink(format!("../int/{pool_id}"), "/pool/bsu/0")?; /* * Create the swap device: @@ -252,14 +252,14 @@ fn prepare_m2() -> Result<()> { let p = &mut vtoc.parts_mut()[i]; p.p_tag = efi::sys::V_USR; p.p_start = next_start; - p.p_size = size.into(); + p.p_size = size; next_start = next_start.checked_add(size).unwrap(); } /* * Create the dump slice: */ - let size = (1 * 1024 * 1024 * 1024 * 1024) / vtoc.block_size(); + let size = (1024 * 1024 * 1024 * 1024) / vtoc.block_size(); let p = &mut vtoc.parts_mut()[4]; p.p_tag = efi::sys::V_USR; p.p_start = next_start; @@ -287,11 +287,11 @@ fn prepare_m2() -> Result<()> { .env_clear() .arg("create") .arg("-O") - .arg(&format!("mountpoint=/pool/int/{id}")) + .arg(format!("mountpoint=/pool/int/{id}")) .arg("-O") .arg("compress=on") .arg(&pool) - .arg(&format!("{}s5", d.name)) + .arg(format!("{}s5", d.name)) .output()?; if !out.status.success() { diff --git a/factory/gimlet/src/disks.rs b/factory/gimlet/src/disks.rs index c7fe058..d738684 100644 --- a/factory/gimlet/src/disks.rs +++ b/factory/gimlet/src/disks.rs @@ -121,7 +121,7 @@ pub fn list_disks() -> Result { slice: u32| -> Result> { let mut paths = links - .links_for_path(&device)? + .links_for_path(device)? .into_iter() .filter(|l| { l.linktype() == devinfo::DevLinkType::Primary @@ -193,18 +193,18 @@ pub fn list_disks() -> Result { */ let device = m.devfs_path()?; - Ok((0..=8) + (0..=8) .map(|num| { Ok(slice_from_path(&device, num)?.map(|link| { (num, m.spec_type(), link.path().to_owned()) })) }) - .collect::>>()?) + .collect::>>() }) .collect::>>()? 
.into_iter() .flatten() - .filter_map(|v| v) + .flatten() .collect::>(); /* diff --git a/factory/gimlet/src/host.rs b/factory/gimlet/src/host.rs index 399c987..74cf715 100644 --- a/factory/gimlet/src/host.rs +++ b/factory/gimlet/src/host.rs @@ -624,7 +624,7 @@ impl HostManager { Ok(()) } HostState::Cleaning(cst) => { - let mut new_cst = cst.clone(); + let mut new_cst = *cst; { let mut l = self.locked.lock().unwrap(); @@ -700,7 +700,7 @@ impl HostManager { Ok(()) } HostState::Starting(sst, hgs) => { - let mut new_sst = sst.clone(); + let mut new_sst = *sst; { let mut l = self.locked.lock().unwrap(); @@ -726,7 +726,7 @@ impl HostManager { } } - if let Err(e) = self.thread_starting(&mut new_sst, &hgs) { + if let Err(e) = self.thread_starting(&mut new_sst, hgs) { error!(log, "starting error: {e}"); } @@ -794,7 +794,7 @@ impl HostManager { let pb = sys .ssh_exec("svcs -Ho sta,nsta svc:/site/postboot:default")?; - let t = pb.out.trim().split_whitespace().collect::>(); + let t = pb.out.split_whitespace().collect::>(); if t.len() != 2 || t[0] == "ON" || t[1] == "-" { bail!("postboot not ready: {t:?}"); } @@ -847,7 +847,7 @@ impl HostManager { let pb = sys .ssh_exec("svcs -Ho sta,nsta svc:/site/postboot:default")?; - let t = pb.out.trim().split_whitespace().collect::>(); + let t = pb.out.split_whitespace().collect::>(); if t.len() != 2 || t[0] == "ON" || t[1] == "-" { bail!("postboot not ready: {t:?}"); } diff --git a/factory/gimlet/src/humility.rs b/factory/gimlet/src/humility.rs index ec43267..3d1cb6f 100644 --- a/factory/gimlet/src/humility.rs +++ b/factory/gimlet/src/humility.rs @@ -17,6 +17,7 @@ use slog::{info, warn, Logger}; use crate::pipe::*; use buildomat_common::OutputExt; +#[allow(clippy::wrong_self_convention)] pub trait ValueExt { fn is_unit(&self) -> bool; fn from_hex(&self) -> Result; @@ -57,7 +58,7 @@ impl ValueExt for Value { bail!("expected a hex number, got {t:?}"); }; - out.push(u8::from_str_radix(&hex, 16)?); + out.push(u8::from_str_radix(hex, 16)?); } _ => bail!("list should only contain terms"), } diff --git a/factory/gimlet/src/instance.rs b/factory/gimlet/src/instance.rs index 64a69b1..1580175 100644 --- a/factory/gimlet/src/instance.rs +++ b/factory/gimlet/src/instance.rs @@ -83,7 +83,7 @@ async fn instance_worker_start( } instances_with_workers.insert(id.clone()); - let c = Arc::clone(&c); + let c = Arc::clone(c); let log = log.new(o!( "component" => "instance_worker", "instance" => id.to_string(), diff --git a/factory/lab/src/main.rs b/factory/lab/src/main.rs index 5a7380b..1a31ab4 100644 --- a/factory/lab/src/main.rs +++ b/factory/lab/src/main.rs @@ -2,6 +2,8 @@ * Copyright 2023 Oxide Computer Company */ +#![allow(dead_code)] + use std::{ collections::HashMap, sync::{Arc, Mutex}, diff --git a/factory/lab/src/minder.rs b/factory/lab/src/minder.rs index 5773e2a..98732d0 100644 --- a/factory/lab/src/minder.rs +++ b/factory/lab/src/minder.rs @@ -74,7 +74,7 @@ impl FormatScript { .replace("%BASEURL%", &self.host_config.lab_baseurl) .replace("%HOST%", &self.host_config.nodename) .replace("%CONSOLE%", &self.host_config.console) - .replace("%BOOTARGS%", &bootargs) + .replace("%BOOTARGS%", bootargs) .replace("%COREURL%", &self.coreurl); if let Some(key) = self.key.as_deref() { @@ -149,7 +149,7 @@ impl CentralExt for Central { &self, name: &str, ) -> HResult<&super::config::ConfigFileHost> { - if let Some(h) = self.hosts.get(&name.to_string()) { + if let Some(h) = self.hosts.get(name) { Ok(&h.config) } else { Err(dropshot::HttpError::for_client_error( diff --git 
a/factory/propolis/src/config.rs b/factory/propolis/src/config.rs index 2bcc1a2..23fbcbf 100644 --- a/factory/propolis/src/config.rs +++ b/factory/propolis/src/config.rs @@ -93,7 +93,7 @@ impl ConfigFile { &self, slot: u32, id: &InstanceId, - ) -> Result { + ) -> Result> { Ok(InstanceInSlot { config: self, slot, id: id.clone() }) } diff --git a/factory/propolis/src/main.rs b/factory/propolis/src/main.rs index 7e0cde2..19873dc 100644 --- a/factory/propolis/src/main.rs +++ b/factory/propolis/src/main.rs @@ -39,7 +39,7 @@ impl Central { * Allow the per-target diagnostic configuration to override the base * diagnostic configuration. */ - Ok(self.config.diag.apply_overrides(&t.diag).build()?) + self.config.diag.apply_overrides(&t.diag).build() } } diff --git a/factory/propolis/src/vm.rs b/factory/propolis/src/vm.rs index 3e25a52..896a51a 100644 --- a/factory/propolis/src/vm.rs +++ b/factory/propolis/src/vm.rs @@ -461,8 +461,8 @@ async fn instance_worker_one( include_str!("../smf/serial.xml"), ), ] { - std::fs::write(&vmdir.join(&format!("{name}.sh")), script)?; - std::fs::write(&smfdir.join(format!("{name}.xml")), bundle)?; + std::fs::write(vmdir.join(format!("{name}.sh")), script)?; + std::fs::write(smfdir.join(format!("{name}.xml")), bundle)?; } std::fs::write(&siteprofile, include_str!("../smf/site.xml"))?; diff --git a/github/database/src/tables/check_suite.rs b/github/database/src/tables/check_suite.rs index 3fd23d9..c538552 100644 --- a/github/database/src/tables/check_suite.rs +++ b/github/database/src/tables/check_suite.rs @@ -61,7 +61,7 @@ impl From for JobFile { content, dependencies: dependencies .into_iter() - .map(|(a, b)| (a.into(), b.into())) + .map(|(a, b)| (a, b.into())) .collect(), } } diff --git a/github/dbtool/src/main.rs b/github/dbtool/src/main.rs index a70b21c..d3bd651 100644 --- a/github/dbtool/src/main.rs +++ b/github/dbtool/src/main.rs @@ -22,11 +22,11 @@ const SHORT_SHA_LEN: usize = 16; trait FlagsExt { #[must_use] - fn add_flags(&mut self, name: &'static str) -> Flags; + fn add_flags(&mut self, name: &'static str) -> Flags<'_>; } impl FlagsExt for Row { - fn add_flags(&mut self, name: &'static str) -> Flags { + fn add_flags(&mut self, name: &'static str) -> Flags<'_> { Flags { row: self, name, out: String::new() } } } @@ -69,7 +69,7 @@ impl Stuff { std::fs::DirBuilder::new().mode(0o700).recursive(true).create(&out)?; out.push(set); std::fs::DirBuilder::new().mode(0o700).recursive(true).create(&out)?; - out.push(&format!("{}.json", file)); + out.push(format!("{}.json", file)); Ok(out) } } @@ -290,7 +290,7 @@ async fn do_delivery_list(mut l: Level) -> Result<()> { ); r.add_str( "ack", - &del.ack.map(|n| n.to_string()).unwrap_or_else(|| "-".to_string()), + del.ack.map(|n| n.to_string()).unwrap_or_else(|| "-".to_string()), ); let seq = del.seq; @@ -371,10 +371,10 @@ async fn do_check_suite_list(mut l: Level) -> Result<()> { for suite in l.context().db().list_check_suites()? { let mut r = Row::default(); - r.add_str("id", &suite.id.to_string()); + r.add_str("id", suite.id.to_string()); r.add_u64("ghid", suite.github_id as u64); r.add_u64("repo", suite.repo as u64); - r.add_str("state", &format!("{:?}", suite.state)); + r.add_str("state", format!("{:?}", suite.state)); r.add_str("ssha", &suite.head_sha[0..SHORT_SHA_LEN]); r.add_str("sha", suite.head_sha); r.add_str("branch", suite.head_branch.as_deref().unwrap_or("-")); @@ -407,14 +407,14 @@ async fn do_check_suite_runs(mut l: Level) -> Result<()> { for run in l.context().db().list_check_runs_for_suite(csid)? 
{ let mut r = Row::default(); - r.add_str("id", &run.id.to_string()); + r.add_str("id", run.id.to_string()); r.add_flags("flags") .flag('A', run.active) .flag('F', run.flushed) .build(); r.add_str("active", if run.active { "yes" } else { "no" }); r.add_str("flushed", if run.flushed { "yes" } else { "no" }); - r.add_str("variety", &run.variety.to_string()); + r.add_str("variety", run.variety.to_string()); r.add_str("name", &run.name); t.add_row(r); diff --git a/github/server/src/http.rs b/github/server/src/http.rs index 2bd1f93..68c9624 100644 --- a/github/server/src/http.rs +++ b/github/server/src/http.rs @@ -359,7 +359,7 @@ async fn details( Ok(hyper::Response::builder() .status(hyper::StatusCode::OK) .header(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8") - .header(hyper::header::CONTENT_LENGTH, out.as_bytes().len()) + .header(hyper::header::CONTENT_LENGTH, out.len()) .body(Body::from(out))?) } @@ -831,7 +831,7 @@ async fn status_impl( Ok(hyper::Response::builder() .status(hyper::StatusCode::OK) .header(hyper::header::CONTENT_TYPE, "text/html; charset=utf-8") - .header(hyper::header::CONTENT_LENGTH, out.as_bytes().len()) + .header(hyper::header::CONTENT_LENGTH, out.len()) .body(Body::from(out))?) } @@ -1020,7 +1020,7 @@ async fn branch_to_commit( Ok(hyper::Response::builder() .status(hyper::StatusCode::OK) .header(hyper::header::CONTENT_TYPE, "text/plain") - .header(hyper::header::CONTENT_LENGTH, body.as_bytes().len()) + .header(hyper::header::CONTENT_LENGTH, body.len()) .body(body.into())?) } diff --git a/github/server/src/main.rs b/github/server/src/main.rs index a274f65..17616fc 100644 --- a/github/server/src/main.rs +++ b/github/server/src/main.rs @@ -3,6 +3,7 @@ */ #![allow(clippy::vec_init_then_push)] +#![allow(dead_code)] use anyhow::{anyhow, bail, Context, Result}; use base64::Engine; @@ -300,7 +301,7 @@ impl App { fn buildomat(&self, repo: &Repository) -> buildomat_client::Client { buildomat_client::ClientBuilder::new(&self.config.buildomat.url) .bearer_token(&self.config.buildomat.token) - .delegated_user(&self.buildomat_username(repo)) + .delegated_user(self.buildomat_username(repo)) .build() .unwrap() } diff --git a/github/server/src/variety/basic.rs b/github/server/src/variety/basic.rs index 750d6bf..0a7f52b 100644 --- a/github/server/src/variety/basic.rs +++ b/github/server/src/variety/basic.rs @@ -678,11 +678,11 @@ pub(crate) async fn run( * We pass several GitHub-specific environment variables to tasks in the * job: */ - tb.env("GITHUB_REPOSITORY", &format!("{}/{}", repo.owner, repo.name)); + tb.env("GITHUB_REPOSITORY", format!("{}/{}", repo.owner, repo.name)); tb.env("GITHUB_SHA", &cs.head_sha); if let Some(branch) = cs.head_branch.as_deref() { tb.env("GITHUB_BRANCH", branch); - tb.env("GITHUB_REF", &format!("refs/heads/{}", branch)); + tb.env("GITHUB_REF", format!("refs/heads/{}", branch)); } let app0 = app.clone(); @@ -1046,7 +1046,7 @@ pub(crate) async fn artefact( .header(hyper::header::CONTENT_TYPE, "text/html") .header(hyper::header::CONTENT_LENGTH, md.len()) .body(Body::wrap(StreamBody::new( - stream.map_ok(|b| Frame::data(b)), + stream.map_ok(Frame::data), )))?, )); } @@ -1110,7 +1110,7 @@ pub(crate) async fn artefact( .header(hyper::header::CONTENT_TYPE, ct) .header(hyper::header::CONTENT_LENGTH, cl) .body(Body::wrap(StreamBody::new( - backend.into_inner_stream().map_ok(|b| Frame::data(b)), + backend.into_inner_stream().map_ok(Frame::data), )))?, )); } @@ -1257,7 +1257,7 @@ pub(crate) async fn details( &bm, &job, local_time, - format!("./{}/live", 
cr.id.to_string()), + format!("./{}/live", cr.id), ) .await?; } diff --git a/jobsh/src/variety/basic.rs b/jobsh/src/variety/basic.rs index 92aec58..a5c32e6 100644 --- a/jobsh/src/variety/basic.rs +++ b/jobsh/src/variety/basic.rs @@ -95,7 +95,7 @@ pub async fn output_table( for ev in page { minseq = ev.seq + 1; - payload_size += ev.payload.as_bytes().len(); + payload_size += ev.payload.len(); events.push(ev); if payload_size > PAYLOAD_SIZE_MAX { diff --git a/server/src/db/mod.rs b/server/src/db/mod.rs index 9c78fb8..8636a6a 100644 --- a/server/src/db/mod.rs +++ b/server/src/db/mod.rs @@ -2361,7 +2361,7 @@ impl Database { */ let max_val_count = 100; let max_val_kib = 10; - if value.as_bytes().len() > max_val_kib * 1024 { + if value.len() > max_val_kib * 1024 { conflict!("maximum value size is {max_val_kib}KiB"); } diff --git a/server/src/main.rs b/server/src/main.rs index dc52d40..d9992c6 100644 --- a/server/src/main.rs +++ b/server/src/main.rs @@ -4,6 +4,7 @@ #![allow(clippy::many_single_char_names)] #![allow(clippy::too_many_arguments)] +#![allow(dead_code)] use std::collections::VecDeque; use std::io::{Seek, Write}; diff --git a/server/src/workers.rs b/server/src/workers.rs index 5fdf69e..0a375b4 100644 --- a/server/src/workers.rs +++ b/server/src/workers.rs @@ -163,10 +163,8 @@ pub(crate) async fn worker_cleanup(log: Logger, c: Arc) -> Result<()> { info!(log, "starting worker liveness checks"); liveness_checks = true; } - } else { - if let Err(e) = worker_liveness_one(&log, &c).await { - error!(log, "worker liveness task error: {:?}", e); - } + } else if let Err(e) = worker_liveness_one(&log, &c).await { + error!(log, "worker liveness task error: {:?}", e); } tokio::time::sleep(delay).await; diff --git a/sse/src/lib.rs b/sse/src/lib.rs index 7d9b968..d69aa27 100644 --- a/sse/src/lib.rs +++ b/sse/src/lib.rs @@ -111,7 +111,7 @@ impl ServerSentEvents { ))))?) } - pub fn build_event(&self) -> EventBuilder { + pub fn build_event(&self) -> EventBuilder<'_> { EventBuilder { sse: self, id: None, event: None, data: None } } From 3161ba3104070d7a9c7adba5eaa2287368e93ece Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 09:37:23 -0800 Subject: [PATCH 02/20] Update agent/src/main.rs Co-authored-by: Joshua M. Clulow --- agent/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/src/main.rs b/agent/src/main.rs index c32b02a..5866ee1 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -1093,7 +1093,7 @@ async fn cmd_install(mut l: Level) -> Result<()> { let status = Command::new("/sbin/zfs") .arg("create") .arg("-o") - .arg(format!("mountpoint={}", INPUT_PATH)) + .arg(format!("mountpoint={INPUT_PATH}")) .arg(INPUT_DATASET) .env_clear() .current_dir("/") From 1733675b3aa6bd58d0040a77c0c11a021d68d678 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 09:38:34 -0800 Subject: [PATCH 03/20] Update github/dbtool/src/main.rs Co-authored-by: Joshua M. 
Clulow --- github/dbtool/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/github/dbtool/src/main.rs b/github/dbtool/src/main.rs index d3bd651..52f4cc3 100644 --- a/github/dbtool/src/main.rs +++ b/github/dbtool/src/main.rs @@ -69,7 +69,7 @@ impl Stuff { std::fs::DirBuilder::new().mode(0o700).recursive(true).create(&out)?; out.push(set); std::fs::DirBuilder::new().mode(0o700).recursive(true).create(&out)?; - out.push(format!("{}.json", file)); + out.push(format!("{file}.json")); Ok(out) } } From 5a401f8fa94e9abb5aa4af5e7656183ff5526e6e Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 09:38:44 -0800 Subject: [PATCH 04/20] Update server/src/workers.rs Co-authored-by: Joshua M. Clulow --- server/src/workers.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/server/src/workers.rs b/server/src/workers.rs index 0a375b4..48c4f4d 100644 --- a/server/src/workers.rs +++ b/server/src/workers.rs @@ -164,7 +164,7 @@ pub(crate) async fn worker_cleanup(log: Logger, c: Arc) -> Result<()> { liveness_checks = true; } } else if let Err(e) = worker_liveness_one(&log, &c).await { - error!(log, "worker liveness task error: {:?}", e); + error!(log, "worker liveness task error: {e:?}"); } tokio::time::sleep(delay).await; From 12b4c937f93fa98f685d8d505676353f9789aae6 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 09:38:56 -0800 Subject: [PATCH 05/20] Update github/server/src/variety/basic.rs Co-authored-by: Joshua M. Clulow --- github/server/src/variety/basic.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/github/server/src/variety/basic.rs b/github/server/src/variety/basic.rs index 0a7f52b..c662491 100644 --- a/github/server/src/variety/basic.rs +++ b/github/server/src/variety/basic.rs @@ -682,7 +682,7 @@ pub(crate) async fn run( tb.env("GITHUB_SHA", &cs.head_sha); if let Some(branch) = cs.head_branch.as_deref() { tb.env("GITHUB_BRANCH", branch); - tb.env("GITHUB_REF", format!("refs/heads/{}", branch)); + tb.env("GITHUB_REF", format!("refs/heads/{branch}")); } let app0 = app.clone(); From 5803a91f1e083599437b9a2929cfe86ba42149d8 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 09:40:52 -0800 Subject: [PATCH 06/20] Update download/src/kinds/url.rs Co-authored-by: Joshua M. Clulow --- download/src/kinds/url.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/download/src/kinds/url.rs b/download/src/kinds/url.rs index c027d93..83c4131 100644 --- a/download/src/kinds/url.rs +++ b/download/src/kinds/url.rs @@ -27,7 +27,6 @@ const BUF_COUNT: usize = 10; * assessed against the actual file size and a partial (206) response will be * returned, for both GET and HEAD. */ -#[allow(clippy::too_many_arguments)] pub async fn stream_from_url( log: &Logger, info: String, From b971cd2b32c407e442528c4a9313abb2997f820d Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 09:41:00 -0800 Subject: [PATCH 07/20] Update factory/gimlet/src/humility.rs Co-authored-by: Joshua M. 
Clulow --- factory/gimlet/src/humility.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/factory/gimlet/src/humility.rs b/factory/gimlet/src/humility.rs index 3d1cb6f..1611ba8 100644 --- a/factory/gimlet/src/humility.rs +++ b/factory/gimlet/src/humility.rs @@ -17,7 +17,6 @@ use slog::{info, warn, Logger}; use crate::pipe::*; use buildomat_common::OutputExt; -#[allow(clippy::wrong_self_convention)] pub trait ValueExt { fn is_unit(&self) -> bool; fn from_hex(&self) -> Result; From efbe776a4f9e2d7d03f9cafe1e3732903c5d167a Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 10:24:40 -0800 Subject: [PATCH 08/20] ignore editorial clippy complaints --- Cargo.toml | 4 ++++ download/Cargo.toml | 3 +++ factory/gimlet/Cargo.toml | 3 +++ 3 files changed, 10 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 0a77a5f..c723433 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,6 +26,10 @@ members = [ ] resolver = "2" +[workspace.lints.clippy] +too_many_arguments = "allow" +wrong_self_convention = "allow" + [workspace.dependencies] ansi-to-html = { version = "0.2", features = [ "lazy-init" ] } anyhow = "1" diff --git a/download/Cargo.toml b/download/Cargo.toml index fba1796..6a0c8a0 100644 --- a/download/Cargo.toml +++ b/download/Cargo.toml @@ -4,6 +4,9 @@ version = "0.0.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [lib] doctest = false diff --git a/factory/gimlet/Cargo.toml b/factory/gimlet/Cargo.toml index 2d0858d..d4e14ce 100644 --- a/factory/gimlet/Cargo.toml +++ b/factory/gimlet/Cargo.toml @@ -4,6 +4,9 @@ version = "0.0.0" edition = "2021" license = "MPL-2.0" +[lints] +workspace = true + [dependencies] buildomat-common = { path = "../../common" } buildomat-database = { path = "../../database" } From 1a5d92cf63b9670d0dabe175bb50fdf3d572bbf2 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 12:48:34 -0800 Subject: [PATCH 09/20] Fix test compilation by using correct JobFile type Tests in github/testdata and github/server were using buildomat_github_database::types::JobFile which lacks the parse_content_at_path method. Switch to buildomat_jobsh::jobfile::JobFile which has the parsing functionality needed by the tests. 
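For reference, the parsing entry point these tests depend on is used roughly as follows. This is only an illustrative sketch: the job name and path below are invented, but the call shape matches how the tests use `JobFile::parse_content_at_path()` (a `Result` wrapping an `Option`, so callers unwrap both layers when they expect a valid, enabled job file):

```rust
use buildomat_jobsh::jobfile::JobFile;

// A minimal jobfile: a shell script whose "#:" comments form TOML front matter.
let content = r#"#!/bin/bash
#: name = "build-job"
#: variety = "basic"

echo build
"#;

let jf = JobFile::parse_content_at_path(content, "jobs/build.sh")
    .expect("parse should succeed")
    .expect("expected an enabled job file");
assert_eq!(jf.name, "build-job");
```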
--- Cargo.lock | 1 + github/server/src/variety/basic.rs | 5 ++++- github/testdata/Cargo.toml | 1 + github/testdata/src/lib.rs | 4 ++-- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index eecbb71..62d5e0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1041,6 +1041,7 @@ dependencies = [ "anyhow", "buildomat-common", "buildomat-github-database", + "buildomat-jobsh", "serde", "serde_json", "toml", diff --git a/github/server/src/variety/basic.rs b/github/server/src/variety/basic.rs index c662491..7126bca 100644 --- a/github/server/src/variety/basic.rs +++ b/github/server/src/variety/basic.rs @@ -8,7 +8,9 @@ use buildomat_client::types::{DependSubmit, JobOutput}; use buildomat_common::*; use buildomat_download::PotentialRange; use buildomat_github_database::{types::*, Database}; -use buildomat_jobsh::variety::basic::{output_sse, output_table, BasicConfig}; +use buildomat_jobsh::variety::basic::{ + output_sse, output_table, BasicConfig, BasicConfigPublish, +}; use chrono::SecondsFormat; use dropshot::Body; use futures::{FutureExt, StreamExt, TryStreamExt}; @@ -1315,6 +1317,7 @@ pub(crate) async fn cancel( pub mod test { use super::*; use buildomat_github_testdata::*; + use buildomat_jobsh::jobfile::JobFile; #[test] fn basic_parse_basic() -> Result<()> { diff --git a/github/testdata/Cargo.toml b/github/testdata/Cargo.toml index 3c22ed0..4c41c4a 100644 --- a/github/testdata/Cargo.toml +++ b/github/testdata/Cargo.toml @@ -10,6 +10,7 @@ doctest = false [dependencies] buildomat-common = { path = "../../common" } buildomat-github-database = { path = "../database" } +buildomat-jobsh = { path = "../../jobsh" } anyhow = { workspace = true } serde = { workspace = true } diff --git a/github/testdata/src/lib.rs b/github/testdata/src/lib.rs index 8e53ca4..a78e725 100644 --- a/github/testdata/src/lib.rs +++ b/github/testdata/src/lib.rs @@ -1,4 +1,4 @@ -use buildomat_github_database::types::{CheckRunVariety, JobFile}; +use buildomat_jobsh::jobfile::{JobFile, Variety}; pub fn disabled0() -> (String, String) { let path = ".github/buildomat/jobs/disabled0.sh"; @@ -106,7 +106,7 @@ pub fn real0() -> (String, String, Option) { let expect = Some(JobFile { path: path.to_string(), name: "helios / build TUF repo".to_string(), - variety: CheckRunVariety::Basic, + variety: Variety::Basic, config: serde_json::json!({ "access_repos": [ "oxidecomputer/amd-apcb", From 4db541b961a96921d8ded9fb1a86f2e27343d8ff Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 13:01:16 -0800 Subject: [PATCH 10/20] Add unit tests for previously untested code paths jobsh/src/jobfile.rs: Add 16 tests for JobFileSet and JobFile parsing. The JobFileSet struct has complex logic for duplicate name detection, dependency cycle detection, and reference validation that was previously untested. github/database/src/tables/check_run.rs: Add 4 tests for CheckRun::get_dependencies(). This method deserializes JSON dependency data and was not covered by existing enum serialization tests. github/database/src/tables/user.rs: Add 2 tests for UserType::from_github_str(). The error path for invalid GitHub API strings was untested. types/src/config.rs: Add 2 tests for ConfigFileDiag inheritance. The explicit "inherit: true" case and mixed inheritance scenarios were not covered by existing tests. 
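For context, the JobFileSet tests construct their inputs from the jobfile front-matter format: a shell script whose `#:` comment lines form a TOML document, including the `[dependencies.*]` tables that the duplicate-name, missing-reference, and cycle checks operate on. The test helper produces input roughly like the following sketch (shown as the Rust raw string handed to `JobFileSet::load()`; the names mirror the "second-job"/"first-job" fixtures in the tests below):

```rust
// One job ("second-job") declaring a dependency named "needs" on another
// job in the same plan ("first-job").
let content = r#"#!/bin/bash
#: name = "second-job"
#: variety = "basic"
#: [dependencies.needs]
#: job = "first-job"

echo hello
"#;
```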
--- github/database/src/tables/check_run.rs | 73 ++++++- github/database/src/tables/user.rs | 30 +++ jobsh/src/jobfile.rs | 279 ++++++++++++++++++++++++ types/src/config.rs | 91 ++++++++ 4 files changed, 472 insertions(+), 1 deletion(-) diff --git a/github/database/src/tables/check_run.rs b/github/database/src/tables/check_run.rs index 1244d19..9cb56dd 100644 --- a/github/database/src/tables/check_run.rs +++ b/github/database/src/tables/check_run.rs @@ -216,7 +216,7 @@ impl CheckRunDependency { */ #[cfg(test)] mod test { - use super::CheckRunVariety; + use super::*; use std::str::FromStr; const CHECK_RUN_VARIETIES: &'static [(&'static str, CheckRunVariety)] = &[ @@ -239,4 +239,75 @@ mod test { assert_eq!(CheckRunVariety::from_str(s).unwrap(), *e); } } + + fn make_check_run(dependencies: Option) -> CheckRun { + CheckRun { + id: CheckRunId::generate(), + check_suite: CheckSuiteId::generate(), + name: "test-run".to_string(), + variety: CheckRunVariety::Basic, + content: None, + config: None, + private: None, + active: true, + flushed: false, + github_id: None, + dependencies: dependencies.map(JsonValue), + } + } + + #[test] + fn check_run_get_dependencies_none() { + let cr = make_check_run(None); + let deps = cr.get_dependencies().unwrap(); + assert!(deps.is_empty()); + } + + #[test] + fn check_run_get_dependencies_empty() { + let cr = make_check_run(Some(serde_json::json!({}))); + let deps = cr.get_dependencies().unwrap(); + assert!(deps.is_empty()); + } + + #[test] + fn check_run_get_dependencies_single() { + let cr = make_check_run(Some(serde_json::json!({ + "build": { + "job": "build-job", + "config": {} + } + }))); + let deps = cr.get_dependencies().unwrap(); + assert_eq!(deps.len(), 1); + assert_eq!(deps.get("build").unwrap().job(), "build-job"); + } + + #[test] + fn check_run_get_dependencies_multiple() { + let cr = make_check_run(Some(serde_json::json!({ + "build": { + "job": "build-job", + "config": {"copy_outputs": true} + }, + "test": { + "job": "test-job", + "config": {} + } + }))); + let deps = cr.get_dependencies().unwrap(); + assert_eq!(deps.len(), 2); + assert_eq!(deps.get("build").unwrap().job(), "build-job"); + assert_eq!(deps.get("test").unwrap().job(), "test-job"); + + // Verify we can extract config from dependency + #[derive(serde::Deserialize)] + struct DepConfig { + #[serde(default)] + copy_outputs: bool, + } + let build_config: DepConfig = + deps.get("build").unwrap().get_config().unwrap(); + assert!(build_config.copy_outputs); + } } diff --git a/github/database/src/tables/user.rs b/github/database/src/tables/user.rs index f4fcf3e..a2379ea 100644 --- a/github/database/src/tables/user.rs +++ b/github/database/src/tables/user.rs @@ -123,4 +123,34 @@ mod test { assert_eq!(UserType::from_str(s).unwrap(), *e); } } + + /* + * Test conversion from GitHub API strings. 
+ */ + const GITHUB_USER_TYPES: &[(&str, UserType)] = &[ + ("User", UserType::User), + ("Bot", UserType::Bot), + ("Organization", UserType::Organisation), + ]; + + #[test] + fn user_type_from_github_str() { + for (s, e) in GITHUB_USER_TYPES { + assert_eq!(UserType::from_github_str(s).unwrap(), *e); + } + } + + #[test] + fn user_type_from_github_str_invalid() { + let invalid_types = &["user", "bot", "org", "Invalid", "", "Org"]; + for invalid in invalid_types { + let err = UserType::from_github_str(invalid).unwrap_err(); + assert!( + err.to_string().contains("invalid user type"), + "expected invalid user type error for {:?}, got: {}", + invalid, + err + ); + } + } } diff --git a/jobsh/src/jobfile.rs b/jobsh/src/jobfile.rs index e6d0386..8ecb6b6 100644 --- a/jobsh/src/jobfile.rs +++ b/jobsh/src/jobfile.rs @@ -280,3 +280,282 @@ impl JobFileSet { Ok(jobfiles) } } + +#[cfg(test)] +mod test { + use super::*; + + fn make_job(name: &str) -> String { + format!( + "#!/bin/bash\n\ + #: name = \"{name}\"\n\ + #: variety = \"basic\"\n\ + echo hello\n" + ) + } + + fn make_job_with_dep(name: &str, dep_name: &str, dep_job: &str) -> String { + format!( + "#!/bin/bash\n\ + #: name = \"{name}\"\n\ + #: variety = \"basic\"\n\ + #: [dependencies.{dep_name}]\n\ + #: job = \"{dep_job}\"\n\ + echo hello\n" + ) + } + + #[test] + fn jobfileset_single_job() { + let mut jfs = JobFileSet::new(10); + jfs.load(&make_job("test-job"), "jobs/test.sh").unwrap(); + let jobs = jfs.complete().unwrap(); + assert_eq!(jobs.len(), 1); + assert_eq!(jobs[0].name, "test-job"); + } + + #[test] + fn jobfileset_multiple_jobs() { + let mut jfs = JobFileSet::new(10); + jfs.load(&make_job("job-a"), "jobs/a.sh").unwrap(); + jfs.load(&make_job("job-b"), "jobs/b.sh").unwrap(); + jfs.load(&make_job("job-c"), "jobs/c.sh").unwrap(); + let jobs = jfs.complete().unwrap(); + assert_eq!(jobs.len(), 3); + // Jobs should be sorted by name + assert_eq!(jobs[0].name, "job-a"); + assert_eq!(jobs[1].name, "job-b"); + assert_eq!(jobs[2].name, "job-c"); + } + + #[test] + fn jobfileset_duplicate_names() { + let mut jfs = JobFileSet::new(10); + jfs.load(&make_job("same-name"), "jobs/a.sh").unwrap(); + jfs.load(&make_job("same-name"), "jobs/b.sh").unwrap(); + let err = jfs.complete().unwrap_err(); + assert!( + err.to_string().contains("used in more than one file"), + "expected duplicate name error, got: {}", + err + ); + } + + #[test] + fn jobfileset_valid_dependency() { + let mut jfs = JobFileSet::new(10); + jfs.load(&make_job("first-job"), "jobs/first.sh").unwrap(); + jfs.load( + &make_job_with_dep("second-job", "needs", "first-job"), + "jobs/second.sh", + ) + .unwrap(); + let jobs = jfs.complete().unwrap(); + assert_eq!(jobs.len(), 2); + } + + #[test] + fn jobfileset_missing_dependency() { + let mut jfs = JobFileSet::new(10); + jfs.load( + &make_job_with_dep("my-job", "needs", "nonexistent-job"), + "jobs/my.sh", + ) + .unwrap(); + let err = jfs.complete().unwrap_err(); + assert!( + err.to_string().contains("not present in the plan"), + "expected missing dependency error, got: {}", + err + ); + } + + #[test] + fn jobfileset_self_dependency_cycle() { + // A job that depends on itself + let content = "\ + #!/bin/bash\n\ + #: name = \"self-ref\"\n\ + #: variety = \"basic\"\n\ + #: [dependencies.me]\n\ + #: job = \"self-ref\"\n\ + echo hello\n"; + let mut jfs = JobFileSet::new(10); + jfs.load(content, "jobs/self.sh").unwrap(); + let err = jfs.complete().unwrap_err(); + assert!( + err.to_string().contains("dependency cycle"), + "expected cycle error, got: {}", + 
err + ); + } + + #[test] + fn jobfileset_two_job_cycle() { + // A -> B -> A + let job_a = "\ + #!/bin/bash\n\ + #: name = \"job-a\"\n\ + #: variety = \"basic\"\n\ + #: [dependencies.needs_b]\n\ + #: job = \"job-b\"\n\ + echo a\n"; + let job_b = "\ + #!/bin/bash\n\ + #: name = \"job-b\"\n\ + #: variety = \"basic\"\n\ + #: [dependencies.needs_a]\n\ + #: job = \"job-a\"\n\ + echo b\n"; + let mut jfs = JobFileSet::new(10); + jfs.load(job_a, "jobs/a.sh").unwrap(); + jfs.load(job_b, "jobs/b.sh").unwrap(); + let err = jfs.complete().unwrap_err(); + assert!( + err.to_string().contains("dependency cycle"), + "expected cycle error, got: {}", + err + ); + } + + #[test] + fn jobfileset_diamond_dependency_ok() { + // Diamond: D depends on B and C, both depend on A + // This is NOT a cycle and should succeed + let job_a = make_job("job-a"); + let job_b = make_job_with_dep("job-b", "needs", "job-a"); + let job_c = make_job_with_dep("job-c", "needs", "job-a"); + let job_d = "\ + #!/bin/bash\n\ + #: name = \"job-d\"\n\ + #: variety = \"basic\"\n\ + #: [dependencies.needs_b]\n\ + #: job = \"job-b\"\n\ + #: [dependencies.needs_c]\n\ + #: job = \"job-c\"\n\ + echo d\n"; + + let mut jfs = JobFileSet::new(10); + jfs.load(&job_a, "jobs/a.sh").unwrap(); + jfs.load(&job_b, "jobs/b.sh").unwrap(); + jfs.load(&job_c, "jobs/c.sh").unwrap(); + jfs.load(job_d, "jobs/d.sh").unwrap(); + let jobs = jfs.complete().unwrap(); + assert_eq!(jobs.len(), 4); + } + + #[test] + fn jobfileset_non_sh_extension() { + let mut jfs = JobFileSet::new(10); + let err = jfs.load(&make_job("test"), "jobs/test.txt").unwrap_err(); + assert!( + err.to_string().contains("unexpected item"), + "expected extension error, got: {}", + err + ); + } + + #[test] + fn jobfileset_too_many_jobs() { + let mut jfs = JobFileSet::new(2); + jfs.load(&make_job("job-1"), "jobs/1.sh").unwrap(); + jfs.load(&make_job("job-2"), "jobs/2.sh").unwrap(); + jfs.load(&make_job("job-3"), "jobs/3.sh").unwrap(); + let err = jfs.load(&make_job("job-4"), "jobs/4.sh").unwrap_err(); + assert!( + err.to_string().contains("too many job files"), + "expected too many jobs error, got: {}", + err + ); + } + + #[test] + fn jobfile_missing_shebang() { + let content = "\ + #: name = \"test\"\n\ + #: variety = \"basic\"\n\ + echo hello\n"; + let err = JobFile::parse_content_at_path(content, "test.sh").unwrap_err(); + assert!( + err.to_string().contains("interpreter line"), + "expected shebang error, got: {}", + err + ); + } + + #[test] + fn jobfile_misspelled_enable_enabled() { + let content = "\ + #!/bin/bash\n\ + #: name = \"test\"\n\ + #: variety = \"basic\"\n\ + #: enabled = true\n\ + echo hello\n"; + let err = JobFile::parse_content_at_path(content, "test.sh").unwrap_err(); + assert!( + err.to_string().contains("use \"enable\""), + "expected misspelling error, got: {}", + err + ); + } + + #[test] + fn jobfile_misspelled_enable_disabled() { + let content = "\ + #!/bin/bash\n\ + #: name = \"test\"\n\ + #: variety = \"basic\"\n\ + #: disabled = true\n\ + echo hello\n"; + let err = JobFile::parse_content_at_path(content, "test.sh").unwrap_err(); + assert!( + err.to_string().contains("use \"enable\""), + "expected misspelling error, got: {}", + err + ); + } + + #[test] + fn jobfile_with_dependencies() { + let content = "\ + #!/bin/bash\n\ + #: name = \"dependent-job\"\n\ + #: variety = \"basic\"\n\ + #: [dependencies.build]\n\ + #: job = \"build-job\"\n\ + #: [dependencies.test]\n\ + #: job = \"test-job\"\n\ + echo hello\n"; + let jf = JobFile::parse_content_at_path(content, "test.sh") + 
.unwrap() + .unwrap(); + assert_eq!(jf.dependencies.len(), 2); + assert_eq!(jf.dependencies.get("build").unwrap().job, "build-job"); + assert_eq!(jf.dependencies.get("test").unwrap().job, "test-job"); + } + + #[test] + fn jobfile_empty_content() { + // Empty file has no lines, so lines.next() returns None and + // TOML parsing fails due to missing required fields + let content = ""; + let err = JobFile::parse_content_at_path(content, "test.sh").unwrap_err(); + assert!( + err.to_string().contains("TOML front matter"), + "expected TOML parsing error for empty file, got: {}", + err + ); + } + + #[test] + fn jobfile_only_shebang() { + // Just a shebang with no frontmatter should fail TOML parsing + let content = "#!/bin/bash\n"; + let err = JobFile::parse_content_at_path(content, "test.sh").unwrap_err(); + assert!( + err.to_string().contains("TOML front matter"), + "expected TOML parsing error, got: {}", + err + ); + } +} diff --git a/types/src/config.rs b/types/src/config.rs index c3f4afe..fc69d14 100644 --- a/types/src/config.rs +++ b/types/src/config.rs @@ -342,6 +342,10 @@ mod test { Some(Inheritable::Inherit(ConfigFileInherit { inherit: false })) } + fn yes_inherit() -> Option { + Some(Inheritable::Inherit(ConfigFileInherit { inherit: true })) + } + #[test] fn base_with_removals() -> Result<()> { let base = ConfigFileDiag { @@ -379,4 +383,91 @@ mod test { assert_eq!(loaded, expect); Ok(()) } + + #[test] + fn explicit_inherit_true() -> Result<()> { + // When "inherit: true" is specified, values should be inherited from base + let base = ConfigFileDiag { + root_password_hash: Some(Inheritable::String("password".into())), + root_authorized_keys: Some(Inheritable::String("keys".into())), + dump_to_rpool: Some(Inheritable::Num(100)), + pre_job_diagnostic_script: Some(Inheritable::String("pre".into())), + post_job_diagnostic_script: Some(Inheritable::String( + "post".into(), + )), + rpool_disable_sync: Some(Inheritable::Bool(true)), + }; + + let over = ConfigFileDiag { + root_password_hash: yes_inherit(), + root_authorized_keys: yes_inherit(), + dump_to_rpool: yes_inherit(), + pre_job_diagnostic_script: yes_inherit(), + post_job_diagnostic_script: yes_inherit(), + rpool_disable_sync: yes_inherit(), + }; + + let loaded = base.apply_overrides(&over).build()?; + + // inherit: true should pass through base values + let expect = FactoryMetadata::V1(FactoryMetadataV1 { + addresses: Default::default(), + root_password_hash: Some("password".to_string()), + root_authorized_keys: Some("keys".to_string()), + dump_to_rpool: Some(100), + pre_job_diagnostic_script: Some("pre".to_string()), + post_job_diagnostic_script: Some("post".to_string()), + rpool_disable_sync: Some(true), + }); + + assert_eq!(loaded, expect); + Ok(()) + } + + #[test] + fn mixed_inherit_and_override() -> Result<()> { + // Some fields inherit, some override, some block inheritance + let base = ConfigFileDiag { + root_password_hash: Some(Inheritable::String("base-password".into())), + root_authorized_keys: Some(Inheritable::String("base-keys".into())), + dump_to_rpool: Some(Inheritable::Num(100)), + pre_job_diagnostic_script: Some(Inheritable::String("base-pre".into())), + post_job_diagnostic_script: Some(Inheritable::String( + "base-post".into(), + )), + rpool_disable_sync: Some(Inheritable::Bool(false)), + }; + + let over = ConfigFileDiag { + // Explicitly inherit from base + root_password_hash: yes_inherit(), + // Override with new value + root_authorized_keys: Some(Inheritable::String("new-keys".into())), + // Block inheritance (no 
value) + dump_to_rpool: no_inherit(), + // Implicit inherit (None = inherit from base) + pre_job_diagnostic_script: None, + // Override with new value + post_job_diagnostic_script: Some(Inheritable::String( + "new-post".into(), + )), + // Override boolean + rpool_disable_sync: Some(Inheritable::Bool(true)), + }; + + let loaded = base.apply_overrides(&over).build()?; + + let expect = FactoryMetadata::V1(FactoryMetadataV1 { + addresses: Default::default(), + root_password_hash: Some("base-password".to_string()), // inherited + root_authorized_keys: Some("new-keys".to_string()), // overridden + dump_to_rpool: None, // blocked + pre_job_diagnostic_script: Some("base-pre".to_string()), // implicit inherit + post_job_diagnostic_script: Some("new-post".to_string()), // overridden + rpool_disable_sync: Some(true), // overridden + }); + + assert_eq!(loaded, expect); + Ok(()) + } } From 3652989c8b9adfbdc43971f916744a079de09b19 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 16:23:28 -0800 Subject: [PATCH 11/20] agent: add tests for path constants Add tests that verify the current values of the hardcoded path constants: - CONFIG_PATH: /opt/buildomat/etc/agent.json - JOB_PATH: /opt/buildomat/etc/job.json - AGENT: /opt/buildomat/lib/agent Also adds illumos-specific tests for: - METHOD: /opt/buildomat/lib/start.sh - MANIFEST: /var/svc/manifest/site/buildomat-agent.xml These tests document the current behavior and will serve as a baseline for backward compatibility when these constants are refactored to functions in subsequent branches. --- agent/src/main.rs | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/agent/src/main.rs b/agent/src/main.rs index 5866ee1..ab8f5ed 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -1897,3 +1897,38 @@ async fn main() -> Result<()> { } } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_config_path_value() { + assert_eq!(CONFIG_PATH, "/opt/buildomat/etc/agent.json"); + } + + #[test] + fn test_job_path_value() { + assert_eq!(JOB_PATH, "/opt/buildomat/etc/job.json"); + } + + #[test] + fn test_agent_path_value() { + assert_eq!(AGENT, "/opt/buildomat/lib/agent"); + } + + #[cfg(target_os = "illumos")] + mod illumos_tests { + use super::*; + + #[test] + fn test_method_path_value() { + assert_eq!(METHOD, "/opt/buildomat/lib/start.sh"); + } + + #[test] + fn test_manifest_path_value() { + assert_eq!(MANIFEST, "/var/svc/manifest/site/buildomat-agent.xml"); + } + } +} From c9d60d9b04a4683273ee450cb9bf8a15dbf16b36 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 16:24:21 -0800 Subject: [PATCH 12/20] agent: add tests for control socket path Add test that verifies the current value of SOCKET_PATH constant: - SOCKET_PATH: /var/run/buildomat.sock This test documents the current behavior and will serve as a baseline for backward compatibility when this constant is refactored to a function in subsequent branches. 
--- agent/src/control/mod.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/agent/src/control/mod.rs b/agent/src/control/mod.rs index 832f4de..9c46e12 100644 --- a/agent/src/control/mod.rs +++ b/agent/src/control/mod.rs @@ -578,3 +578,13 @@ async fn cmd_factory_private(mut l: Level) -> Result<()> { Ok(()) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_socket_path_value() { + assert_eq!(SOCKET_PATH, "/var/run/buildomat.sock"); + } +} From 275a83a7b6d25ea3187e614b5d6e5681bfd71a46 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 16:46:28 -0800 Subject: [PATCH 13/20] agent: extract path constants to functions Replace CONFIG_PATH, JOB_PATH, and AGENT constants with config_path(), job_path(), and agent_path() functions that return PathBuf. This is a pure refactoring with no behavior change - the functions return exactly the same values as the original constants. This prepares for future configurability via environment variables. --- agent/src/main.rs | 43 +++++++++++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 16 deletions(-) diff --git a/agent/src/main.rs b/agent/src/main.rs index ab8f5ed..c74adbb 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -44,9 +44,20 @@ struct Agent { log: Logger, } -const CONFIG_PATH: &str = "/opt/buildomat/etc/agent.json"; -const JOB_PATH: &str = "/opt/buildomat/etc/job.json"; -const AGENT: &str = "/opt/buildomat/lib/agent"; +/// Path to the agent configuration file. +fn config_path() -> PathBuf { + PathBuf::from("/opt/buildomat/etc/agent.json") +} + +/// Path to the job state file. +fn job_path() -> PathBuf { + PathBuf::from("/opt/buildomat/etc/job.json") +} + +/// Path to the agent binary. +fn agent_path() -> PathBuf { + PathBuf::from("/opt/buildomat/lib/agent") +} const INPUT_PATH: &str = "/input"; const CONTROL_PROGRAM: &str = "bmat"; const SHADOW: &str = "/etc/shadow"; @@ -1049,19 +1060,19 @@ async fn cmd_install(mut l: Level) -> Result<()> { /* * Write /opt/buildomat/etc/agent.json with this configuration. */ - make_dirs_for(CONFIG_PATH)?; - rmfile(CONFIG_PATH)?; + make_dirs_for(config_path())?; + rmfile(config_path())?; let cf = ConfigFile { baseurl, bootstrap, token: genkey(64) }; - store(CONFIG_PATH, &cf)?; + store(config_path(), &cf)?; /* * Copy the agent binary into a permanent home. */ let exe = env::current_exe()?; - make_dirs_for(AGENT)?; - rmfile(AGENT)?; - std::fs::copy(&exe, AGENT)?; - make_executable(AGENT)?; + make_dirs_for(agent_path())?; + rmfile(agent_path())?; + std::fs::copy(&exe, agent_path())?; + make_executable(agent_path())?; /* * Install the agent binary with the control program name in a location in @@ -1182,7 +1193,7 @@ async fn cmd_install(mut l: Level) -> Result<()> { async fn cmd_run(mut l: Level) -> Result<()> { no_args!(l); - let cf = load::<_, ConfigFile>(CONFIG_PATH)?; + let cf = load::<_, ConfigFile>(config_path())?; let log = l.context().log.clone(); info!(log, "agent starting"; "baseurl" => &cf.baseurl); @@ -1236,7 +1247,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { * any evidence that we've done this before, we must report it to the * central server and do nothing else. */ - if PathBuf::from(JOB_PATH).try_exists()? { + if job_path().try_exists()? 
{ error!(log, "found previously assigned job; reporting failure"); /* @@ -1374,7 +1385,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { .create_new(true) .create(false) .write(true) - .open(JOB_PATH)?; + .open(job_path())?; jf.write_all(&diag)?; jf.flush()?; jf.sync_all()?; @@ -1904,17 +1915,17 @@ mod tests { #[test] fn test_config_path_value() { - assert_eq!(CONFIG_PATH, "/opt/buildomat/etc/agent.json"); + assert_eq!(config_path(), PathBuf::from("/opt/buildomat/etc/agent.json")); } #[test] fn test_job_path_value() { - assert_eq!(JOB_PATH, "/opt/buildomat/etc/job.json"); + assert_eq!(job_path(), PathBuf::from("/opt/buildomat/etc/job.json")); } #[test] fn test_agent_path_value() { - assert_eq!(AGENT, "/opt/buildomat/lib/agent"); + assert_eq!(agent_path(), PathBuf::from("/opt/buildomat/lib/agent")); } #[cfg(target_os = "illumos")] From 7a2c35e646f50e1b9a0d6ce6232fac398a424c31 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Tue, 9 Dec 2025 16:46:41 -0800 Subject: [PATCH 14/20] agent: extract socket path to function Replace SOCKET_PATH constant with socket_path() function. The function returns String and is called from both mod.rs (client connection) and server.rs (listener binding). This is a pure refactoring with no behavior change - the function returns exactly the same value as the original constant. --- agent/src/control/mod.rs | 9 ++++++--- agent/src/control/server.rs | 11 ++++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/agent/src/control/mod.rs b/agent/src/control/mod.rs index 9c46e12..f9542c3 100644 --- a/agent/src/control/mod.rs +++ b/agent/src/control/mod.rs @@ -18,7 +18,10 @@ use protocol::{Decoder, FactoryInfo, Message, Payload}; pub(crate) mod protocol; pub(crate) mod server; -pub const SOCKET_PATH: &str = "/var/run/buildomat.sock"; +/// Get the control socket path. +pub fn socket_path() -> String { + "/var/run/buildomat.sock".to_string() +} struct Stuff { us: Option, @@ -28,7 +31,7 @@ struct Stuff { impl Stuff { async fn connect(&mut self) -> Result<()> { - self.us = Some(UnixStream::connect(SOCKET_PATH).await?); + self.us = Some(UnixStream::connect(socket_path()).await?); self.dec = Some(Decoder::new()); Ok(()) } @@ -585,6 +588,6 @@ mod tests { #[test] fn test_socket_path_value() { - assert_eq!(SOCKET_PATH, "/var/run/buildomat.sock"); + assert_eq!(socket_path(), "/var/run/buildomat.sock"); } } diff --git a/agent/src/control/server.rs b/agent/src/control/server.rs index 29bfe9a..b0f8d23 100644 --- a/agent/src/control/server.rs +++ b/agent/src/control/server.rs @@ -17,7 +17,7 @@ use tokio::{ use super::{ protocol::{Decoder, Message, Payload}, - SOCKET_PATH, + socket_path, }; #[derive(Debug)] @@ -50,15 +50,16 @@ pub fn listen() -> Result> { * Create the UNIX socket that the control program will use to contact the * agent. */ - std::fs::remove_file(SOCKET_PATH).ok(); - let ul = UnixListener::bind(SOCKET_PATH)?; + let path = socket_path(); + std::fs::remove_file(&path).ok(); + let ul = UnixListener::bind(&path)?; /* * Allow everyone to connect: */ - let mut perm = std::fs::metadata(SOCKET_PATH)?.permissions(); + let mut perm = std::fs::metadata(&path)?.permissions(); perm.set_mode(0o777); - std::fs::set_permissions(SOCKET_PATH, perm)?; + std::fs::set_permissions(&path, perm)?; /* * Create channel to hand requests back to the main loop. 
From 3ab618eefccdd79cfc6b2b71b48675ec2da588f5 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 14:29:46 -0800 Subject: [PATCH 15/20] Add persistent mode fields to ConfigFile Config files are backward compatible through an optional file version field and use of accessor functions. The new fields use serde defaults so existing config files continue to work. New fields for persistent mode support: - version: config file version (defaults to 1 for backward compatibility) - opt_base: optional base directory (defaults to `/opt/buildomat`) - persistent: flag for persistent/stateful mode vs the existing ephemeral agents. --- agent/src/main.rs | 67 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/agent/src/main.rs b/agent/src/main.rs index c74adbb..54382e1 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -75,14 +75,53 @@ use os_constants::*; use crate::control::protocol::StoreEntry; +fn default_version() -> u32 { + 1 +} + #[derive(Serialize, Deserialize)] struct ConfigFile { + /// Config file version. Missing field defaults to 1 for backward + /// compatibility. + #[serde(default = "default_version")] + version: u32, baseurl: String, bootstrap: String, token: String, + /// Base directory for agent files. Defaults to /opt/buildomat. + #[serde(default)] + opt_base: Option, + /// Persistent mode - skip SMF/systemd/ZFS setup, run as current user. + #[serde(default)] + persistent: bool, } impl ConfigFile { + /// Config file version. + pub fn version(&self) -> u32 { + self.version + } + + /// Base URL for buildomat server. + pub fn baseurl(&self) -> &str { + &self.baseurl + } + + /// Bootstrap token for initial registration. + pub fn bootstrap(&self) -> &str { + &self.bootstrap + } + + /// Authentication token. + pub fn token(&self) -> &str { + &self.token + } + + /// Whether running in persistent mode. 
+ pub fn persistent(&self) -> bool { + self.persistent + } + fn make_client(&self, log: Logger) -> ClientWrap { let client = buildomat_client::ClientBuilder::new(&self.baseurl) .bearer_token(&self.token) @@ -1062,7 +1101,14 @@ async fn cmd_install(mut l: Level) -> Result<()> { */ make_dirs_for(config_path())?; rmfile(config_path())?; - let cf = ConfigFile { baseurl, bootstrap, token: genkey(64) }; + let cf = ConfigFile { + version: 2, + baseurl, + bootstrap, + token: genkey(64), + opt_base: None, + persistent: false, + }; store(config_path(), &cf)?; /* @@ -1928,6 +1974,25 @@ mod tests { assert_eq!(agent_path(), PathBuf::from("/opt/buildomat/lib/agent")); } + #[test] + fn test_version_default_deserialization() { + // Test that missing version field defaults to 1 + let json = r#"{"baseurl":"http://test","bootstrap":"b","token":"t"}"#; + let cf: ConfigFile = serde_json::from_str(json).unwrap(); + assert_eq!(cf.version(), 1); + assert!(!cf.persistent()); + assert!(cf.opt_base.is_none()); + } + + #[test] + fn test_version_2_deserialization() { + let json = r#"{"version":2,"baseurl":"http://test","bootstrap":"b","token":"t","persistent":true,"opt_base":"/custom"}"#; + let cf: ConfigFile = serde_json::from_str(json).unwrap(); + assert_eq!(cf.version(), 2); + assert!(cf.persistent()); + assert_eq!(cf.opt_base.as_deref(), Some("/custom")); + } + #[cfg(target_os = "illumos")] mod illumos_tests { use super::*; From c4d945aca79679102bca03ddadbd6ad69bc4cbc5 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 14:41:07 -0800 Subject: [PATCH 16/20] Add derived path methods to ConfigFile These methods will replace the standalone path functions, allowing paths to be derived from the config file rather than hardcoded. - opt_base(): returns base directory (default `/opt/buildomat`) - job_path(): path to job state file - job_done_path(): path to job completion marker - agent_path(): path to agent binary - control_program_path(): /usr/bin/bmat or $OPT/bin/bmat in persistent mode - socket_path(): `/var/run/buildomat.sock` or `$OPT/run/buildomat.sock` - method_path(): SMF method script path (illumos only) - mark_job_done(): renames job.json to job-done.json on completion --- agent/src/main.rs | 219 ++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 213 insertions(+), 6 deletions(-) diff --git a/agent/src/main.rs b/agent/src/main.rs index 54382e1..07a3e0d 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -98,30 +98,119 @@ struct ConfigFile { impl ConfigFile { /// Config file version. + #[allow(dead_code)] pub fn version(&self) -> u32 { self.version } /// Base URL for buildomat server. + #[allow(dead_code)] pub fn baseurl(&self) -> &str { &self.baseurl } /// Bootstrap token for initial registration. + #[allow(dead_code)] pub fn bootstrap(&self) -> &str { &self.bootstrap } /// Authentication token. + #[allow(dead_code)] pub fn token(&self) -> &str { &self.token } /// Whether running in persistent mode. + #[allow(dead_code)] pub fn persistent(&self) -> bool { self.persistent } + /// Base directory for buildomat agent files. + #[allow(dead_code)] + fn opt_base(&self) -> PathBuf { + self.opt_base + .as_ref() + .map(PathBuf::from) + .unwrap_or_else(|| PathBuf::from("/opt/buildomat")) + } + + /// Path to the job state file (indicates job in progress). + #[allow(dead_code)] + pub fn job_path(&self) -> PathBuf { + self.opt_base().join("etc/job.json") + } + + /// Path to the job completion marker file. 
+ #[allow(dead_code)] + pub fn job_done_path(&self) -> PathBuf { + self.opt_base().join("etc/job-done.json") + } + + /// Path to the agent binary. + #[allow(dead_code)] + pub fn agent_path(&self) -> PathBuf { + self.opt_base().join("lib/agent") + } + + /// Path to the control program (bmat). + /// In persistent mode, installed under opt_base. + /// In normal mode, installed to /usr/bin. + #[allow(dead_code)] + pub fn control_program_path(&self) -> PathBuf { + if self.persistent { + self.opt_base().join("bin").join(CONTROL_PROGRAM) + } else { + PathBuf::from(format!("/usr/bin/{CONTROL_PROGRAM}")) + } + } + + /// Path to the control socket. + /// In persistent mode, under opt_base/run. + /// In normal mode, /var/run/buildomat.sock. + #[allow(dead_code)] + pub fn socket_path(&self) -> PathBuf { + if self.persistent { + self.opt_base().join("run/buildomat.sock") + } else { + PathBuf::from("/var/run/buildomat.sock") + } + } + + /// Path to the SMF method script (illumos only). + #[cfg(target_os = "illumos")] + #[allow(dead_code)] + pub fn method_path(&self) -> PathBuf { + self.opt_base().join("lib/start.sh") + } + + /// Mark job as complete by renaming job.json to job-done.json. + /// This allows subsequent agent runs to detect that the previous job + /// completed successfully rather than crashed. + #[allow(dead_code)] + pub fn mark_job_done(&self, log: &Logger) { + let job_file = self.job_path(); + let done_file = self.job_done_path(); + + if job_file.exists() { + match std::fs::rename(&job_file, &done_file) { + Ok(()) => { + info!(log, "marked job as done"; + "from" => %job_file.display(), + "to" => %done_file.display()); + } + Err(e) => { + // Non-fatal - the job still completed + error!(log, "failed to mark job done"; + "from" => %job_file.display(), + "to" => %done_file.display(), + "error" => %e); + } + } + } + } + fn make_client(&self, log: Logger) -> ClientWrap { let client = buildomat_client::ClientBuilder::new(&self.baseurl) .bearer_token(&self.token) @@ -1959,19 +2048,133 @@ async fn main() -> Result<()> { mod tests { use super::*; + /// Helper to create a test ConfigFile with defaults. + fn test_config() -> ConfigFile { + ConfigFile { + version: 2, + baseurl: "http://test".to_string(), + bootstrap: "bootstrap".to_string(), + token: "token".to_string(), + opt_base: None, + persistent: false, + } + } + + /// Helper to create a test ConfigFile with custom opt_base. + fn test_config_with_opt_base(opt_base: &str) -> ConfigFile { + ConfigFile { + version: 2, + baseurl: "http://test".to_string(), + bootstrap: "bootstrap".to_string(), + token: "token".to_string(), + opt_base: Some(opt_base.to_string()), + persistent: false, + } + } + + /// Helper to create a persistent test ConfigFile. 
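+    /// Like test_config(), but with persistent set to true; opt_base stays
+    /// None, so derived paths still fall under /opt/buildomat.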
+ fn test_config_persistent() -> ConfigFile { + ConfigFile { + version: 2, + baseurl: "http://test".to_string(), + bootstrap: "bootstrap".to_string(), + token: "token".to_string(), + opt_base: None, + persistent: true, + } + } + #[test] fn test_config_path_value() { assert_eq!(config_path(), PathBuf::from("/opt/buildomat/etc/agent.json")); } #[test] - fn test_job_path_value() { - assert_eq!(job_path(), PathBuf::from("/opt/buildomat/etc/job.json")); + fn test_job_path_default() { + let cf = test_config(); + assert_eq!(cf.job_path(), PathBuf::from("/opt/buildomat/etc/job.json")); } #[test] - fn test_agent_path_value() { - assert_eq!(agent_path(), PathBuf::from("/opt/buildomat/lib/agent")); + fn test_job_path_custom_opt() { + let cf = test_config_with_opt_base("/custom/path"); + assert_eq!(cf.job_path(), PathBuf::from("/custom/path/etc/job.json")); + } + + #[test] + fn test_agent_path_default() { + let cf = test_config(); + assert_eq!(cf.agent_path(), PathBuf::from("/opt/buildomat/lib/agent")); + } + + #[test] + fn test_persistent_default() { + let cf = test_config(); + assert!(!cf.persistent()); + } + + #[test] + fn test_persistent_enabled() { + let cf = test_config_persistent(); + assert!(cf.persistent()); + } + + #[test] + fn test_job_done_path_default() { + let cf = test_config(); + assert_eq!( + cf.job_done_path(), + PathBuf::from("/opt/buildomat/etc/job-done.json") + ); + } + + #[test] + fn test_control_program_path_normal_mode() { + let cf = test_config(); + assert_eq!(cf.control_program_path(), PathBuf::from("/usr/bin/bmat")); + } + + #[test] + fn test_control_program_path_persistent_mode() { + let cf = test_config_persistent(); + assert_eq!( + cf.control_program_path(), + PathBuf::from("/opt/buildomat/bin/bmat") + ); + } + + #[test] + fn test_control_program_path_persistent_custom_opt() { + let cf = ConfigFile { + version: 2, + baseurl: "http://test".to_string(), + bootstrap: "bootstrap".to_string(), + token: "token".to_string(), + opt_base: Some("/custom/path".to_string()), + persistent: true, + }; + assert_eq!( + cf.control_program_path(), + PathBuf::from("/custom/path/bin/bmat") + ); + } + + #[test] + fn test_socket_path_normal_mode() { + let cf = test_config(); + assert_eq!( + cf.socket_path(), + PathBuf::from("/var/run/buildomat.sock") + ); + } + + #[test] + fn test_socket_path_persistent_mode() { + let cf = test_config_persistent(); + assert_eq!( + cf.socket_path(), + PathBuf::from("/opt/buildomat/run/buildomat.sock") + ); } #[test] @@ -1998,8 +2201,12 @@ mod tests { use super::*; #[test] - fn test_method_path_value() { - assert_eq!(METHOD, "/opt/buildomat/lib/start.sh"); + fn test_method_path_default() { + let cf = test_config(); + assert_eq!( + cf.method_path(), + PathBuf::from("/opt/buildomat/lib/start.sh") + ); } #[test] From fa63d1d4bee766b5307190a88a107e1272897918 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 14:55:20 -0800 Subject: [PATCH 17/20] agent: add CLI flags for persistent mode Add command line support for persistent mode configuration: - Add config_path field to Agent struct - Add DEFAULT_CONFIG_PATH constant - Add parse_config_path_arg() to parse -c flag before hiercmd - Add --persistent and --opt-base flags to install subcommand - Update cmd_install to use config_path from context CLI usage: ``` agent -c /path/to/config.json install --persistent --opt-base /home/user/buildomat BASEURL TOKEN ```` --- agent/src/main.rs | 59 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 51 insertions(+), 8 deletions(-) diff --git 
a/agent/src/main.rs b/agent/src/main.rs
index 07a3e0d..8f8337e 100644
--- a/agent/src/main.rs
+++ b/agent/src/main.rs
@@ -42,6 +42,8 @@ use exec::ExitDetails;
 
 struct Agent {
     log: Logger,
+    /// Path to the config file. Set via -c flag.
+    config_path: PathBuf,
 }
 
 /// Path to the agent configuration file.
@@ -58,9 +60,12 @@ fn job_path() -> PathBuf {
 fn agent_path() -> PathBuf {
     PathBuf::from("/opt/buildomat/lib/agent")
 }
+
 const INPUT_PATH: &str = "/input";
 const CONTROL_PROGRAM: &str = "bmat";
 const SHADOW: &str = "/etc/shadow";
+/// Default config file path.
+const DEFAULT_CONFIG_PATH: &str = "/opt/buildomat/etc/agent.json";
 #[cfg(target_os = "illumos")]
 mod os_constants {
     pub const METHOD: &str = "/opt/buildomat/lib/start.sh";
@@ -1164,9 +1169,12 @@ enum Stage {
 async fn cmd_install(mut l: Level<Agent>) -> Result<()> {
     l.usage_args(Some("BASEURL BOOTSTRAP_TOKEN"));
     l.optopt("N", "", "set nodename of machine", "NODENAME");
+    l.optopt("", "opt-base", "base directory for agent files", "PATH");
+    l.optflag("", "persistent", "persistent mode (skip SMF/systemd/ZFS setup)");
     let a = args!(l);
 
     let log = l.context().log.clone();
+    let config_path = l.context().config_path.clone();
 
     if a.args().len() < 2 {
         bad_args!(l, "specify base URL and bootstrap token value");
@@ -1178,6 +1186,8 @@ async fn cmd_install(mut l: Level<Agent>) -> Result<()> {
      */
    let baseurl = a.args()[0].to_string();
    let bootstrap = a.args()[1].to_string();
+    let persistent = a.opts().opt_present("persistent");
+    let opt_base = a.opts().opt_str("opt-base");
 
    if let Some(nodename) = a.opts().opt_str("N") {
        if let Err(e) = set_nodename(&nodename) {
@@ -1186,19 +1196,19 @@ async fn cmd_install(mut l: Level<Agent>) -> Result<()> {
    }
 
    /*
-     * Write /opt/buildomat/etc/agent.json with this configuration.
+     * Write the agent configuration file.
     */
-    make_dirs_for(config_path())?;
-    rmfile(config_path())?;
+    make_dirs_for(&config_path)?;
+    rmfile(&config_path)?;
    let cf = ConfigFile {
        version: 2,
        baseurl,
        bootstrap,
        token: genkey(64),
-        opt_base: None,
-        persistent: false,
+        opt_base,
+        persistent,
    };
-    store(config_path(), &cf)?;
+    store(&config_path, &cf)?;
 
    /*
     * Copy the agent binary into a permanent home.
@@ -2007,6 +2017,32 @@ async fn cmd_run(mut l: Level<Agent>) -> Result<()> {
    }
 }
 
+/// Parse the -c CONFIG_PATH option from command line arguments.
+/// Returns the config path and the remaining arguments with -c removed.
+fn parse_config_path_arg() -> (PathBuf, Vec<String>) {
+    let args: Vec<String> = std::env::args().collect();
+    let mut config_path = PathBuf::from(DEFAULT_CONFIG_PATH);
+    let mut filtered_args = Vec::new();
+    let mut skip_next = false;
+
+    for (i, arg) in args.iter().enumerate() {
+        if skip_next {
+            skip_next = false;
+            continue;
+        }
+        if arg == "-c" {
+            if let Some(path) = args.get(i + 1) {
+                config_path = PathBuf::from(path);
+                skip_next = true;
+            }
+        } else {
+            filtered_args.push(arg.clone());
+        }
+    }
+
+    (config_path, filtered_args)
+}
+
 #[tokio::main]
 async fn main() -> Result<()> {
    let cmdname = std::env::args().next().as_deref().and_then(|s| {
@@ -2027,10 +2063,17 @@ async fn main() -> Result<()> {
            /*
             * Assume that any name other than the name for the control
             * entrypoint is the regular agent.
+             *
+             * Parse the -c option before hiercmd takes over.
+             * This allows: agent -c /path/to/config.json install ...
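+             * When no -c flag is given, DEFAULT_CONFIG_PATH
+             * (/opt/buildomat/etc/agent.json) is used, which matches the
+             * non-persistent install layout.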
*/ + let (config_path, _filtered_args) = parse_config_path_arg(); + let log = make_log("buildomat-agent"); - let mut l = - Level::new("buildomat-agent", Agent { log: log.clone() }); + let mut l = Level::new( + "buildomat-agent", + Agent { log: log.clone(), config_path }, + ); l.cmd("install", "install the agent", cmd!(cmd_install))?; l.cmd("run", "run the agent", cmd!(cmd_run))?; From 341a8ebdec2fb6b401c0129f26be98f0abde6268 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 15:10:42 -0800 Subject: [PATCH 18/20] agent: update cmd_install for persistent mode Use ConfigFile methods for paths in cmd_install: - cf.agent_path() instead of standalone agent_path() - cf.control_program_path() for bmat installation - cf.method_path() for SMF method script Skip SMF/systemd/ZFS setup in persistent mode: - illumos: skip SMF manifest, method script, zfs create, svccfg import - linux: skip input dir creation, systemd unit, daemon-reload, service start Remove now-unused agent_path() function and METHOD constant. --- agent/src/main.rs | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/agent/src/main.rs b/agent/src/main.rs index 8f8337e..617df67 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -56,11 +56,6 @@ fn job_path() -> PathBuf { PathBuf::from("/opt/buildomat/etc/job.json") } -/// Path to the agent binary. -fn agent_path() -> PathBuf { - PathBuf::from("/opt/buildomat/lib/agent") -} - const INPUT_PATH: &str = "/input"; const CONTROL_PROGRAM: &str = "bmat"; const SHADOW: &str = "/etc/shadow"; @@ -68,7 +63,6 @@ const SHADOW: &str = "/etc/shadow"; const DEFAULT_CONFIG_PATH: &str = "/opt/buildomat/etc/agent.json"; #[cfg(target_os = "illumos")] mod os_constants { - pub const METHOD: &str = "/opt/buildomat/lib/start.sh"; pub const MANIFEST: &str = "/var/svc/manifest/site/buildomat-agent.xml"; pub const INPUT_DATASET: &str = "rpool/input"; } @@ -1214,30 +1208,35 @@ async fn cmd_install(mut l: Level) -> Result<()> { * Copy the agent binary into a permanent home. */ let exe = env::current_exe()?; - make_dirs_for(agent_path())?; - rmfile(agent_path())?; - std::fs::copy(&exe, agent_path())?; - make_executable(agent_path())?; + let agent_path = cf.agent_path(); + make_dirs_for(&agent_path)?; + rmfile(&agent_path)?; + std::fs::copy(&exe, &agent_path)?; + make_executable(&agent_path)?; /* * Install the agent binary with the control program name in a location in * the default PATH so that job programs can find it. + * In persistent mode, we install to opt_base/bin/ instead of /usr/bin/. */ - let cprog = format!("/usr/bin/{CONTROL_PROGRAM}"); + let cprog = cf.control_program_path(); + make_dirs_for(&cprog)?; rmfile(&cprog)?; std::fs::copy(&exe, &cprog)?; make_executable(&cprog)?; #[cfg(target_os = "illumos")] - { + if !cf.persistent() { /* * Copy SMF method script and manifest into place. + * Skipped in persistent mode as these require root privileges. */ - let method = include_str!("../smf/start.sh"); - make_dirs_for(METHOD)?; - rmfile(METHOD)?; - write_text(METHOD, method)?; - make_executable(METHOD)?; + let method_content = include_str!("../smf/start.sh"); + let method = cf.method_path(); + make_dirs_for(&method)?; + rmfile(&method)?; + write_text(&method, method_content)?; + make_executable(&method)?; let manifest = include_str!("../smf/agent.xml"); rmfile(MANIFEST)?; @@ -1245,6 +1244,7 @@ async fn cmd_install(mut l: Level) -> Result<()> { /* * Create the input directory. 
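+         * (In normal mode this is the INPUT_DATASET ZFS dataset, rpool/input,
+         * which backs the INPUT_PATH directory, /input.)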
+ * Skipped in persistent mode - input dir is provided externally. */ let status = Command::new("/sbin/zfs") .arg("create") @@ -1262,6 +1262,7 @@ async fn cmd_install(mut l: Level) -> Result<()> { /* * Import SMF service. + * Skipped in persistent mode - agent is run directly by the factory. */ let status = Command::new("/usr/sbin/svccfg") .arg("import") @@ -1277,14 +1278,16 @@ async fn cmd_install(mut l: Level) -> Result<()> { } #[cfg(target_os = "linux")] - { + if !cf.persistent() { /* * Create the input directory. + * Skipped in persistent mode - input dir is provided externally. */ std::fs::create_dir_all(INPUT_PATH)?; /* * Write a systemd unit file for the agent service. + * Skipped in persistent mode - agent is run directly by the factory. */ let unit = include_str!("../systemd/agent.service"); make_dirs_for(UNIT)?; From 3e5204c411063ed2a4dfd6bc7b25b91c0c940591 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 16:43:40 -0800 Subject: [PATCH 19/20] agent: update control module for config-based socket path Server side: - listen() now takes socket_path parameter instead of calling socket_path() - Creates parent directory if needed (for persistent mode custom paths) - cmd_run passes cf.socket_path() to listen() Client side (bmat): - socket_path() reads BUILDOMAT_SOCKET env var if set - Defaults to /var/run/buildomat.sock if not set - Agent sets BUILDOMAT_SOCKET when running jobs (next commit) This allows the socket path to vary based on persistent mode config while keeping bmat able to find the socket via env var. --- agent/src/control/mod.rs | 21 +++++++++++++++++---- agent/src/control/server.rs | 21 +++++++++++---------- agent/src/main.rs | 2 +- 3 files changed, 29 insertions(+), 15 deletions(-) diff --git a/agent/src/control/mod.rs b/agent/src/control/mod.rs index f9542c3..21ebe6a 100644 --- a/agent/src/control/mod.rs +++ b/agent/src/control/mod.rs @@ -18,9 +18,14 @@ use protocol::{Decoder, FactoryInfo, Message, Payload}; pub(crate) mod protocol; pub(crate) mod server; -/// Get the control socket path. -pub fn socket_path() -> String { - "/var/run/buildomat.sock".to_string() +/// Default socket path. +const DEFAULT_SOCKET_PATH: &str = "/var/run/buildomat.sock"; + +/// Get the control socket path for client (bmat) connections. +/// Uses BUILDOMAT_SOCKET env var if set (set by agent when running jobs), +/// otherwise falls back to the default path. 
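+/// For example, an agent installed with --persistent and the default opt_base
+/// exports BUILDOMAT_SOCKET=/opt/buildomat/run/buildomat.sock to its jobs.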
+fn socket_path() -> String { + std::env::var("BUILDOMAT_SOCKET").unwrap_or_else(|_| DEFAULT_SOCKET_PATH.to_string()) } struct Stuff { @@ -587,7 +592,15 @@ mod tests { use super::*; #[test] - fn test_socket_path_value() { + fn test_socket_path_default() { + std::env::remove_var("BUILDOMAT_SOCKET"); assert_eq!(socket_path(), "/var/run/buildomat.sock"); } + + #[test] + fn test_socket_path_from_env() { + std::env::set_var("BUILDOMAT_SOCKET", "/custom/path/buildomat.sock"); + assert_eq!(socket_path(), "/custom/path/buildomat.sock"); + std::env::remove_var("BUILDOMAT_SOCKET"); + } } diff --git a/agent/src/control/server.rs b/agent/src/control/server.rs index b0f8d23..69e78b9 100644 --- a/agent/src/control/server.rs +++ b/agent/src/control/server.rs @@ -15,10 +15,7 @@ use tokio::{ }, }; -use super::{ - protocol::{Decoder, Message, Payload}, - socket_path, -}; +use super::protocol::{Decoder, Message, Payload}; #[derive(Debug)] pub struct Request { @@ -45,21 +42,25 @@ impl Request { } } -pub fn listen() -> Result> { +pub fn listen(socket_path: &std::path::Path) -> Result> { /* * Create the UNIX socket that the control program will use to contact the * agent. */ - let path = socket_path(); - std::fs::remove_file(&path).ok(); - let ul = UnixListener::bind(&path)?; + // Create parent directory if it doesn't exist (needed for persistent mode) + if let Some(parent) = socket_path.parent() { + std::fs::create_dir_all(parent).ok(); + } + + std::fs::remove_file(socket_path).ok(); + let ul = UnixListener::bind(socket_path)?; /* * Allow everyone to connect: */ - let mut perm = std::fs::metadata(&path)?.permissions(); + let mut perm = std::fs::metadata(socket_path)?.permissions(); perm.set_mode(0o777); - std::fs::set_permissions(&path, perm)?; + std::fs::set_permissions(socket_path, perm)?; /* * Create channel to hand requests back to the main loop. diff --git a/agent/src/main.rs b/agent/src/main.rs index 617df67..f613f21 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -1374,7 +1374,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { let mut pingfreq = tokio::time::interval(Duration::from_secs(5)); pingfreq.set_missed_tick_behavior(MissedTickBehavior::Skip); - let mut control = control::server::listen()?; + let mut control = control::server::listen(&cf.socket_path())?; let mut creq: Option = None; let mut bgprocs = exec::BackgroundProcesses::new(); From faee0228bbf3d0c2e01369d991999a9ec7994dff Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 10 Dec 2025 18:04:44 -0800 Subject: [PATCH 20/20] agent: update cmd_run and task execution for persistent mode Use ConfigFile methods throughout cmd_run for all path computation. Remove standalone config_path() and job_path() functions. Add task environment fields to ClientWrap: - persistent: whether running in persistent mode - socket_path: for BUILDOMAT_SOCKET env var - task_path: PATH with $opt_base/bin prepended in persistent mode - task_home: HOME env var value (current user's $HOME or /root) - task_user: USER/LOGNAME env var value - task_cwd: working directory for diagnostic scripts Update start_diag_script() and task execution to use these precomputed values instead of duplicating if/else logic. In persistent mode: - HOME/USER/LOGNAME use current user's environment - PATH includes $opt_base/bin so bmat is discoverable - Skip uid/gid setting (runs as current user) - Set BUILDOMAT_SOCKET for jobs to find control socket Call cf.mark_job_done() at all job completion points to rename job.json to job-done.json. 
This rename is compatible with ephemeral instances but not needed there. For a persistent instance, it is the most basic, but not complete, cleanup. --- agent/src/main.rs | 226 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 171 insertions(+), 55 deletions(-) diff --git a/agent/src/main.rs b/agent/src/main.rs index f613f21..6334bee 100644 --- a/agent/src/main.rs +++ b/agent/src/main.rs @@ -46,16 +46,6 @@ struct Agent { config_path: PathBuf, } -/// Path to the agent configuration file. -fn config_path() -> PathBuf { - PathBuf::from("/opt/buildomat/etc/agent.json") -} - -/// Path to the job state file. -fn job_path() -> PathBuf { - PathBuf::from("/opt/buildomat/etc/job.json") -} - const INPUT_PATH: &str = "/input"; const CONTROL_PROGRAM: &str = "bmat"; const SHADOW: &str = "/etc/shadow"; @@ -103,31 +93,26 @@ impl ConfigFile { } /// Base URL for buildomat server. - #[allow(dead_code)] pub fn baseurl(&self) -> &str { &self.baseurl } /// Bootstrap token for initial registration. - #[allow(dead_code)] pub fn bootstrap(&self) -> &str { &self.bootstrap } /// Authentication token. - #[allow(dead_code)] pub fn token(&self) -> &str { &self.token } /// Whether running in persistent mode. - #[allow(dead_code)] pub fn persistent(&self) -> bool { self.persistent } /// Base directory for buildomat agent files. - #[allow(dead_code)] fn opt_base(&self) -> PathBuf { self.opt_base .as_ref() @@ -136,19 +121,16 @@ impl ConfigFile { } /// Path to the job state file (indicates job in progress). - #[allow(dead_code)] pub fn job_path(&self) -> PathBuf { self.opt_base().join("etc/job.json") } /// Path to the job completion marker file. - #[allow(dead_code)] pub fn job_done_path(&self) -> PathBuf { self.opt_base().join("etc/job-done.json") } /// Path to the agent binary. - #[allow(dead_code)] pub fn agent_path(&self) -> PathBuf { self.opt_base().join("lib/agent") } @@ -156,7 +138,6 @@ impl ConfigFile { /// Path to the control program (bmat). /// In persistent mode, installed under opt_base. /// In normal mode, installed to /usr/bin. - #[allow(dead_code)] pub fn control_program_path(&self) -> PathBuf { if self.persistent { self.opt_base().join("bin").join(CONTROL_PROGRAM) @@ -168,7 +149,6 @@ impl ConfigFile { /// Path to the control socket. /// In persistent mode, under opt_base/run. /// In normal mode, /var/run/buildomat.sock. - #[allow(dead_code)] pub fn socket_path(&self) -> PathBuf { if self.persistent { self.opt_base().join("run/buildomat.sock") @@ -179,15 +159,65 @@ impl ConfigFile { /// Path to the SMF method script (illumos only). #[cfg(target_os = "illumos")] - #[allow(dead_code)] pub fn method_path(&self) -> PathBuf { self.opt_base().join("lib/start.sh") } + /// Compute the HOME environment variable for task execution. + /// In persistent mode, uses current user's HOME; otherwise /root. + pub fn task_home(&self) -> String { + if self.persistent { + env::var("HOME").unwrap_or_else(|_| "/tmp".to_string()) + } else { + "/root".to_string() + } + } + + /// Compute the USER/LOGNAME environment variable for task execution. + /// In persistent mode, uses current user; otherwise root. + pub fn task_user(&self) -> String { + if self.persistent { + env::var("USER").unwrap_or_else(|_| "nobody".to_string()) + } else { + "root".to_string() + } + } + + /// Compute the working directory for task execution. + /// In persistent mode, uses user's home directory (writable). + /// In normal mode, uses "/" (ephemeral VM, doesn't matter). 
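+    /// (If HOME is unset in persistent mode, task_home() falls back to /tmp
+    /// and the working directory follows it.)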
+ pub fn task_cwd(&self) -> PathBuf { + if self.persistent { + PathBuf::from(self.task_home()) + } else { + PathBuf::from("/") + } + } + + /// Compute the PATH environment variable for task execution. + /// In persistent mode, prepends opt_base/bin so bmat is findable. + pub fn task_path(&self) -> String { + let mut paths: Vec = Vec::new(); + + if self.persistent { + paths.push(self.opt_base().join("bin").to_string_lossy().to_string()); + } + + paths.extend([ + "/usr/bin".to_string(), + "/bin".to_string(), + "/usr/sbin".to_string(), + "/sbin".to_string(), + "/opt/ooce/bin".to_string(), + "/opt/ooce/sbin".to_string(), + ]); + + paths.join(":") + } + /// Mark job as complete by renaming job.json to job-done.json. /// This allows subsequent agent runs to detect that the previous job /// completed successfully rather than crashed. - #[allow(dead_code)] pub fn mark_job_done(&self, log: &Logger) { let job_file = self.job_path(); let done_file = self.job_done_path(); @@ -228,7 +258,19 @@ impl ConfigFile { rx, )); - ClientWrap { log, client, job: None, tx: None, worker_tx } + ClientWrap { + log, + client, + job: None, + tx: None, + worker_tx, + persistent: self.persistent, + socket_path: self.socket_path().to_string_lossy().to_string(), + task_path: self.task_path(), + task_home: self.task_home(), + task_user: self.task_user(), + task_cwd: self.task_cwd(), + } } } @@ -422,6 +464,18 @@ pub(crate) struct ClientWrap { job: Option>, tx: Option>, worker_tx: mpsc::Sender, + /// Whether running in persistent mode. + persistent: bool, + /// Path to the socket (for setting BUILDOMAT_SOCKET env var). + socket_path: String, + /// PATH environment variable for task execution. + task_path: String, + /// HOME environment variable for task execution. + task_home: String, + /// USER/LOGNAME environment variable for task execution. + task_user: String, + /// Working directory for task execution. 
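+    /// Precomputed from ConfigFile::task_cwd() when the wrapper is built.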
+ task_cwd: PathBuf, } impl ClientWrap { @@ -780,23 +834,23 @@ impl ClientWrap { */ cmd.env_clear(); - cmd.env("HOME", "/root"); - cmd.env("USER", "root"); - cmd.env("LOGNAME", "root"); + cmd.env("HOME", &self.task_home); + cmd.env("USER", &self.task_user); + cmd.env("LOGNAME", &self.task_user); cmd.env("TZ", "UTC"); - cmd.env( - "PATH", - "/usr/bin:/bin:/usr/sbin:/sbin:/opt/ooce/bin:/opt/ooce/sbin", - ); + cmd.env("PATH", &self.task_path); cmd.env("LANG", "en_US.UTF-8"); cmd.env("LC_ALL", "en_US.UTF-8"); + cmd.env("BUILDOMAT_SOCKET", &self.socket_path); /* - * Run the diagnostic script as root: + * Run the diagnostic script as root (skip uid/gid in persistent mode): */ - cmd.current_dir("/"); - cmd.uid(0); - cmd.gid(0); + cmd.current_dir(&self.task_cwd); + if !self.persistent { + cmd.uid(0); + cmd.gid(0); + } match exec::run_diagnostic(cmd, name) { Ok(c) => Some(c), @@ -1341,17 +1395,18 @@ async fn cmd_install(mut l: Level) -> Result<()> { async fn cmd_run(mut l: Level) -> Result<()> { no_args!(l); - let cf = load::<_, ConfigFile>(config_path())?; + let config_path = l.context().config_path.clone(); + let cf = load::<_, ConfigFile>(&config_path)?; let log = l.context().log.clone(); - info!(log, "agent starting"; "baseurl" => &cf.baseurl); + info!(log, "agent starting"; "baseurl" => cf.baseurl()); let mut cw = cf.make_client(log.clone()); let res = loop { let res = cw .client .worker_bootstrap() - .body_map(|body| body.bootstrap(&cf.bootstrap).token(&cf.token)) + .body_map(|body| body.bootstrap(cf.bootstrap()).token(cf.token())) .send() .await; @@ -1395,7 +1450,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { * any evidence that we've done this before, we must report it to the * central server and do nothing else. */ - if job_path().try_exists()? { + if cf.job_path().try_exists()? { error!(log, "found previously assigned job; reporting failure"); /* @@ -1533,7 +1588,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { .create_new(true) .create(false) .write(true) - .open(job_path())?; + .open(cf.job_path())?; jf.write_all(&diag)?; jf.flush()?; jf.sync_all()?; @@ -1758,21 +1813,17 @@ async fn cmd_run(mut l: Level) -> Result<()> { /* * Absent a request for an entirely clean slate, we * set a few specific environment variables. - * XXX HOME/USER/LOGNAME should probably respect "uid" */ - cmd.env("HOME", "/root"); - cmd.env("USER", "root"); - cmd.env("LOGNAME", "root"); + cmd.env("HOME", &cw.task_home); + cmd.env("USER", &cw.task_user); + cmd.env("LOGNAME", &cw.task_user); cmd.env("TZ", "UTC"); - cmd.env( - "PATH", - "/usr/bin:/bin:/usr/sbin:/sbin:\ - /opt/ooce/bin:/opt/ooce/sbin", - ); + cmd.env("PATH", &cw.task_path); cmd.env("LANG", "en_US.UTF-8"); cmd.env("LC_ALL", "en_US.UTF-8"); cmd.env("BUILDOMAT_JOB_ID", cw.job_id().unwrap()); cmd.env("BUILDOMAT_TASK_ID", t.id.to_string()); + cmd.env("BUILDOMAT_SOCKET", &cw.socket_path); } for (k, v) in t.env.iter() { /* @@ -1786,10 +1837,16 @@ async fn cmd_run(mut l: Level) -> Result<()> { /* * Each task may be expected to run under a different user * account or with a different working directory. + * + * In persistent mode, we skip the uid/gid setting and run as + * the current user. This allows testing on persistent testbeds + * without requiring root privileges. 
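+                     * The environment set above (BUILDOMAT_SOCKET, HOME, USER,
+                     * and PATH, which includes opt_base/bin in persistent
+                     * mode) still applies, so bmat stays reachable from job
+                     * scripts either way.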
*/ cmd.current_dir(&t.workdir); - cmd.uid(t.uid); - cmd.gid(t.gid); + if !cw.persistent { + cmd.uid(t.uid); + cmd.gid(t.gid); + } match exec::run(cmd) { Ok(c) => { @@ -1967,9 +2024,11 @@ async fn cmd_run(mut l: Level) -> Result<()> { stage = Stage::PostDiagnostics(c, None); } else { cw.diagnostics_complete(false).await; + cf.mark_job_done(&log); stage = Stage::Complete; } } else { + cf.mark_job_done(&log); stage = Stage::Complete; continue; } @@ -2005,6 +2064,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { */ cw.diagnostics_complete(failed.unwrap()).await; + cf.mark_job_done(&log); stage = Stage::Complete; } None => { @@ -2012,6 +2072,7 @@ async fn cmd_run(mut l: Level) -> Result<()> { .await; cw.diagnostics_complete(false).await; + cf.mark_job_done(&log); stage = Stage::Complete; } } @@ -2130,11 +2191,6 @@ mod tests { } } - #[test] - fn test_config_path_value() { - assert_eq!(config_path(), PathBuf::from("/opt/buildomat/etc/agent.json")); - } - #[test] fn test_job_path_default() { let cf = test_config(); @@ -2223,6 +2279,66 @@ mod tests { ); } + #[test] + fn test_task_path_normal_mode() { + let cf = test_config(); + assert_eq!( + cf.task_path(), + "/usr/bin:/bin:/usr/sbin:/sbin:/opt/ooce/bin:/opt/ooce/sbin" + ); + } + + #[test] + fn test_task_path_persistent_mode() { + let cf = test_config_persistent(); + assert_eq!( + cf.task_path(), + "/opt/buildomat/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/ooce/bin:/opt/ooce/sbin" + ); + } + + #[test] + fn test_task_home_normal_mode() { + let cf = test_config(); + assert_eq!(cf.task_home(), "/root"); + } + + #[test] + fn test_task_user_normal_mode() { + let cf = test_config(); + assert_eq!(cf.task_user(), "root"); + } + + #[test] + fn test_task_cwd_normal_mode() { + let cf = test_config(); + assert_eq!(cf.task_cwd(), PathBuf::from("/")); + } + + #[test] + fn test_task_cwd_persistent_mode() { + let cf = test_config_persistent(); + // In persistent mode, task_cwd uses task_home which reads env var + // For this test, we just verify it's not "/" + assert_ne!(cf.task_cwd(), PathBuf::from("/")); + } + + #[test] + fn test_task_path_persistent_custom_opt() { + let cf = ConfigFile { + version: 2, + baseurl: "http://test".to_string(), + bootstrap: "bootstrap".to_string(), + token: "token".to_string(), + opt_base: Some("/custom/path".to_string()), + persistent: true, + }; + assert_eq!( + cf.task_path(), + "/custom/path/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/ooce/bin:/opt/ooce/sbin" + ); + } + #[test] fn test_version_default_deserialization() { // Test that missing version field defaults to 1