From c903c4292e2dd9926c16cf24a2782775ce3fe202 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Mon, 27 Apr 2026 14:14:27 +0000 Subject: [PATCH 01/64] add --docker build flag and verify command --- Cargo.lock | 1 + cmd/soroban-cli/Cargo.toml | 3 + cmd/soroban-cli/src/commands/container/mod.rs | 2 +- .../src/commands/contract/build.rs | 186 ++++++++-- .../src/commands/contract/build_docker.rs | 338 ++++++++++++++++++ .../src/commands/contract/deploy/wasm.rs | 6 +- cmd/soroban-cli/src/commands/contract/mod.rs | 11 +- .../src/commands/contract/upload.rs | 6 +- .../src/commands/contract/verify.rs | 137 +++++++ 9 files changed, 652 insertions(+), 38 deletions(-) create mode 100644 cmd/soroban-cli/src/commands/contract/build_docker.rs create mode 100644 cmd/soroban-cli/src/commands/contract/verify.rs diff --git a/Cargo.lock b/Cargo.lock index 7e5dc91354..b769eb65ec 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5400,6 +5400,7 @@ dependencies = [ "indexmap 2.11.0", "itertools 0.10.5", "keyring", + "libc", "mockito", "num-bigint", "open", diff --git a/cmd/soroban-cli/Cargo.toml b/cmd/soroban-cli/Cargo.toml index f443372043..56a1b88351 100644 --- a/cmd/soroban-cli/Cargo.toml +++ b/cmd/soroban-cli/Cargo.toml @@ -127,6 +127,9 @@ whoami = "1.5.2" serde_with = "3.11.0" rustc_version = "0.4.1" +[target.'cfg(unix)'.dependencies] +libc = "0.2" + [build-dependencies] crate-git-revision = "0.0.6" serde.workspace = true diff --git a/cmd/soroban-cli/src/commands/container/mod.rs b/cmd/soroban-cli/src/commands/container/mod.rs index 08203095d3..6d025cac4e 100644 --- a/cmd/soroban-cli/src/commands/container/mod.rs +++ b/cmd/soroban-cli/src/commands/container/mod.rs @@ -1,7 +1,7 @@ use crate::commands::global; pub(crate) mod logs; -mod shared; +pub(crate) mod shared; pub(crate) mod start; pub(crate) mod stop; diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index ad0012e8a4..7065ea3812 
100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -19,7 +19,9 @@ use stellar_xdr::curr::{Limited, Limits, ScMetaEntry, ScMetaV0, StringM, WriteXd #[cfg(feature = "additional-libs")] use crate::commands::contract::optimize; use crate::{ - commands::{global, version}, + commands::{ + container::shared::Args as ContainerArgs, contract::build_docker, global, version, + }, print::Print, wasm, }; @@ -95,6 +97,27 @@ pub struct Cmd { #[arg(long, conflicts_with = "out_dir", help_heading = "Other")] pub print_commands_only: bool, + /// Run `cargo rustc` inside a Docker container for reproducible builds. + /// + /// Without a value: pulls `docker.io/library/rust:latest` (linux/amd64) + /// and records the resolved image digest in contract metadata. + /// + /// With a value: uses the specified image. Pin via `@sha256:...` + /// for fully-reproducible builds. + #[arg( + long, + num_args = 0..=1, + require_equals = true, + default_missing_value = "docker.io/library/rust:latest", + value_name = "IMAGE", + value_parser = parse_docker_image, + help_heading = "Reproducible Build", + )] + pub docker: Option, + + #[command(flatten)] + pub container_args: ContainerArgs, + #[command(flatten)] pub build_args: BuildArgs, } @@ -112,6 +135,15 @@ pub struct BuildArgs { pub optimize: bool, } +fn parse_docker_image(s: &str) -> Result { + if s.is_empty() { + return Err( + "image cannot be empty; pass --docker without a value to use the default image, or --docker=".to_string(), + ); + } + Ok(s.to_string()) +} + pub fn parse_meta_arg(s: &str) -> Result<(String, String), Error> { let parts = s.splitn(2, '='); @@ -190,6 +222,9 @@ pub enum Error { #[error("wasm parsing error: {0}")] WasmParsing(String), + + #[error(transparent)] + Docker(#[from] build_docker::Error), } const WASM_TARGET: &str = "wasm32v1-none"; @@ -208,6 +243,8 @@ impl Default for Cmd { out_dir: None, locked: false, print_commands_only: false, + docker: None, + 
container_args: ContainerArgs { docker_host: None }, build_args: BuildArgs::default(), } } @@ -216,16 +253,17 @@ impl Default for Cmd { impl Cmd { /// Builds the project and returns the built WASM artifacts. #[allow(clippy::too_many_lines)] - pub fn run(&self, global_args: &global::Args) -> Result, Error> { + pub async fn run(&self, global_args: &global::Args) -> Result, Error> { let print = Print::new(global_args.quiet); let working_dir = env::current_dir().map_err(Error::GettingCurrentDir)?; let metadata = self.metadata()?; let packages = self.packages(&metadata)?; let target_dir = &metadata.target_directory; + let workspace_root = metadata.workspace_root.as_std_path().to_path_buf(); // Run build configuration checks (only when actually building) if !self.print_commands_only { - run_checks(metadata.workspace_root.as_std_path(), &self.profile)?; + run_checks(&workspace_root, &self.profile)?; } if let Some(package) = &self.package { @@ -237,62 +275,121 @@ impl Cmd { } let wasm_target = get_wasm_target()?; + // When building inside Docker, force --locked to keep builds deterministic. + let locked = self.locked || self.docker.is_some(); let mut built_contracts = Vec::new(); for p in packages { - let mut cmd = Command::new("cargo"); - cmd.stdout(Stdio::piped()); - cmd.arg("rustc"); - if self.locked { - cmd.arg("--locked"); + // Build cargo args + env once; both host and docker paths consume them. + let manifest_path = if self.docker.is_some() { + // Inside the container the workspace is mounted at /work, so make + // the manifest path relative to the workspace root. 
+ let rel = pathdiff::diff_paths(&p.manifest_path, &workspace_root) + .unwrap_or(p.manifest_path.clone().into()); + Path::new(build_docker::WORK_DIR).join(rel) + } else { + pathdiff::diff_paths(&p.manifest_path, &working_dir) + .unwrap_or(p.manifest_path.clone().into()) + }; + + let mut cargo_args: Vec = Vec::new(); + if locked { + cargo_args.push("--locked".to_string()); } - let manifest_path = pathdiff::diff_paths(&p.manifest_path, &working_dir) - .unwrap_or(p.manifest_path.clone().into()); - cmd.arg(format!( + cargo_args.push(format!( "--manifest-path={}", manifest_path.to_string_lossy() )); - cmd.arg("--crate-type=cdylib"); - cmd.arg(format!("--target={wasm_target}")); + cargo_args.push("--crate-type=cdylib".to_string()); + cargo_args.push(format!("--target={wasm_target}")); if self.profile == "release" { - cmd.arg("--release"); + cargo_args.push("--release".to_string()); } else { - cmd.arg(format!("--profile={}", self.profile)); + cargo_args.push(format!("--profile={}", self.profile)); } if self.all_features { - cmd.arg("--all-features"); + cargo_args.push("--all-features".to_string()); } if self.no_default_features { - cmd.arg("--no-default-features"); + cargo_args.push("--no-default-features".to_string()); } if let Some(features) = self.features() { let requested: HashSet = features.iter().cloned().collect(); let available = p.features.iter().map(|f| f.0).cloned().collect(); let activate = requested.intersection(&available).join(","); if !activate.is_empty() { - cmd.arg(format!("--features={activate}")); + cargo_args.push(format!("--features={activate}")); } } + let mut env_vars: Vec<(String, String)> = Vec::new(); if let Some(rustflags) = make_rustflags_to_remap_absolute_paths(&print)? 
{ - cmd.env("CARGO_BUILD_RUSTFLAGS", rustflags); + env_vars.push(("CARGO_BUILD_RUSTFLAGS".to_string(), rustflags)); } + env_vars.push(( + "SOROBAN_SDK_BUILD_SYSTEM_SUPPORTS_SPEC_SHAKING_V2".to_string(), + "1".to_string(), + )); - // Set env var to inform the SDK that this CLI supports spec - // optimization using markers. - cmd.env("SOROBAN_SDK_BUILD_SYSTEM_SUPPORTS_SPEC_SHAKING_V2", "1"); - - let cmd_str = serialize_command(&cmd); + // Resolved image digest, populated only on the docker path. Embedded + // in contract metadata as `bldimg`. + let mut bldimg: Option = None; + + if let Some(image) = &self.docker { + let source_date_epoch = build_docker::source_date_epoch(&workspace_root); + if self.print_commands_only { + let line = build_docker::print_docker_command( + &workspace_root, + target_dir.as_std_path(), + &cargo_args, + &env_vars, + image, + &source_date_epoch, + )?; + println!("{line}"); + continue; + } - if self.print_commands_only { - println!("{cmd_str}"); + let summary = format_docker_summary(image, &cargo_args, &env_vars); + print.infoln(summary); + + let resolved = build_docker::run_cargo_rustc_in_docker(build_docker::DockerRun { + workspace_root: &workspace_root, + target_dir: target_dir.as_std_path(), + cargo_args: cargo_args.clone(), + env_vars: env_vars.clone(), + image_ref: image.clone(), + source_date_epoch, + container_args: &self.container_args, + print: &print, + }) + .await?; + bldimg = Some(resolved); } else { + let mut cmd = Command::new("cargo"); + cmd.stdout(Stdio::piped()); + cmd.arg("rustc"); + for a in &cargo_args { + cmd.arg(a); + } + for (k, v) in &env_vars { + cmd.env(k, v); + } + + let cmd_str = serialize_command(&cmd); + + if self.print_commands_only { + println!("{cmd_str}"); + continue; + } print.infoln(cmd_str); let status = cmd.status().map_err(Error::CargoCmd)?; if !status.success() { return Err(Error::Exit(status)); } + } + { let wasm_name = p.name.replace('-', "_"); let file = format!("{wasm_name}.wasm"); let 
target_file_path = Path::new(target_dir) @@ -300,7 +397,7 @@ impl Cmd { .join(&self.profile) .join(&file); - self.inject_meta(&target_file_path)?; + self.inject_meta(&target_file_path, bldimg.as_deref())?; Self::filter_spec(&target_file_path)?; let final_path = if let Some(out_dir) = &self.out_dir { @@ -417,9 +514,9 @@ impl Cmd { cmd.exec() } - fn inject_meta(&self, target_file_path: &PathBuf) -> Result<(), Error> { + fn inject_meta(&self, target_file_path: &PathBuf, bldimg: Option<&str>) -> Result<(), Error> { let mut wasm_bytes = fs::read(target_file_path).map_err(Error::ReadingWasmFile)?; - let xdr = self.encoded_new_meta()?; + let xdr = self.encoded_new_meta(bldimg)?; wasm_gen::write_custom_section(&mut wasm_bytes, META_CUSTOM_SECTION_NAME, &xdr); // Deleting .wasm file effectively unlinking it from /release/deps/.wasm preventing from overwrite @@ -468,7 +565,7 @@ impl Cmd { fs::write(target_file_path, new_wasm).map_err(Error::WritingWasmFile) } - fn encoded_new_meta(&self) -> Result, Error> { + fn encoded_new_meta(&self, bldimg: Option<&str>) -> Result, Error> { let mut new_meta: Vec = Vec::new(); // Always inject CLI version @@ -478,6 +575,18 @@ impl Cmd { }); new_meta.push(cli_meta_entry); + // Reproducible build image (only when --docker was used). 
+ if let Some(image) = bldimg { + let key: StringM = "bldimg" + .to_string() + .try_into() + .map_err(|e| Error::MetaArg(format!("bldimg is an invalid metadata key: {e}")))?; + let val: StringM = image.to_string().try_into().map_err(|e| { + Error::MetaArg(format!("{image} is an invalid metadata value: {e}")) + })?; + new_meta.push(ScMetaEntry::ScMetaV0(ScMetaV0 { key, val })); + } + // Add args provided meta for (k, v) in self.build_args.meta.clone() { let key: StringM = k @@ -574,6 +683,23 @@ impl Cmd { } } +fn format_docker_summary(image: &str, cargo_args: &[String], env_vars: &[(String, String)]) -> String { + let mut parts = Vec::::new(); + parts.push(format!("docker[{image}]")); + for (k, v) in env_vars { + parts.push(format!( + "{k}={}", + shell_escape::escape(v.into()).into_owned() + )); + } + parts.push("cargo".to_string()); + parts.push("rustc".to_string()); + for a in cargo_args { + parts.push(shell_escape::escape(a.into()).into_owned()); + } + parts.join(" ") +} + fn serialize_command(cmd: &Command) -> String { let mut parts = Vec::::new(); parts.extend(cmd.get_envs().map(|(key, val)| { diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs new file mode 100644 index 0000000000..b456334708 --- /dev/null +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -0,0 +1,338 @@ +use std::path::Path; + +use bollard::{ + models::ContainerCreateBody, + query_parameters::{ + CreateContainerOptions, CreateImageOptions, LogsOptions, RemoveContainerOptions, + StartContainerOptions, WaitContainerOptions, + }, + service::HostConfig, + Docker, +}; +use futures_util::{StreamExt, TryStreamExt}; + +use crate::{ + commands::container::shared::{Args as ContainerArgs, Error as ContainerError}, + print::Print, +}; + +pub const DEFAULT_IMAGE: &str = "docker.io/library/rust:latest"; +const PLATFORM: &str = "linux/amd64"; +pub const WORK_DIR: &str = "/work"; +const TARGET_DIR: &str = "/target"; +const 
REGISTRY_DIR: &str = "/usr/local/cargo/registry"; + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error("cannot connect to docker daemon; is the daemon running? ({0})")] + DockerNotRunning(ContainerError), + + #[error("pulling docker image {image}: {source}")] + DockerImagePull { + image: String, + source: bollard::errors::Error, + }, + + #[error("inspecting docker image {image}: {source}")] + DockerImageInspect { + image: String, + source: bollard::errors::Error, + }, + + #[error("docker image {image} has no repository digest; pin via --docker=/@sha256:...")] + DockerNoDigest { image: String }, + + #[error("build failed inside docker container (exit {0})")] + DockerBuildExit(i64), + + #[error("docker run: {0}")] + DockerRun(#[from] bollard::errors::Error), + + #[error("resolving CARGO_HOME: {0}")] + CargoHome(std::io::Error), +} + +/// Inputs for a single `cargo rustc` invocation inside a container. +pub struct DockerRun<'a> { + pub workspace_root: &'a Path, + pub target_dir: &'a Path, + pub cargo_args: Vec, + pub env_vars: Vec<(String, String)>, + pub image_ref: String, + pub source_date_epoch: String, + pub container_args: &'a ContainerArgs, + pub print: &'a Print, +} + +/// Pull (if needed), run `cargo rustc` inside the container, and return the +/// fully-qualified image reference including the resolved `@sha256:...` digest. 
+pub async fn run_cargo_rustc_in_docker(run: DockerRun<'_>) -> Result { + let docker: Docker = match run.container_args.connect_to_docker(run.print).await { + Ok(d) => d, + Err(e) => return Err(map_connect_error(e)), + }; + + pull_image(&docker, &run.image_ref, run.print).await?; + let resolved_image = resolve_image_digest(&docker, &run.image_ref).await?; + + let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; + let registry = cargo_home.join("registry"); + + let binds = vec![ + format!("{}:{}", run.workspace_root.display(), WORK_DIR), + format!("{}:{}", run.target_dir.display(), TARGET_DIR), + format!("{}:{}", registry.display(), REGISTRY_DIR), + ]; + + let mut env: Vec = run + .env_vars + .iter() + .map(|(k, v)| format!("{k}={v}")) + .collect(); + env.push(format!("CARGO_TARGET_DIR={TARGET_DIR}")); + env.push(format!("SOURCE_DATE_EPOCH={}", run.source_date_epoch)); + + let mut cmd = vec!["cargo".to_string(), "rustc".to_string()]; + cmd.extend(run.cargo_args.iter().cloned()); + + let user = current_uid_gid(); + + let config = ContainerCreateBody { + image: Some(resolved_image.clone()), + cmd: Some(cmd), + env: Some(env), + working_dir: Some(WORK_DIR.to_string()), + user, + attach_stdout: Some(true), + attach_stderr: Some(true), + host_config: Some(HostConfig { + binds: Some(binds), + network_mode: Some("none".to_string()), + auto_remove: Some(false), + ..Default::default() + }), + ..Default::default() + }; + + let create_resp = docker + .create_container(None::, config) + .await?; + let container_id = create_resp.id; + + let result = run_and_wait(&docker, &container_id, run.print).await; + + let _ = docker + .remove_container( + &container_id, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await; + + result?; + + Ok(resolved_image) +} + +async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Result<(), Error> { + docker + .start_container(container_id, None::) + .await?; + + let logs_opts = 
LogsOptions { + follow: true, + stdout: true, + stderr: true, + ..Default::default() + }; + let mut log_stream = docker.logs(container_id, Some(logs_opts)); + while let Some(item) = log_stream.next().await { + match item { + Ok(out) => { + let s = out.to_string(); + let s = s.trim_end_matches('\n'); + if !s.is_empty() { + print.infoln(s); + } + } + Err(e) => return Err(Error::DockerRun(e)), + } + } + + let mut wait_stream = docker.wait_container(container_id, None::); + let mut exit_code: i64 = 0; + while let Some(res) = wait_stream.next().await { + match res { + Ok(r) => exit_code = r.status_code, + Err(bollard::errors::Error::DockerContainerWaitError { code, .. }) => { + exit_code = code; + } + Err(e) => return Err(Error::DockerRun(e)), + } + } + if exit_code != 0 { + return Err(Error::DockerBuildExit(exit_code)); + } + Ok(()) +} + +async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { + let opts = CreateImageOptions { + from_image: Some(image.to_string()), + platform: PLATFORM.to_string(), + ..Default::default() + }; + let mut stream = docker.create_image(Some(opts), None, None); + while let Some(item) = stream.try_next().await.map_err(|e| Error::DockerImagePull { + image: image.to_string(), + source: e, + })? 
{ + if let Some(status) = item.status { + if status.contains("Pulling from") || status.contains("Digest") || status.contains("Status") { + print.infoln(status); + } + } + } + Ok(()) +} + +async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { + if let Some(digest) = parse_pinned_digest(image) { + return Ok(format!("{}@{}", strip_digest(image), digest)); + } + let info = docker + .inspect_image(image) + .await + .map_err(|e| Error::DockerImageInspect { + image: image.to_string(), + source: e, + })?; + let repo_digests = info.repo_digests.unwrap_or_default(); + let first = repo_digests + .into_iter() + .next() + .ok_or_else(|| Error::DockerNoDigest { + image: image.to_string(), + })?; + Ok(first) +} + +fn parse_pinned_digest(image: &str) -> Option { + let (_, after) = image.rsplit_once('@')?; + if after.starts_with("sha256:") { + Some(after.to_string()) + } else { + None + } +} + +fn strip_digest(image: &str) -> &str { + image.split_once('@').map_or(image, |(name, _)| name) +} + +fn map_connect_error(e: ContainerError) -> Error { + Error::DockerNotRunning(e) +} + +#[allow(clippy::unnecessary_wraps)] +#[cfg(unix)] +fn current_uid_gid() -> Option { + // SAFETY: getuid/getgid are infallible POSIX calls returning the real + // user/group ID of the calling process. + let uid = unsafe { libc::getuid() }; + let gid = unsafe { libc::getgid() }; + Some(format!("{uid}:{gid}")) +} + +#[cfg(not(unix))] +fn current_uid_gid() -> Option { + None +} + +/// Build the equivalent `docker run ...` command line for `--print-commands-only`. 
+pub fn print_docker_command( + workspace_root: &Path, + target_dir: &Path, + cargo_args: &[String], + env_vars: &[(String, String)], + image_ref: &str, + source_date_epoch: &str, +) -> Result { + let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; + let registry = cargo_home.join("registry"); + + let mut parts: Vec = vec![ + "docker".to_string(), + "run".to_string(), + "--rm".to_string(), + format!("--platform={PLATFORM}"), + "--network=none".to_string(), + format!("-w {WORK_DIR}"), + ]; + + if let Some(user) = current_uid_gid() { + parts.push(format!("-u {user}")); + } + + parts.push(shell_escape_kv( + "-v", + &format!("{}:{}", workspace_root.display(), WORK_DIR), + )); + parts.push(shell_escape_kv( + "-v", + &format!("{}:{}", target_dir.display(), TARGET_DIR), + )); + parts.push(shell_escape_kv( + "-v", + &format!("{}:{}", registry.display(), REGISTRY_DIR), + )); + + for (k, v) in env_vars { + parts.push(shell_escape_kv("-e", &format!("{k}={v}"))); + } + parts.push(shell_escape_kv( + "-e", + &format!("CARGO_TARGET_DIR={TARGET_DIR}"), + )); + parts.push(shell_escape_kv( + "-e", + &format!("SOURCE_DATE_EPOCH={source_date_epoch}"), + )); + + parts.push(shell_escape::escape(image_ref.into()).into_owned()); + parts.push("cargo".to_string()); + parts.push("rustc".to_string()); + for a in cargo_args { + parts.push(shell_escape::escape(a.into()).into_owned()); + } + + Ok(parts.join(" ")) +} + +fn shell_escape_kv(flag: &str, value: &str) -> String { + format!( + "{flag} {}", + shell_escape::escape(value.into()).into_owned() + ) +} + +/// Best-effort SOURCE_DATE_EPOCH derived from the workspace's HEAD commit time. +/// Falls back to `"0"` when not in a git repo or git is unavailable. 
+pub fn source_date_epoch(workspace_root: &Path) -> String { + let output = std::process::Command::new("git") + .arg("-C") + .arg(workspace_root) + .args(["log", "-1", "--format=%ct"]) + .output(); + if let Ok(out) = output { + if out.status.success() { + let s = String::from_utf8_lossy(&out.stdout).trim().to_string(); + if !s.is_empty() { + return s; + } + } + } + "0".to_string() +} diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index 6a51b3a078..d9658d900c 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -182,7 +182,7 @@ impl Cmd { return Err(Error::BuildOnlyNotSupported); } - let built_contracts = self.resolve_contracts(global_args)?; + let built_contracts = self.resolve_contracts(global_args).await?; // When --wasm-hash is used, no built contracts are returned. // Deploy directly with the hash. @@ -259,7 +259,7 @@ impl Cmd { Ok(()) } - fn resolve_contracts( + async fn resolve_contracts( &self, global_args: &global::Args, ) -> Result, Error> { @@ -282,7 +282,7 @@ impl Cmd { build_args: self.build_args.clone(), ..build::Cmd::default() }; - let contracts = build_cmd.run(global_args).map_err(|e| match e { + let contracts = build_cmd.run(global_args).await.map_err(|e| match e { build::Error::Metadata(_) => Error::NotInCargoProject, other => other.into(), })?; diff --git a/cmd/soroban-cli/src/commands/contract/mod.rs b/cmd/soroban-cli/src/commands/contract/mod.rs index 294a6d1b00..3e96400390 100644 --- a/cmd/soroban-cli/src/commands/contract/mod.rs +++ b/cmd/soroban-cli/src/commands/contract/mod.rs @@ -3,6 +3,7 @@ pub mod arg_parsing; pub mod asset; pub mod bindings; pub mod build; +pub mod build_docker; pub mod deploy; pub mod extend; pub mod fetch; @@ -16,6 +17,7 @@ pub mod read; pub mod restore; pub mod spec_verify; pub mod upload; +pub mod verify; use crate::{commands::global, print::Print, 
utils::deprecate_message}; @@ -100,6 +102,9 @@ pub enum Cmd { // run as part of `contract build` so for a general user this is not needed. #[command(name = "spec-verify", hide = true)] SpecVerify(spec_verify::Cmd), + + /// Verify a wasm by rebuilding from source in the recorded Docker image. + Verify(verify::Cmd), } #[derive(thiserror::Error, Debug)] @@ -154,6 +159,9 @@ pub enum Error { #[error(transparent)] SpecVerify(#[from] spec_verify::Error), + + #[error(transparent)] + Verify(#[from] verify::Error), } impl Cmd { @@ -164,7 +172,7 @@ impl Cmd { Cmd::Asset(asset) => asset.run(global_args).await?, Cmd::Bindings(bindings) => bindings.run().await?, Cmd::Build(build) => { - build.run(global_args)?; + build.run(global_args).await?; } Cmd::Extend(extend) => extend.run(global_args).await?, Cmd::Alias(alias) => alias.run(global_args)?, @@ -202,6 +210,7 @@ impl Cmd { Cmd::Read(read) => read.run().await?, Cmd::Restore(restore) => restore.run(global_args).await?, Cmd::SpecVerify(spec_verify) => spec_verify.run(global_args)?, + Cmd::Verify(verify) => verify.run(global_args).await?, } Ok(()) } diff --git a/cmd/soroban-cli/src/commands/contract/upload.rs b/cmd/soroban-cli/src/commands/contract/upload.rs index 203da3cc35..de521531ce 100644 --- a/cmd/soroban-cli/src/commands/contract/upload.rs +++ b/cmd/soroban-cli/src/commands/contract/upload.rs @@ -135,7 +135,7 @@ impl Cmd { return Err(Error::BuildOnlyNotSupported); } - let wasm_paths = self.resolve_wasm_paths(global_args)?; + let wasm_paths = self.resolve_wasm_paths(global_args).await?; for wasm_path in &wasm_paths { let res = self @@ -172,7 +172,7 @@ impl Cmd { self.upload_wasm(&wasm_path, config, quiet, no_cache).await } - fn resolve_wasm_paths(&self, global_args: &global::Args) -> Result, Error> { + async fn resolve_wasm_paths(&self, global_args: &global::Args) -> Result, Error> { if let Some(wasm) = &self.wasm { Ok(vec![wasm.clone()]) } else { @@ -181,7 +181,7 @@ impl Cmd { build_args: self.build_args.clone(), 
..build::Cmd::default() }; - let contracts = build_cmd.run(global_args).map_err(|e| match e { + let contracts = build_cmd.run(global_args).await.map_err(|e| match e { build::Error::Metadata(_) => Error::NotInCargoProject, other => other.into(), })?; diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs new file mode 100644 index 0000000000..4063ac7560 --- /dev/null +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -0,0 +1,137 @@ +use std::fs; +use std::path::PathBuf; + +use clap::Parser; +use sha2::{Digest, Sha256}; +use soroban_spec_tools::contract::{self, Spec}; +use stellar_xdr::curr::{ScMetaEntry, ScMetaV0}; + +use crate::commands::global; +use crate::commands::version; +use crate::print::Print; + +use super::build; +use super::info::shared::{self, fetch, Contract, Fetched}; + +/// Verify that a wasm matches what would be produced by building its source. +/// +/// Re-runs the build inside the same Docker image (digest) recorded in the +/// wasm's contract metadata and compares the resulting wasm hash. Succeeds +/// only if the rebuilt artifact is byte-identical. +/// +/// Verify rebuilds from --source (default: current directory). The user is +/// responsible for checking out the right commit before running verify. +#[derive(Parser, Debug, Clone)] +#[group(skip)] +pub struct Cmd { + /// Source of the wasm to verify. Provide one of --wasm, --wasm-hash, --contract-id. + #[command(flatten)] + pub common: shared::Args, + + /// Path to the source tree (Cargo.toml directory) used to rebuild. + /// Defaults to current working directory. + #[arg(long, default_value = ".")] + pub source: PathBuf, + + /// Override the docker image read from the contract metadata. + /// Use only for debugging — overriding will normally cause a hash mismatch. 
+ #[arg(long, value_name = "IMAGE", help_heading = "Advanced")] + pub docker: Option, +} + +#[derive(thiserror::Error, Debug)] +pub enum Error { + #[error(transparent)] + Shared(#[from] shared::Error), + #[error(transparent)] + Spec(#[from] contract::Error), + #[error(transparent)] + Build(#[from] build::Error), + #[error("stellar asset contract has no source to verify")] + StellarAssetContract, + #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --docker` to make it verifiable")] + MissingMeta(&'static str), + #[error("CLI version mismatch: contract metadata says '{expected}', running CLI is '{actual}'.\nInstall the matching CLI version and re-run `stellar contract verify`.")] + CliVersionMismatch { expected: String, actual: String }, + #[error("verification failed: rebuilt wasm does not match original (sha256 {expected})")] + Mismatch { expected: String }, + #[error("reading rebuilt wasm: {0}")] + ReadingRebuilt(std::io::Error), +} + +impl Cmd { + pub async fn run(&self, global_args: &global::Args) -> Result<(), Error> { + let print = Print::new(global_args.quiet); + + let Fetched { contract, .. 
} = fetch(&self.common, &print).await?; + let wasm_bytes = match contract { + Contract::Wasm { wasm_bytes } => wasm_bytes, + Contract::StellarAssetContract => return Err(Error::StellarAssetContract), + }; + + let original_hash = hex::encode(Sha256::digest(&wasm_bytes)); + print.infoln(format!("Original wasm sha256: {original_hash}")); + + let spec = Spec::new(&wasm_bytes)?; + let cliver = find_meta(&spec, "cliver").ok_or(Error::MissingMeta("cliver"))?; + let bldimg = match &self.docker { + Some(image) => image.clone(), + None => find_meta(&spec, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, + }; + + let running_cliver = version::one_line(); + if cliver != running_cliver { + return Err(Error::CliVersionMismatch { + expected: cliver, + actual: running_cliver, + }); + } + print.infoln(format!("CLI version matches: {running_cliver}")); + + print.infoln(format!("Rebuilding with docker image {bldimg}...")); + let build_cmd = build::Cmd { + manifest_path: Some(self.source.join("Cargo.toml")), + docker: Some(bldimg), + ..build::Cmd::default() + }; + let built = build_cmd.run(global_args).await?; + + let mut hashes: Vec<(String, String)> = Vec::with_capacity(built.len()); + let mut matched: Option = None; + for c in &built { + let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; + let hash = hex::encode(Sha256::digest(&bytes)); + if hash == original_hash { + matched = Some(c.name.clone()); + } + hashes.push((c.name.clone(), hash)); + } + + if let Some(name) = matched { + eprintln!( + "✅ Verified: rebuilt wasm matches the original (sha256 {original_hash}) — {name}" + ); + Ok(()) + } else { + eprintln!("⚠ Verification failed: rebuilt wasm does not match original."); + eprintln!(" Built artifacts:"); + for (name, hash) in &hashes { + eprintln!(" {name} {hash}"); + } + Err(Error::Mismatch { + expected: original_hash, + }) + } + } +} + +fn find_meta(spec: &Spec, key: &str) -> Option { + spec.meta.iter().find_map(|meta_entry| { + let ScMetaEntry::ScMetaV0(ScMetaV0 { 
key: k, val }) = meta_entry; + if k.to_string() == key { + Some(val.to_string()) + } else { + None + } + }) +} From f135de098e3c3a61def43fb7db6520ba714f349f Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Mon, 27 Apr 2026 14:29:36 +0000 Subject: [PATCH 02/64] fix path remap; pass docker-host to verify; tests --- .../src/commands/contract/build.rs | 40 ++++++- .../src/commands/contract/build_docker.rs | 97 ++++++++++++++--- .../src/commands/contract/verify.rs | 102 +++++++++++++++--- 3 files changed, 213 insertions(+), 26 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 7065ea3812..dc101cb59c 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -104,6 +104,8 @@ pub struct Cmd { /// /// With a value: uses the specified image. Pin via `@sha256:...` /// for fully-reproducible builds. + /// + /// Aborted builds may leave a stopped container; clean with `docker container prune`. #[arg( long, num_args = 0..=1, @@ -323,7 +325,9 @@ impl Cmd { } let mut env_vars: Vec<(String, String)> = Vec::new(); - if let Some(rustflags) = make_rustflags_to_remap_absolute_paths(&print)? { + if let Some(rustflags) = + make_rustflags_to_remap_absolute_paths(&print, self.docker.is_some())? + { env_vars.push(("CARGO_BUILD_RUSTFLAGS".to_string(), rustflags)); } env_vars.push(( @@ -765,7 +769,21 @@ fn serialize_command(cmd: &Command) -> String { /// the absolute path replacement. Non-Unicode `CARGO_BUILD_RUSTFLAGS` will result in the /// existing rustflags being ignored, which is also the behavior of /// Cargo itself. 
-fn make_rustflags_to_remap_absolute_paths(print: &Print) -> Result, Error> { +fn make_rustflags_to_remap_absolute_paths( + print: &Print, + in_docker: bool, +) -> Result, Error> { + // Inside the container the cargo registry is always mounted at + // /usr/local/cargo/registry and the workspace at /work, so the host's + // env vars (RUSTFLAGS, cargo_home) are irrelevant — the container does + // not inherit them. Use fixed container paths so two hosts produce the + // same wasm. + if in_docker { + return Ok(Some( + "--remap-path-prefix=/usr/local/cargo/registry/src/= --remap-path-prefix=/work=".to_string(), + )); + } + let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; if format!("{}", cargo_home.display()) @@ -971,4 +989,22 @@ mod tests { "shlex round-trip failed: {raw_arg:?} not found as a single token in {tokens:?}" ); } + + #[test] + fn parse_docker_image_rejects_empty() { + let err = parse_docker_image("").unwrap_err(); + assert!(err.contains("image cannot be empty"), "got: {err}"); + } + + #[test] + fn parse_docker_image_accepts_non_empty() { + assert_eq!( + parse_docker_image("docker.io/library/rust:latest").unwrap(), + "docker.io/library/rust:latest" + ); + assert_eq!( + parse_docker_image("name@sha256:abc").unwrap(), + "name@sha256:abc" + ); + } } diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index b456334708..f0ae4fe3c1 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -69,7 +69,7 @@ pub struct DockerRun<'a> { pub async fn run_cargo_rustc_in_docker(run: DockerRun<'_>) -> Result { let docker: Docker = match run.container_args.connect_to_docker(run.print).await { Ok(d) => d, - Err(e) => return Err(map_connect_error(e)), + Err(e) => return Err(Error::DockerNotRunning(e)), }; pull_image(&docker, &run.image_ref, run.print).await?; @@ -108,6 +108,8 @@ pub async fn run_cargo_rustc_in_docker(run: 
DockerRun<'_>) -> Result Result<(), E } async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { - if let Some(digest) = parse_pinned_digest(image) { - return Ok(format!("{}@{}", strip_digest(image), digest)); + if let Some((name, digest)) = parse_pinned_digest(image) { + return Ok(format!("{name}@{digest}")); } let info = docker .inspect_image(image) @@ -210,19 +212,30 @@ async fn resolve_image_digest(docker: &Docker, image: &str) -> Result Option { - let (_, after) = image.rsplit_once('@')?; +/// Parse a `name@sha256:...` reference into `(name, digest)`. Returns `None` +/// if the reference does not contain a `@sha256:` digest. +fn parse_pinned_digest(image: &str) -> Option<(&str, &str)> { + let (name, after) = image.rsplit_once('@')?; if after.starts_with("sha256:") { - Some(after.to_string()) + Some((name, after)) } else { None } @@ -232,8 +245,21 @@ fn strip_digest(image: &str) -> &str { image.split_once('@').map_or(image, |(name, _)| name) } -fn map_connect_error(e: ContainerError) -> Error { - Error::DockerNotRunning(e) +/// Strip both `@sha256:...` and `:tag`, leaving just the repository name. +fn strip_tag(image: &str) -> &str { + let no_digest = strip_digest(image); + // A `:` in the host portion (e.g. `host:5000/name`) is not a tag separator. + // Tags only appear after the last `/`. 
+ match no_digest.rfind('/') { + Some(slash) => match no_digest[slash + 1..].rfind(':') { + Some(colon) => &no_digest[..slash + 1 + colon], + None => no_digest, + }, + None => match no_digest.rfind(':') { + Some(colon) => &no_digest[..colon], + None => no_digest, + }, + } } #[allow(clippy::unnecessary_wraps)] @@ -336,3 +362,50 @@ pub fn source_date_epoch(workspace_root: &Path) -> String { } "0".to_string() } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_pinned_digest_cases() { + assert_eq!(parse_pinned_digest("name"), None); + assert_eq!(parse_pinned_digest("name:tag"), None); + assert_eq!( + parse_pinned_digest("name@sha256:abc"), + Some(("name", "sha256:abc")) + ); + assert_eq!( + parse_pinned_digest("host/path/name@sha256:abc"), + Some(("host/path/name", "sha256:abc")) + ); + assert_eq!( + parse_pinned_digest("host:5000/name:tag@sha256:abc"), + Some(("host:5000/name:tag", "sha256:abc")) + ); + // Non-sha256 algorithms are not recognized. + assert_eq!(parse_pinned_digest("name@md5:abc"), None); + } + + #[test] + fn strip_digest_cases() { + assert_eq!(strip_digest("name"), "name"); + assert_eq!(strip_digest("name:tag"), "name:tag"); + assert_eq!(strip_digest("name@sha256:abc"), "name"); + assert_eq!( + strip_digest("host/name:tag@sha256:abc"), + "host/name:tag" + ); + } + + #[test] + fn strip_tag_cases() { + assert_eq!(strip_tag("name"), "name"); + assert_eq!(strip_tag("name:tag"), "name"); + assert_eq!(strip_tag("host/name:tag"), "host/name"); + assert_eq!(strip_tag("host:5000/name"), "host:5000/name"); + assert_eq!(strip_tag("host:5000/name:tag"), "host:5000/name"); + assert_eq!(strip_tag("name@sha256:abc"), "name"); + assert_eq!(strip_tag("host:5000/name:tag@sha256:abc"), "host:5000/name"); + } +} diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 4063ac7560..4408a75b17 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ 
b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -1,3 +1,4 @@ +use std::fmt::Write as _; use std::fs; use std::path::PathBuf; @@ -6,6 +7,7 @@ use sha2::{Digest, Sha256}; use soroban_spec_tools::contract::{self, Spec}; use stellar_xdr::curr::{ScMetaEntry, ScMetaV0}; +use crate::commands::container::shared::Args as ContainerArgs; use crate::commands::global; use crate::commands::version; use crate::print::Print; @@ -37,6 +39,9 @@ pub struct Cmd { /// Use only for debugging — overriding will normally cause a hash mismatch. #[arg(long, value_name = "IMAGE", help_heading = "Advanced")] pub docker: Option, + + #[command(flatten)] + pub container_args: ContainerArgs, } #[derive(thiserror::Error, Debug)] @@ -53,12 +58,27 @@ pub enum Error { MissingMeta(&'static str), #[error("CLI version mismatch: contract metadata says '{expected}', running CLI is '{actual}'.\nInstall the matching CLI version and re-run `stellar contract verify`.")] CliVersionMismatch { expected: String, actual: String }, - #[error("verification failed: rebuilt wasm does not match original (sha256 {expected})")] - Mismatch { expected: String }, + #[error("{}", format_mismatch(expected, produced))] + Mismatch { + expected: String, + produced: Vec<(String, String, PathBuf)>, + }, + #[error("no Cargo.toml found at {0}; pass --source to point at the contract's source tree")] + SourceNotFound(PathBuf), #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), } +fn format_mismatch(expected: &str, produced: &[(String, String, PathBuf)]) -> String { + let mut s = format!( + "verification failed: rebuilt wasm does not match (expected sha256 {expected}).\nproduced:" + ); + for (name, hash, path) in produced { + let _ = write!(s, "\n {name} sha256:{hash} {}", path.display()); + } + s +} + impl Cmd { pub async fn run(&self, global_args: &global::Args) -> Result<(), Error> { let print = Print::new(global_args.quiet); @@ -73,10 +93,10 @@ impl Cmd { print.infoln(format!("Original wasm sha256: 
{original_hash}")); let spec = Spec::new(&wasm_bytes)?; - let cliver = find_meta(&spec, "cliver").ok_or(Error::MissingMeta("cliver"))?; + let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = match &self.docker { Some(image) => image.clone(), - None => find_meta(&spec, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, + None => find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, }; let running_cliver = version::one_line(); @@ -88,15 +108,21 @@ impl Cmd { } print.infoln(format!("CLI version matches: {running_cliver}")); + let manifest_path = self.source.join("Cargo.toml"); + if !manifest_path.exists() { + return Err(Error::SourceNotFound(self.source.clone())); + } + print.infoln(format!("Rebuilding with docker image {bldimg}...")); let build_cmd = build::Cmd { - manifest_path: Some(self.source.join("Cargo.toml")), + manifest_path: Some(manifest_path), docker: Some(bldimg), + container_args: self.container_args.clone(), ..build::Cmd::default() }; let built = build_cmd.run(global_args).await?; - let mut hashes: Vec<(String, String)> = Vec::with_capacity(built.len()); + let mut produced: Vec<(String, String, PathBuf)> = Vec::with_capacity(built.len()); let mut matched: Option = None; for c in &built { let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; @@ -104,30 +130,33 @@ impl Cmd { if hash == original_hash { matched = Some(c.name.clone()); } - hashes.push((c.name.clone(), hash)); + produced.push((c.name.clone(), hash, c.path.clone())); } if let Some(name) = matched { + // Intentional: bypasses --quiet because the pass/fail verdict is the primary output of this command. eprintln!( "✅ Verified: rebuilt wasm matches the original (sha256 {original_hash}) — {name}" ); Ok(()) } else { + // Intentional: bypasses --quiet because the pass/fail verdict is the primary output of this command. 
eprintln!("⚠ Verification failed: rebuilt wasm does not match original."); eprintln!(" Built artifacts:"); - for (name, hash) in &hashes { - eprintln!(" {name} {hash}"); + for (name, hash, path) in &produced { + eprintln!(" {name} sha256:{hash} {}", path.display()); } Err(Error::Mismatch { expected: original_hash, + produced, }) } } } -fn find_meta(spec: &Spec, key: &str) -> Option { - spec.meta.iter().find_map(|meta_entry| { - let ScMetaEntry::ScMetaV0(ScMetaV0 { key: k, val }) = meta_entry; +fn find_meta(meta: &[ScMetaEntry], key: &str) -> Option { + meta.iter().find_map(|entry| { + let ScMetaEntry::ScMetaV0(ScMetaV0 { key: k, val }) = entry; if k.to_string() == key { Some(val.to_string()) } else { @@ -135,3 +164,52 @@ fn find_meta(spec: &Spec, key: &str) -> Option { } }) } + +#[cfg(test)] +mod tests { + use super::*; + + fn entry(key: &str, val: &str) -> ScMetaEntry { + ScMetaEntry::ScMetaV0(ScMetaV0 { + key: key.to_string().try_into().unwrap(), + val: val.to_string().try_into().unwrap(), + }) + } + + #[test] + fn find_meta_first_index() { + let meta = vec![entry("cliver", "v1"), entry("bldimg", "img@sha256:abc")]; + assert_eq!(find_meta(&meta, "cliver"), Some("v1".to_string())); + } + + #[test] + fn find_meta_later_index() { + let meta = vec![ + entry("cliver", "v1"), + entry("other", "x"), + entry("bldimg", "img@sha256:abc"), + ]; + assert_eq!( + find_meta(&meta, "bldimg"), + Some("img@sha256:abc".to_string()) + ); + } + + #[test] + fn find_meta_missing() { + let meta = vec![entry("cliver", "v1")]; + assert_eq!(find_meta(&meta, "bldimg"), None); + } + + #[test] + fn find_meta_exact_key_not_prefix() { + let meta = vec![entry("bldimg2", "wrong"), entry("bldimg", "right")]; + assert_eq!(find_meta(&meta, "bldimg"), Some("right".to_string())); + } + + #[test] + fn find_meta_empty() { + let meta: Vec = Vec::new(); + assert_eq!(find_meta(&meta, "cliver"), None); + } +} From 77225ef631da11e5358939ed55327db5920a9f2c Mon Sep 17 00:00:00 2001 From: Leigh 
<351529+leighmcculloch@users.noreply.github.com> Date: Mon, 27 Apr 2026 15:14:06 +0000 Subject: [PATCH 03/64] minimise diff against main --- .../src/commands/contract/build.rs | 179 +++------- .../src/commands/contract/build_docker.rs | 319 +++++------------- .../src/commands/contract/verify.rs | 100 ++---- 3 files changed, 169 insertions(+), 429 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index dc101cb59c..68b4857f84 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -97,15 +97,10 @@ pub struct Cmd { #[arg(long, conflicts_with = "out_dir", help_heading = "Other")] pub print_commands_only: bool, - /// Run `cargo rustc` inside a Docker container for reproducible builds. - /// - /// Without a value: pulls `docker.io/library/rust:latest` (linux/amd64) - /// and records the resolved image digest in contract metadata. - /// - /// With a value: uses the specified image. Pin via `@sha256:...` - /// for fully-reproducible builds. - /// - /// Aborted builds may leave a stopped container; clean with `docker container prune`. + /// Run inside a Docker container (linux/amd64) for reproducible builds. + /// The resolved image digest is recorded in contract metadata. Pin via + /// `--docker=@sha256:...` for fully-reproducible builds. Aborted + /// builds may leave a stopped container; clean with `docker container prune`. 
#[arg( long, num_args = 0..=1, @@ -261,11 +256,11 @@ impl Cmd { let metadata = self.metadata()?; let packages = self.packages(&metadata)?; let target_dir = &metadata.target_directory; - let workspace_root = metadata.workspace_root.as_std_path().to_path_buf(); + let workspace_root = metadata.workspace_root.as_std_path(); // Run build configuration checks (only when actually building) if !self.print_commands_only { - run_checks(&workspace_root, &self.profile)?; + run_checks(workspace_root, &self.profile)?; } if let Some(package) = &self.package { @@ -277,123 +272,91 @@ impl Cmd { } let wasm_target = get_wasm_target()?; - // When building inside Docker, force --locked to keep builds deterministic. - let locked = self.locked || self.docker.is_some(); let mut built_contracts = Vec::new(); for p in packages { - // Build cargo args + env once; both host and docker paths consume them. + let mut cmd = Command::new("cargo"); + cmd.stdout(Stdio::piped()); + cmd.arg("rustc"); + // Force --locked when building inside Docker so the build is deterministic. + if self.locked || self.docker.is_some() { + cmd.arg("--locked"); + } let manifest_path = if self.docker.is_some() { - // Inside the container the workspace is mounted at /work, so make - // the manifest path relative to the workspace root. - let rel = pathdiff::diff_paths(&p.manifest_path, &workspace_root) + // Inside the container the workspace is mounted at /work. 
+ let rel = pathdiff::diff_paths(&p.manifest_path, workspace_root) .unwrap_or(p.manifest_path.clone().into()); Path::new(build_docker::WORK_DIR).join(rel) } else { pathdiff::diff_paths(&p.manifest_path, &working_dir) .unwrap_or(p.manifest_path.clone().into()) }; - - let mut cargo_args: Vec = Vec::new(); - if locked { - cargo_args.push("--locked".to_string()); - } - cargo_args.push(format!( + cmd.arg(format!( "--manifest-path={}", manifest_path.to_string_lossy() )); - cargo_args.push("--crate-type=cdylib".to_string()); - cargo_args.push(format!("--target={wasm_target}")); + cmd.arg("--crate-type=cdylib"); + cmd.arg(format!("--target={wasm_target}")); if self.profile == "release" { - cargo_args.push("--release".to_string()); + cmd.arg("--release"); } else { - cargo_args.push(format!("--profile={}", self.profile)); + cmd.arg(format!("--profile={}", self.profile)); } if self.all_features { - cargo_args.push("--all-features".to_string()); + cmd.arg("--all-features"); } if self.no_default_features { - cargo_args.push("--no-default-features".to_string()); + cmd.arg("--no-default-features"); } if let Some(features) = self.features() { let requested: HashSet = features.iter().cloned().collect(); let available = p.features.iter().map(|f| f.0).cloned().collect(); let activate = requested.intersection(&available).join(","); if !activate.is_empty() { - cargo_args.push(format!("--features={activate}")); + cmd.arg(format!("--features={activate}")); } } - let mut env_vars: Vec<(String, String)> = Vec::new(); if let Some(rustflags) = make_rustflags_to_remap_absolute_paths(&print, self.docker.is_some())? { - env_vars.push(("CARGO_BUILD_RUSTFLAGS".to_string(), rustflags)); + cmd.env("CARGO_BUILD_RUSTFLAGS", rustflags); } - env_vars.push(( - "SOROBAN_SDK_BUILD_SYSTEM_SUPPORTS_SPEC_SHAKING_V2".to_string(), - "1".to_string(), - )); - // Resolved image digest, populated only on the docker path. Embedded - // in contract metadata as `bldimg`. 
- let mut bldimg: Option = None; - - if let Some(image) = &self.docker { - let source_date_epoch = build_docker::source_date_epoch(&workspace_root); - if self.print_commands_only { - let line = build_docker::print_docker_command( - &workspace_root, - target_dir.as_std_path(), - &cargo_args, - &env_vars, - image, - &source_date_epoch, - )?; - println!("{line}"); - continue; - } + // Set env var to inform the SDK that this CLI supports spec + // optimization using markers. + cmd.env("SOROBAN_SDK_BUILD_SYSTEM_SUPPORTS_SPEC_SHAKING_V2", "1"); - let summary = format_docker_summary(image, &cargo_args, &env_vars); - print.infoln(summary); - - let resolved = build_docker::run_cargo_rustc_in_docker(build_docker::DockerRun { - workspace_root: &workspace_root, - target_dir: target_dir.as_std_path(), - cargo_args: cargo_args.clone(), - env_vars: env_vars.clone(), - image_ref: image.clone(), - source_date_epoch, - container_args: &self.container_args, - print: &print, - }) - .await?; - bldimg = Some(resolved); - } else { - let mut cmd = Command::new("cargo"); - cmd.stdout(Stdio::piped()); - cmd.arg("rustc"); - for a in &cargo_args { - cmd.arg(a); - } - for (k, v) in &env_vars { - cmd.env(k, v); - } + let cmd_str = serialize_command(&cmd); - let cmd_str = serialize_command(&cmd); - - if self.print_commands_only { - println!("{cmd_str}"); - continue; - } - print.infoln(cmd_str); - let status = cmd.status().map_err(Error::CargoCmd)?; - if !status.success() { - return Err(Error::Exit(status)); + if self.print_commands_only { + if let Some(image) = &self.docker { + println!("# inside docker image: {image}"); } - } + println!("{cmd_str}"); + } else { + let bldimg = if let Some(image) = &self.docker { + print.infoln(format!("docker[{image}] {cmd_str}")); + Some( + build_docker::run_in_docker( + &cmd, + image, + workspace_root, + target_dir.as_std_path(), + &self.container_args, + &print, + ) + .await?, + ) + } else { + print.infoln(cmd_str); + let status = 
cmd.status().map_err(Error::CargoCmd)?; + if !status.success() { + return Err(Error::Exit(status)); + } + None + }; - { let wasm_name = p.name.replace('-', "_"); let file = format!("{wasm_name}.wasm"); let target_file_path = Path::new(target_dir) @@ -687,23 +650,6 @@ impl Cmd { } } -fn format_docker_summary(image: &str, cargo_args: &[String], env_vars: &[(String, String)]) -> String { - let mut parts = Vec::::new(); - parts.push(format!("docker[{image}]")); - for (k, v) in env_vars { - parts.push(format!( - "{k}={}", - shell_escape::escape(v.into()).into_owned() - )); - } - parts.push("cargo".to_string()); - parts.push("rustc".to_string()); - for a in cargo_args { - parts.push(shell_escape::escape(a.into()).into_owned()); - } - parts.join(" ") -} - fn serialize_command(cmd: &Command) -> String { let mut parts = Vec::::new(); parts.extend(cmd.get_envs().map(|(key, val)| { @@ -990,21 +936,4 @@ mod tests { ); } - #[test] - fn parse_docker_image_rejects_empty() { - let err = parse_docker_image("").unwrap_err(); - assert!(err.contains("image cannot be empty"), "got: {err}"); - } - - #[test] - fn parse_docker_image_accepts_non_empty() { - assert_eq!( - parse_docker_image("docker.io/library/rust:latest").unwrap(), - "docker.io/library/rust:latest" - ); - assert_eq!( - parse_docker_image("name@sha256:abc").unwrap(), - "name@sha256:abc" - ); - } } diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index f0ae4fe3c1..962f33376d 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -1,4 +1,6 @@ +use std::ffi::OsStr; use std::path::Path; +use std::process::Command; use bollard::{ models::ContainerCreateBody, @@ -16,7 +18,6 @@ use crate::{ print::Print, }; -pub const DEFAULT_IMAGE: &str = "docker.io/library/rust:latest"; const PLATFORM: &str = "linux/amd64"; pub const WORK_DIR: &str = "/work"; const TARGET_DIR: &str = "/target"; @@ 
-52,63 +53,58 @@ pub enum Error { CargoHome(std::io::Error), } -/// Inputs for a single `cargo rustc` invocation inside a container. -pub struct DockerRun<'a> { - pub workspace_root: &'a Path, - pub target_dir: &'a Path, - pub cargo_args: Vec, - pub env_vars: Vec<(String, String)>, - pub image_ref: String, - pub source_date_epoch: String, - pub container_args: &'a ContainerArgs, - pub print: &'a Print, -} - -/// Pull (if needed), run `cargo rustc` inside the container, and return the -/// fully-qualified image reference including the resolved `@sha256:...` digest. -pub async fn run_cargo_rustc_in_docker(run: DockerRun<'_>) -> Result { - let docker: Docker = match run.container_args.connect_to_docker(run.print).await { - Ok(d) => d, - Err(e) => return Err(Error::DockerNotRunning(e)), - }; +/// Pull (if needed), run the host `cmd` (its program and args) inside a +/// linux/amd64 container, and return the resolved `name@sha256:...` reference. +pub async fn run_in_docker( + cmd: &Command, + image: &str, + workspace_root: &Path, + target_dir: &Path, + container_args: &ContainerArgs, + print: &Print, +) -> Result { + let docker: Docker = container_args + .connect_to_docker(print) + .await + .map_err(Error::DockerNotRunning)?; - pull_image(&docker, &run.image_ref, run.print).await?; - let resolved_image = resolve_image_digest(&docker, &run.image_ref).await?; + pull_image(&docker, image, print).await?; + let resolved = resolve_image_digest(&docker, image).await?; let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; - let registry = cargo_home.join("registry"); - let binds = vec![ - format!("{}:{}", run.workspace_root.display(), WORK_DIR), - format!("{}:{}", run.target_dir.display(), TARGET_DIR), - format!("{}:{}", registry.display(), REGISTRY_DIR), + format!("{}:{}", workspace_root.display(), WORK_DIR), + format!("{}:{}", target_dir.display(), TARGET_DIR), + format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), ]; - let mut env: Vec = run - 
.env_vars - .iter() - .map(|(k, v)| format!("{k}={v}")) + let mut env: Vec = cmd + .get_envs() + .filter_map(|(k, v)| { + v.map(|val| format!("{}={}", k.to_string_lossy(), val.to_string_lossy())) + }) .collect(); env.push(format!("CARGO_TARGET_DIR={TARGET_DIR}")); - env.push(format!("SOURCE_DATE_EPOCH={}", run.source_date_epoch)); + env.push(format!("SOURCE_DATE_EPOCH={}", source_date_epoch(workspace_root))); - let mut cmd = vec!["cargo".to_string(), "rustc".to_string()]; - cmd.extend(run.cargo_args.iter().cloned()); - - let user = current_uid_gid(); + let container_cmd: Vec = std::iter::once(cmd.get_program()) + .chain(cmd.get_args()) + .map(OsStr::to_string_lossy) + .map(std::borrow::Cow::into_owned) + .collect(); let config = ContainerCreateBody { - image: Some(resolved_image.clone()), - cmd: Some(cmd), + image: Some(resolved.clone()), + cmd: Some(container_cmd), env: Some(env), working_dir: Some(WORK_DIR.to_string()), - user, + user: current_uid_gid(), attach_stdout: Some(true), attach_stderr: Some(true), host_config: Some(HostConfig { binds: Some(binds), network_mode: Some("none".to_string()), - // auto_remove=false so we can stream logs, then call + // auto_remove=false so we can stream logs first, then call // remove_container ourselves with force=true even on failure paths. auto_remove: Some(false), ..Default::default() @@ -116,12 +112,12 @@ pub async fn run_cargo_rustc_in_docker(run: DockerRun<'_>) -> Result, config) - .await?; - let container_id = create_resp.id; + .await? 
+ .id; - let result = run_and_wait(&docker, &container_id, run.print).await; + let result = run_and_wait(&docker, &container_id, print).await; let _ = docker .remove_container( @@ -134,8 +130,7 @@ pub async fn run_cargo_rustc_in_docker(run: DockerRun<'_>) -> Result Result<(), Error> { @@ -143,23 +138,20 @@ async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Res .start_container(container_id, None::) .await?; - let logs_opts = LogsOptions { - follow: true, - stdout: true, - stderr: true, - ..Default::default() - }; - let mut log_stream = docker.logs(container_id, Some(logs_opts)); + let mut log_stream = docker.logs( + container_id, + Some(LogsOptions { + follow: true, + stdout: true, + stderr: true, + ..Default::default() + }), + ); while let Some(item) = log_stream.next().await { - match item { - Ok(out) => { - let s = out.to_string(); - let s = s.trim_end_matches('\n'); - if !s.is_empty() { - print.infoln(s); - } - } - Err(e) => return Err(Error::DockerRun(e)), + let s = item?.to_string(); + let s = s.trim_end_matches('\n'); + if !s.is_empty() { + print.infoln(s); } } @@ -168,9 +160,7 @@ async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Res while let Some(res) = wait_stream.next().await { match res { Ok(r) => exit_code = r.status_code, - Err(bollard::errors::Error::DockerContainerWaitError { code, .. }) => { - exit_code = code; - } + Err(bollard::errors::Error::DockerContainerWaitError { code, .. 
}) => exit_code = code, Err(e) => return Err(Error::DockerRun(e)), } } @@ -181,12 +171,15 @@ async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Res } async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { - let opts = CreateImageOptions { - from_image: Some(image.to_string()), - platform: PLATFORM.to_string(), - ..Default::default() - }; - let mut stream = docker.create_image(Some(opts), None, None); + let mut stream = docker.create_image( + Some(CreateImageOptions { + from_image: Some(image.to_string()), + platform: PLATFORM.to_string(), + ..Default::default() + }), + None, + None, + ); while let Some(item) = stream.try_next().await.map_err(|e| Error::DockerImagePull { image: image.to_string(), source: e, @@ -200,9 +193,11 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E Ok(()) } +// We pull with --platform=linux/amd64 so the recorded digest is platform-specific; +// reproducibility on `verify` depends on always pulling with that same platform. async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { - if let Some((name, digest)) = parse_pinned_digest(image) { - return Ok(format!("{name}@{digest}")); + if parse_pinned_digest(image).is_some() { + return Ok(image.to_string()); } let info = docker .inspect_image(image) @@ -211,65 +206,27 @@ async fn resolve_image_digest(docker: &Docker, image: &str) -> Result Option<(&str, &str)> { let (name, after) = image.rsplit_once('@')?; - if after.starts_with("sha256:") { - Some((name, after)) - } else { - None - } -} - -fn strip_digest(image: &str) -> &str { - image.split_once('@').map_or(image, |(name, _)| name) -} - -/// Strip both `@sha256:...` and `:tag`, leaving just the repository name. -fn strip_tag(image: &str) -> &str { - let no_digest = strip_digest(image); - // A `:` in the host portion (e.g. `host:5000/name`) is not a tag separator. - // Tags only appear after the last `/`. 
- match no_digest.rfind('/') { - Some(slash) => match no_digest[slash + 1..].rfind(':') { - Some(colon) => &no_digest[..slash + 1 + colon], - None => no_digest, - }, - None => match no_digest.rfind(':') { - Some(colon) => &no_digest[..colon], - None => no_digest, - }, - } + after.starts_with("sha256:").then_some((name, after)) } #[allow(clippy::unnecessary_wraps)] #[cfg(unix)] fn current_uid_gid() -> Option { - // SAFETY: getuid/getgid are infallible POSIX calls returning the real - // user/group ID of the calling process. - let uid = unsafe { libc::getuid() }; - let gid = unsafe { libc::getgid() }; - Some(format!("{uid}:{gid}")) + // SAFETY: getuid/getgid are infallible POSIX calls. + Some(format!("{}:{}", unsafe { libc::getuid() }, unsafe { + libc::getgid() + })) } #[cfg(not(unix))] @@ -277,90 +234,19 @@ fn current_uid_gid() -> Option { None } -/// Build the equivalent `docker run ...` command line for `--print-commands-only`. -pub fn print_docker_command( - workspace_root: &Path, - target_dir: &Path, - cargo_args: &[String], - env_vars: &[(String, String)], - image_ref: &str, - source_date_epoch: &str, -) -> Result { - let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; - let registry = cargo_home.join("registry"); - - let mut parts: Vec = vec![ - "docker".to_string(), - "run".to_string(), - "--rm".to_string(), - format!("--platform={PLATFORM}"), - "--network=none".to_string(), - format!("-w {WORK_DIR}"), - ]; - - if let Some(user) = current_uid_gid() { - parts.push(format!("-u {user}")); - } - - parts.push(shell_escape_kv( - "-v", - &format!("{}:{}", workspace_root.display(), WORK_DIR), - )); - parts.push(shell_escape_kv( - "-v", - &format!("{}:{}", target_dir.display(), TARGET_DIR), - )); - parts.push(shell_escape_kv( - "-v", - &format!("{}:{}", registry.display(), REGISTRY_DIR), - )); - - for (k, v) in env_vars { - parts.push(shell_escape_kv("-e", &format!("{k}={v}"))); - } - parts.push(shell_escape_kv( - "-e", - 
&format!("CARGO_TARGET_DIR={TARGET_DIR}"), - )); - parts.push(shell_escape_kv( - "-e", - &format!("SOURCE_DATE_EPOCH={source_date_epoch}"), - )); - - parts.push(shell_escape::escape(image_ref.into()).into_owned()); - parts.push("cargo".to_string()); - parts.push("rustc".to_string()); - for a in cargo_args { - parts.push(shell_escape::escape(a.into()).into_owned()); - } - - Ok(parts.join(" ")) -} - -fn shell_escape_kv(flag: &str, value: &str) -> String { - format!( - "{flag} {}", - shell_escape::escape(value.into()).into_owned() - ) -} - -/// Best-effort SOURCE_DATE_EPOCH derived from the workspace's HEAD commit time. -/// Falls back to `"0"` when not in a git repo or git is unavailable. -pub fn source_date_epoch(workspace_root: &Path) -> String { - let output = std::process::Command::new("git") +/// Best-effort SOURCE_DATE_EPOCH from the workspace's HEAD commit time; +/// falls back to `"0"` when not in a git repo. +fn source_date_epoch(workspace_root: &Path) -> String { + Command::new("git") .arg("-C") .arg(workspace_root) .args(["log", "-1", "--format=%ct"]) - .output(); - if let Ok(out) = output { - if out.status.success() { - let s = String::from_utf8_lossy(&out.stdout).trim().to_string(); - if !s.is_empty() { - return s; - } - } - } - "0".to_string() + .output() + .ok() + .filter(|o| o.status.success()) + .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string()) + .filter(|s| !s.is_empty()) + .unwrap_or_else(|| "0".to_string()) } #[cfg(test)] @@ -371,41 +257,14 @@ mod tests { fn parse_pinned_digest_cases() { assert_eq!(parse_pinned_digest("name"), None); assert_eq!(parse_pinned_digest("name:tag"), None); + assert_eq!(parse_pinned_digest("name@md5:abc"), None); assert_eq!( parse_pinned_digest("name@sha256:abc"), Some(("name", "sha256:abc")) ); - assert_eq!( - parse_pinned_digest("host/path/name@sha256:abc"), - Some(("host/path/name", "sha256:abc")) - ); assert_eq!( parse_pinned_digest("host:5000/name:tag@sha256:abc"), Some(("host:5000/name:tag", 
"sha256:abc")) ); - // Non-sha256 algorithms are not recognized. - assert_eq!(parse_pinned_digest("name@md5:abc"), None); - } - - #[test] - fn strip_digest_cases() { - assert_eq!(strip_digest("name"), "name"); - assert_eq!(strip_digest("name:tag"), "name:tag"); - assert_eq!(strip_digest("name@sha256:abc"), "name"); - assert_eq!( - strip_digest("host/name:tag@sha256:abc"), - "host/name:tag" - ); - } - - #[test] - fn strip_tag_cases() { - assert_eq!(strip_tag("name"), "name"); - assert_eq!(strip_tag("name:tag"), "name"); - assert_eq!(strip_tag("host/name:tag"), "host/name"); - assert_eq!(strip_tag("host:5000/name"), "host:5000/name"); - assert_eq!(strip_tag("host:5000/name:tag"), "host:5000/name"); - assert_eq!(strip_tag("name@sha256:abc"), "name"); - assert_eq!(strip_tag("host:5000/name:tag@sha256:abc"), "host:5000/name"); } } diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 4408a75b17..795ea1355c 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -7,36 +7,27 @@ use sha2::{Digest, Sha256}; use soroban_spec_tools::contract::{self, Spec}; use stellar_xdr::curr::{ScMetaEntry, ScMetaV0}; -use crate::commands::container::shared::Args as ContainerArgs; -use crate::commands::global; -use crate::commands::version; -use crate::print::Print; - use super::build; use super::info::shared::{self, fetch, Contract, Fetched}; +use crate::commands::container::shared::Args as ContainerArgs; +use crate::commands::{global, version}; +use crate::print::Print; -/// Verify that a wasm matches what would be produced by building its source. +/// Verify a wasm by rebuilding it inside the Docker image recorded in its metadata. /// -/// Re-runs the build inside the same Docker image (digest) recorded in the -/// wasm's contract metadata and compares the resulting wasm hash. Succeeds -/// only if the rebuilt artifact is byte-identical. 
-/// -/// Verify rebuilds from --source (default: current directory). The user is -/// responsible for checking out the right commit before running verify. +/// Succeeds only if the rebuilt artifact is byte-identical to the input. +/// User is responsible for checking out the matching commit before running. #[derive(Parser, Debug, Clone)] #[group(skip)] pub struct Cmd { - /// Source of the wasm to verify. Provide one of --wasm, --wasm-hash, --contract-id. #[command(flatten)] pub common: shared::Args, - /// Path to the source tree (Cargo.toml directory) used to rebuild. - /// Defaults to current working directory. + /// Source tree (Cargo.toml directory) to rebuild from. Defaults to cwd. #[arg(long, default_value = ".")] pub source: PathBuf, - /// Override the docker image read from the contract metadata. - /// Use only for debugging — overriding will normally cause a hash mismatch. + /// Override the docker image read from the contract metadata. For debugging only. #[arg(long, value_name = "IMAGE", help_heading = "Advanced")] pub docker: Option, @@ -56,7 +47,7 @@ pub enum Error { StellarAssetContract, #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --docker` to make it verifiable")] MissingMeta(&'static str), - #[error("CLI version mismatch: contract metadata says '{expected}', running CLI is '{actual}'.\nInstall the matching CLI version and re-run `stellar contract verify`.")] + #[error("CLI version mismatch: contract says '{expected}', running CLI is '{actual}'. 
Install the matching CLI version and re-run.")] CliVersionMismatch { expected: String, actual: String }, #[error("{}", format_mismatch(expected, produced))] Mismatch { @@ -70,9 +61,7 @@ pub enum Error { } fn format_mismatch(expected: &str, produced: &[(String, String, PathBuf)]) -> String { - let mut s = format!( - "verification failed: rebuilt wasm does not match (expected sha256 {expected}).\nproduced:" - ); + let mut s = format!("verification failed: rebuilt wasm does not match (expected sha256 {expected}).\nproduced:"); for (name, hash, path) in produced { let _ = write!(s, "\n {name} sha256:{hash} {}", path.display()); } @@ -88,32 +77,28 @@ impl Cmd { Contract::Wasm { wasm_bytes } => wasm_bytes, Contract::StellarAssetContract => return Err(Error::StellarAssetContract), }; - let original_hash = hex::encode(Sha256::digest(&wasm_bytes)); print.infoln(format!("Original wasm sha256: {original_hash}")); let spec = Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; - let bldimg = match &self.docker { - Some(image) => image.clone(), - None => find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, - }; - - let running_cliver = version::one_line(); - if cliver != running_cliver { + let running = version::one_line(); + if cliver != running { return Err(Error::CliVersionMismatch { expected: cliver, - actual: running_cliver, + actual: running, }); } - print.infoln(format!("CLI version matches: {running_cliver}")); + let bldimg = match &self.docker { + Some(image) => image.clone(), + None => find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, + }; let manifest_path = self.source.join("Cargo.toml"); if !manifest_path.exists() { return Err(Error::SourceNotFound(self.source.clone())); } - print.infoln(format!("Rebuilding with docker image {bldimg}...")); let build_cmd = build::Cmd { manifest_path: Some(manifest_path), docker: Some(bldimg), @@ -122,8 +107,8 @@ impl Cmd { }; let built = 
build_cmd.run(global_args).await?; - let mut produced: Vec<(String, String, PathBuf)> = Vec::with_capacity(built.len()); - let mut matched: Option = None; + let mut produced = Vec::with_capacity(built.len()); + let mut matched = None; for c in &built { let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; let hash = hex::encode(Sha256::digest(&bytes)); @@ -133,19 +118,12 @@ impl Cmd { produced.push((c.name.clone(), hash, c.path.clone())); } + // Verdict bypasses --quiet because pass/fail is this command's primary output. if let Some(name) = matched { - // Intentional: bypasses --quiet because the pass/fail verdict is the primary output of this command. - eprintln!( - "✅ Verified: rebuilt wasm matches the original (sha256 {original_hash}) — {name}" - ); + eprintln!("✅ Verified: rebuilt wasm matches (sha256 {original_hash}) — {name}"); Ok(()) } else { - // Intentional: bypasses --quiet because the pass/fail verdict is the primary output of this command. eprintln!("⚠ Verification failed: rebuilt wasm does not match original."); - eprintln!(" Built artifacts:"); - for (name, hash, path) in &produced { - eprintln!(" {name} sha256:{hash} {}", path.display()); - } Err(Error::Mismatch { expected: original_hash, produced, @@ -157,11 +135,7 @@ impl Cmd { fn find_meta(meta: &[ScMetaEntry], key: &str) -> Option { meta.iter().find_map(|entry| { let ScMetaEntry::ScMetaV0(ScMetaV0 { key: k, val }) = entry; - if k.to_string() == key { - Some(val.to_string()) - } else { - None - } + (k.to_string() == key).then(|| val.to_string()) }) } @@ -177,39 +151,17 @@ mod tests { } #[test] - fn find_meta_first_index() { - let meta = vec![entry("cliver", "v1"), entry("bldimg", "img@sha256:abc")]; - assert_eq!(find_meta(&meta, "cliver"), Some("v1".to_string())); - } - - #[test] - fn find_meta_later_index() { + fn find_meta_returns_value_for_exact_key() { let meta = vec![ + entry("bldimg2", "wrong"), entry("cliver", "v1"), - entry("other", "x"), entry("bldimg", "img@sha256:abc"), ]; + 
assert_eq!(find_meta(&meta, "cliver"), Some("v1".to_string())); assert_eq!( find_meta(&meta, "bldimg"), Some("img@sha256:abc".to_string()) ); - } - - #[test] - fn find_meta_missing() { - let meta = vec![entry("cliver", "v1")]; - assert_eq!(find_meta(&meta, "bldimg"), None); - } - - #[test] - fn find_meta_exact_key_not_prefix() { - let meta = vec![entry("bldimg2", "wrong"), entry("bldimg", "right")]; - assert_eq!(find_meta(&meta, "bldimg"), Some("right".to_string())); - } - - #[test] - fn find_meta_empty() { - let meta: Vec = Vec::new(); - assert_eq!(find_meta(&meta, "cliver"), None); + assert_eq!(find_meta(&meta, "missing"), None); } } From 3108cae0113cace0c8a3a700c2dd96520a816e4e Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Tue, 28 Apr 2026 01:30:55 +1000 Subject: [PATCH 04/64] update path mounts to workspace --- cmd/soroban-cli/src/commands/contract/build.rs | 6 +++--- cmd/soroban-cli/src/commands/contract/build_docker.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 68b4857f84..b5af013254 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -283,7 +283,7 @@ impl Cmd { cmd.arg("--locked"); } let manifest_path = if self.docker.is_some() { - // Inside the container the workspace is mounted at /work. + // Inside the container the workspace is mounted at /workspace. 
let rel = pathdiff::diff_paths(&p.manifest_path, workspace_root) .unwrap_or(p.manifest_path.clone().into()); Path::new(build_docker::WORK_DIR).join(rel) @@ -720,13 +720,13 @@ fn make_rustflags_to_remap_absolute_paths( in_docker: bool, ) -> Result, Error> { // Inside the container the cargo registry is always mounted at - // /usr/local/cargo/registry and the workspace at /work, so the host's + // /usr/local/cargo/registry and the workspace at /workspace, so the host's // env vars (RUSTFLAGS, cargo_home) are irrelevant — the container does // not inherit them. Use fixed container paths so two hosts produce the // same wasm. if in_docker { return Ok(Some( - "--remap-path-prefix=/usr/local/cargo/registry/src/= --remap-path-prefix=/work=".to_string(), + "--remap-path-prefix=/usr/local/cargo/registry/src/= --remap-path-prefix=/workspace=".to_string(), )); } diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 962f33376d..3dad4ad0c5 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -19,7 +19,7 @@ use crate::{ }; const PLATFORM: &str = "linux/amd64"; -pub const WORK_DIR: &str = "/work"; +pub const WORK_DIR: &str = "/workspace"; const TARGET_DIR: &str = "/target"; const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; From b860a4f4faf82c3e2f214f5148ee2486e96ffb00 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Tue, 28 Apr 2026 09:59:54 +1000 Subject: [PATCH 05/64] remove network mode setting --- cmd/soroban-cli/src/commands/contract/build_docker.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 3dad4ad0c5..cf390c6f53 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -103,7 +103,6 @@ 
pub async fn run_in_docker( attach_stderr: Some(true), host_config: Some(HostConfig { binds: Some(binds), - network_mode: Some("none".to_string()), // auto_remove=false so we can stream logs first, then call // remove_container ourselves with force=true even on failure paths. auto_remove: Some(false), From 144ad9366cf805af58fa86fcd70e3dd920a4edf2 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Tue, 28 Apr 2026 13:13:14 +1000 Subject: [PATCH 06/64] add rustup toolchain to build command --- cmd/soroban-cli/src/commands/contract/build.rs | 13 ++++++++++++- .../src/commands/contract/build_docker.rs | 17 ++++++++++------- cmd/soroban-cli/src/commands/contract/verify.rs | 3 +++ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index b5af013254..ba4ef06ce9 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -115,6 +115,11 @@ pub struct Cmd { #[command(flatten)] pub container_args: ContainerArgs, + /// Pin RUSTUP_TOOLCHAIN inside the docker container. Set by `verify` from + /// the `rsver` meta entry; not user-facing. + #[arg(skip)] + pub rustup_toolchain: Option, + #[command(flatten)] pub build_args: BuildArgs, } @@ -242,6 +247,7 @@ impl Default for Cmd { print_commands_only: false, docker: None, container_args: ContainerArgs { docker_host: None }, + rustup_toolchain: None, build_args: BuildArgs::default(), } } @@ -327,6 +333,10 @@ impl Cmd { // optimization using markers. cmd.env("SOROBAN_SDK_BUILD_SYSTEM_SUPPORTS_SPEC_SHAKING_V2", "1"); + if let Some(toolchain) = &self.rustup_toolchain { + cmd.env("RUSTUP_TOOLCHAIN", toolchain); + } + let cmd_str = serialize_command(&cmd); if self.print_commands_only { @@ -542,7 +552,8 @@ impl Cmd { }); new_meta.push(cli_meta_entry); - // Reproducible build image (only when --docker was used). 
+ // Reproducible build image (only when --docker was used). The matching + // rustc version is recorded as `rsver` by soroban-sdk itself. if let Some(image) = bldimg { let key: StringM = "bldimg" .to_string() diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index cf390c6f53..f70e6c7446 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -22,6 +22,12 @@ const PLATFORM: &str = "linux/amd64"; pub const WORK_DIR: &str = "/workspace"; const TARGET_DIR: &str = "/target"; const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; +const RUSTUP_DIR: &str = "/usr/local/rustup"; +// Named docker volumes for cached state across runs. Reproducibility comes +// from --locked + checksums + the pinned image digest, so cache contents +// don't affect build output. +const CARGO_REGISTRY_VOLUME: &str = "stellar-cli-cargo-registry"; +const RUSTUP_VOLUME: &str = "stellar-cli-rustup"; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -48,13 +54,10 @@ pub enum Error { #[error("docker run: {0}")] DockerRun(#[from] bollard::errors::Error), - - #[error("resolving CARGO_HOME: {0}")] - CargoHome(std::io::Error), } -/// Pull (if needed), run the host `cmd` (its program and args) inside a -/// linux/amd64 container, and return the resolved `name@sha256:...` reference. +/// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, +/// returning the resolved `name@sha256:...` reference for embedding into meta. 
pub async fn run_in_docker( cmd: &Command, image: &str, @@ -71,11 +74,11 @@ pub async fn run_in_docker( pull_image(&docker, image, print).await?; let resolved = resolve_image_digest(&docker, image).await?; - let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; let binds = vec![ format!("{}:{}", workspace_root.display(), WORK_DIR), format!("{}:{}", target_dir.display(), TARGET_DIR), - format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), + format!("{CARGO_REGISTRY_VOLUME}:{REGISTRY_DIR}"), + format!("{RUSTUP_VOLUME}:{RUSTUP_DIR}"), ]; let mut env: Vec = cmd diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 795ea1355c..b7516859c4 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -93,6 +93,7 @@ impl Cmd { Some(image) => image.clone(), None => find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, }; + let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; let manifest_path = self.source.join("Cargo.toml"); if !manifest_path.exists() { @@ -103,6 +104,7 @@ impl Cmd { manifest_path: Some(manifest_path), docker: Some(bldimg), container_args: self.container_args.clone(), + rustup_toolchain: Some(rsver), ..build::Cmd::default() }; let built = build_cmd.run(global_args).await?; @@ -164,4 +166,5 @@ mod tests { ); assert_eq!(find_meta(&meta, "missing"), None); } + } From 663f136e16c412d546926c66e224e88c18a91bcc Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Tue, 28 Apr 2026 03:52:01 +0000 Subject: [PATCH 07/64] rustfmt --- .../src/commands/contract/build.rs | 8 +++---- .../src/commands/contract/build_docker.rs | 22 ++++++++++++++----- .../src/commands/contract/verify.rs | 9 +++++--- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs 
b/cmd/soroban-cli/src/commands/contract/build.rs index ba4ef06ce9..09c45527de 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -19,9 +19,7 @@ use stellar_xdr::curr::{Limited, Limits, ScMetaEntry, ScMetaV0, StringM, WriteXd #[cfg(feature = "additional-libs")] use crate::commands::contract::optimize; use crate::{ - commands::{ - container::shared::Args as ContainerArgs, contract::build_docker, global, version, - }, + commands::{container::shared::Args as ContainerArgs, contract::build_docker, global, version}, print::Print, wasm, }; @@ -737,7 +735,8 @@ fn make_rustflags_to_remap_absolute_paths( // same wasm. if in_docker { return Ok(Some( - "--remap-path-prefix=/usr/local/cargo/registry/src/= --remap-path-prefix=/workspace=".to_string(), + "--remap-path-prefix=/usr/local/cargo/registry/src/= --remap-path-prefix=/workspace=" + .to_string(), )); } @@ -946,5 +945,4 @@ mod tests { "shlex round-trip failed: {raw_arg:?} not found as a single token in {tokens:?}" ); } - } diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index f70e6c7446..d1f6f40c4d 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -88,7 +88,10 @@ pub async fn run_in_docker( }) .collect(); env.push(format!("CARGO_TARGET_DIR={TARGET_DIR}")); - env.push(format!("SOURCE_DATE_EPOCH={}", source_date_epoch(workspace_root))); + env.push(format!( + "SOURCE_DATE_EPOCH={}", + source_date_epoch(workspace_root) + )); let container_cmd: Vec = std::iter::once(cmd.get_program()) .chain(cmd.get_args()) @@ -182,12 +185,19 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E None, None, ); - while let Some(item) = stream.try_next().await.map_err(|e| Error::DockerImagePull { - image: image.to_string(), - source: e, - })? 
{ + while let Some(item) = stream + .try_next() + .await + .map_err(|e| Error::DockerImagePull { + image: image.to_string(), + source: e, + })? + { if let Some(status) = item.status { - if status.contains("Pulling from") || status.contains("Digest") || status.contains("Status") { + if status.contains("Pulling from") + || status.contains("Digest") + || status.contains("Status") + { print.infoln(status); } } diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index b7516859c4..3c63a1d1d8 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -54,14 +54,18 @@ pub enum Error { expected: String, produced: Vec<(String, String, PathBuf)>, }, - #[error("no Cargo.toml found at {0}; pass --source to point at the contract's source tree")] + #[error( + "no Cargo.toml found at {0}; pass --source to point at the contract's source tree" + )] SourceNotFound(PathBuf), #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), } fn format_mismatch(expected: &str, produced: &[(String, String, PathBuf)]) -> String { - let mut s = format!("verification failed: rebuilt wasm does not match (expected sha256 {expected}).\nproduced:"); + let mut s = format!( + "verification failed: rebuilt wasm does not match (expected sha256 {expected}).\nproduced:" + ); for (name, hash, path) in produced { let _ = write!(s, "\n {name} sha256:{hash} {}", path.display()); } @@ -166,5 +170,4 @@ mod tests { ); assert_eq!(find_meta(&meta, "missing"), None); } - } From aaccd7313518a3fff7a50f806644f64284a8e74b Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Tue, 28 Apr 2026 20:19:42 +0000 Subject: [PATCH 08/64] use cargo +toolchain to pin rust version on verify --- cmd/soroban-cli/src/commands/contract/build.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs 
b/cmd/soroban-cli/src/commands/contract/build.rs index 09c45527de..3e8d403a12 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -113,8 +113,8 @@ pub struct Cmd { #[command(flatten)] pub container_args: ContainerArgs, - /// Pin RUSTUP_TOOLCHAIN inside the docker container. Set by `verify` from - /// the `rsver` meta entry; not user-facing. + /// Run cargo via `cargo +` to pin the rust toolchain. Set by + /// `verify` from the wasm's `rsver` meta entry; not user-facing. #[arg(skip)] pub rustup_toolchain: Option, @@ -281,6 +281,12 @@ impl Cmd { for p in packages { let mut cmd = Command::new("cargo"); cmd.stdout(Stdio::piped()); + // `+` is rustup's explicit toolchain selector and overrides + // any `rust-toolchain.toml` in the workspace. Set by `verify` from + // the wasm's `rsver` meta entry. + if let Some(toolchain) = &self.rustup_toolchain { + cmd.arg(format!("+{toolchain}")); + } cmd.arg("rustc"); // Force --locked when building inside Docker so the build is deterministic. if self.locked || self.docker.is_some() { @@ -331,10 +337,6 @@ impl Cmd { // optimization using markers. 
cmd.env("SOROBAN_SDK_BUILD_SYSTEM_SUPPORTS_SPEC_SHAKING_V2", "1"); - if let Some(toolchain) = &self.rustup_toolchain { - cmd.env("RUSTUP_TOOLCHAIN", toolchain); - } - let cmd_str = serialize_command(&cmd); if self.print_commands_only { From c002c91cff1ca0d3d1d76d5d92ca8cdb7f9d30b9 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Tue, 28 Apr 2026 23:52:32 +0000 Subject: [PATCH 09/64] bind-mount cargo registry and rustup state from host --- .../src/commands/contract/build_docker.rs | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index d1f6f40c4d..8178bfc740 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -23,11 +23,6 @@ pub const WORK_DIR: &str = "/workspace"; const TARGET_DIR: &str = "/target"; const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; const RUSTUP_DIR: &str = "/usr/local/rustup"; -// Named docker volumes for cached state across runs. Reproducibility comes -// from --locked + checksums + the pinned image digest, so cache contents -// don't affect build output. -const CARGO_REGISTRY_VOLUME: &str = "stellar-cli-cargo-registry"; -const RUSTUP_VOLUME: &str = "stellar-cli-rustup"; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -54,6 +49,9 @@ pub enum Error { #[error("docker run: {0}")] DockerRun(#[from] bollard::errors::Error), + + #[error("resolving CARGO_HOME / RUSTUP_HOME: {0}")] + CargoHome(std::io::Error), } /// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, @@ -74,11 +72,17 @@ pub async fn run_in_docker( pull_image(&docker, image, print).await?; let resolved = resolve_image_digest(&docker, image).await?; + // Bind-mount the host's cargo registry and rustup state. 
Bind mounts + // preserve host ownership, so the container (running as the host user) + // can write to them. This caches crate downloads and installed + // toolchains across runs. + let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; + let rustup_home = home::rustup_home().map_err(Error::CargoHome)?; let binds = vec![ format!("{}:{}", workspace_root.display(), WORK_DIR), format!("{}:{}", target_dir.display(), TARGET_DIR), - format!("{CARGO_REGISTRY_VOLUME}:{REGISTRY_DIR}"), - format!("{RUSTUP_VOLUME}:{RUSTUP_DIR}"), + format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), + format!("{}:{}", rustup_home.display(), RUSTUP_DIR), ]; let mut env: Vec = cmd From b06473ffbebf8aaa1d0349efb9c5e185d745b87e Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 00:57:52 +0000 Subject: [PATCH 10/64] rename --docker to --backend with local|docker[=image]; fully-qualify bldimg --- .../src/commands/contract/build.rs | 94 +++++++++++++----- .../src/commands/contract/build_docker.rs | 99 +++++++++++++++---- .../src/commands/contract/verify.rs | 8 +- 3 files changed, 153 insertions(+), 48 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 3e8d403a12..ff9ec6ad8e 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -95,20 +95,23 @@ pub struct Cmd { #[arg(long, conflicts_with = "out_dir", help_heading = "Other")] pub print_commands_only: bool, - /// Run inside a Docker container (linux/amd64) for reproducible builds. - /// The resolved image digest is recorded in contract metadata. Pin via - /// `--docker=@sha256:...` for fully-reproducible builds. Aborted - /// builds may leave a stopped container; clean with `docker container prune`. + /// Build backend. + /// + /// - `local` (default): build using the host's rust toolchain. 
+ /// - `docker`: build inside `docker.io/library/rust:latest` (linux/amd64). + /// The resolved image digest is recorded in contract metadata. + /// - `docker=`: build inside the specified Docker image. Pin via + /// `--backend docker=@sha256:...` for fully-reproducible builds. + /// + /// Aborted docker builds may leave a stopped container; clean with `docker container prune`. #[arg( long, - num_args = 0..=1, - require_equals = true, - default_missing_value = "docker.io/library/rust:latest", - value_name = "IMAGE", - value_parser = parse_docker_image, + value_name = "BACKEND", + default_value = "local", + value_parser = parse_backend, help_heading = "Reproducible Build", )] - pub docker: Option, + pub backend: Backend, #[command(flatten)] pub container_args: ContainerArgs, @@ -122,6 +125,51 @@ pub struct Cmd { pub build_args: BuildArgs, } +/// Build backend selector for `--backend`. +#[derive(Clone, Debug, Default)] +pub enum Backend { + /// Build with the host's rust toolchain. + #[default] + Local, + /// Build inside a Docker container with the given image. + Docker { image: String }, +} + +impl Backend { + /// Returns the docker image if the backend is `Docker`, else `None`. 
+ pub fn docker_image(&self) -> Option<&str> { + match self { + Self::Docker { image } => Some(image), + Self::Local => None, + } + } +} + +const DEFAULT_DOCKER_IMAGE: &str = "docker.io/library/rust:latest"; + +fn parse_backend(s: &str) -> Result { + match s { + "local" => Ok(Backend::Local), + "docker" => Ok(Backend::Docker { + image: DEFAULT_DOCKER_IMAGE.to_string(), + }), + _ => { + if let Some(image) = s.strip_prefix("docker=") { + if image.is_empty() { + return Err("docker image cannot be empty; use `--backend docker` for the default image".to_string()); + } + Ok(Backend::Docker { + image: image.to_string(), + }) + } else { + Err(format!( + "unknown backend {s:?}; expected `local`, `docker`, or `docker=`" + )) + } + } + } +} + /// Shared build options for meta and optimization, reused by deploy and upload. #[derive(Parser, Debug, Clone, Default)] pub struct BuildArgs { @@ -135,15 +183,6 @@ pub struct BuildArgs { pub optimize: bool, } -fn parse_docker_image(s: &str) -> Result { - if s.is_empty() { - return Err( - "image cannot be empty; pass --docker without a value to use the default image, or --docker=".to_string(), - ); - } - Ok(s.to_string()) -} - pub fn parse_meta_arg(s: &str) -> Result<(String, String), Error> { let parts = s.splitn(2, '='); @@ -243,7 +282,7 @@ impl Default for Cmd { out_dir: None, locked: false, print_commands_only: false, - docker: None, + backend: Backend::Local, container_args: ContainerArgs { docker_host: None }, rustup_toolchain: None, build_args: BuildArgs::default(), @@ -289,10 +328,10 @@ impl Cmd { } cmd.arg("rustc"); // Force --locked when building inside Docker so the build is deterministic. - if self.locked || self.docker.is_some() { + if self.locked || self.backend.docker_image().is_some() { cmd.arg("--locked"); } - let manifest_path = if self.docker.is_some() { + let manifest_path = if self.backend.docker_image().is_some() { // Inside the container the workspace is mounted at /workspace. 
let rel = pathdiff::diff_paths(&p.manifest_path, workspace_root) .unwrap_or(p.manifest_path.clone().into()); @@ -327,9 +366,10 @@ impl Cmd { } } - if let Some(rustflags) = - make_rustflags_to_remap_absolute_paths(&print, self.docker.is_some())? - { + if let Some(rustflags) = make_rustflags_to_remap_absolute_paths( + &print, + self.backend.docker_image().is_some(), + )? { cmd.env("CARGO_BUILD_RUSTFLAGS", rustflags); } @@ -340,12 +380,12 @@ impl Cmd { let cmd_str = serialize_command(&cmd); if self.print_commands_only { - if let Some(image) = &self.docker { + if let Some(image) = self.backend.docker_image() { println!("# inside docker image: {image}"); } println!("{cmd_str}"); } else { - let bldimg = if let Some(image) = &self.docker { + let bldimg = if let Some(image) = self.backend.docker_image() { print.infoln(format!("docker[{image}] {cmd_str}")); Some( build_docker::run_in_docker( diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 8178bfc740..359c2a3ce1 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -211,24 +211,30 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E // We pull with --platform=linux/amd64 so the recorded digest is platform-specific; // reproducibility on `verify` depends on always pulling with that same platform. +// Returns a fully-qualified `/@sha256:` reference so +// that `verify` on a different machine can resolve it without depending on +// local registry config. 
async fn resolve_image_digest(docker: &Docker, image: &str) -> Result<String, Error> { - if parse_pinned_digest(image).is_some() { - return Ok(image.to_string()); - } - let info = docker - .inspect_image(image) - .await - .map_err(|e| Error::DockerImageInspect { - image: image.to_string(), - source: e, - })?; - info.repo_digests - .unwrap_or_default() - .into_iter() - .next() - .ok_or_else(|| Error::DockerNoDigest { - image: image.to_string(), - }) + let canonical = fully_qualify(strip_tag(image)); + let digest = if let Some((_, digest)) = parse_pinned_digest(image) { + digest.to_string() + } else { + docker + .inspect_image(image) + .await + .map_err(|e| Error::DockerImageInspect { + image: image.to_string(), + source: e, + })? + .repo_digests + .unwrap_or_default() + .into_iter() + .find_map(|d| d.split_once('@').map(|(_, d)| d.to_string())) + .ok_or_else(|| Error::DockerNoDigest { + image: image.to_string(), + })? + }; + Ok(format!("{canonical}@{digest}")) } fn parse_pinned_digest(image: &str) -> Option<(&str, &str)> { @@ -236,6 +242,36 @@ fn parse_pinned_digest(image: &str) -> Option<(&str, &str)> { after.starts_with("sha256:").then_some((name, after)) } +/// Strip any `@sha256:...` and `:tag` suffix, leaving only the repository name. +fn strip_tag(image: &str) -> &str { + let no_digest = image.split_once('@').map_or(image, |(name, _)| name); + // Tags appear after the last `/`; a `:` in the host portion (host:port) is not a tag. + match no_digest.rfind('/') { + Some(slash) => match no_digest[slash + 1..].rfind(':') { + Some(colon) => &no_digest[..slash + 1 + colon], + None => no_digest, + }, + None => match no_digest.rfind(':') { + Some(colon) => &no_digest[..colon], + None => no_digest, + }, + } +} + +/// Add the implicit `docker.io` registry (and `library/` namespace for short names).
+fn fully_qualify(name: &str) -> String { + let has_registry = name + .split_once('/') + .is_some_and(|(host, _)| host.contains('.') || host.contains(':') || host == "localhost"); + if has_registry { + name.to_string() + } else if name.contains('/') { + format!("docker.io/{name}") + } else { + format!("docker.io/library/{name}") + } +} + #[allow(clippy::unnecessary_wraps)] #[cfg(unix)] fn current_uid_gid() -> Option { @@ -283,4 +319,33 @@ mod tests { Some(("host:5000/name:tag", "sha256:abc")) ); } + + #[test] + fn strip_tag_cases() { + assert_eq!(strip_tag("rust"), "rust"); + assert_eq!(strip_tag("rust:latest"), "rust"); + assert_eq!(strip_tag("rust@sha256:abc"), "rust"); + assert_eq!(strip_tag("rust:latest@sha256:abc"), "rust"); + assert_eq!( + strip_tag("docker.io/library/rust:latest"), + "docker.io/library/rust" + ); + assert_eq!(strip_tag("host:5000/myimage:v1"), "host:5000/myimage"); + } + + #[test] + fn fully_qualify_cases() { + assert_eq!(fully_qualify("rust"), "docker.io/library/rust"); + assert_eq!(fully_qualify("myorg/myimage"), "docker.io/myorg/myimage"); + assert_eq!( + fully_qualify("docker.io/library/rust"), + "docker.io/library/rust" + ); + assert_eq!( + fully_qualify("quay.io/myorg/myimage"), + "quay.io/myorg/myimage" + ); + assert_eq!(fully_qualify("host:5000/myimage"), "host:5000/myimage"); + assert_eq!(fully_qualify("localhost/myimage"), "localhost/myimage"); + } } diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 3c63a1d1d8..9a7c72e976 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -29,7 +29,7 @@ pub struct Cmd { /// Override the docker image read from the contract metadata. For debugging only. 
#[arg(long, value_name = "IMAGE", help_heading = "Advanced")] - pub docker: Option<String>, + pub docker_image: Option<String>, #[command(flatten)] pub container_args: ContainerArgs, @@ -45,7 +45,7 @@ pub enum Error { Build(#[from] build::Error), #[error("stellar asset contract has no source to verify")] StellarAssetContract, - #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --docker` to make it verifiable")] + #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --backend docker` to make it verifiable")] MissingMeta(&'static str), #[error("CLI version mismatch: contract says '{expected}', running CLI is '{actual}'. Install the matching CLI version and re-run.")] CliVersionMismatch { expected: String, actual: String }, @@ -93,7 +93,7 @@ impl Cmd { actual: running, }); } - let bldimg = match &self.docker { + let bldimg = match &self.docker_image { Some(image) => image.clone(), None => find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, }; @@ -106,7 +106,7 @@ impl Cmd { let build_cmd = build::Cmd { manifest_path: Some(manifest_path), - docker: Some(bldimg), + backend: build::Backend::Docker { image: bldimg }, container_args: self.container_args.clone(), rustup_toolchain: Some(rsver), ..build::Cmd::default() From e5bba0e005a8f7b38d2fc72c82596f2c55b019bc Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:02:50 +0000 Subject: [PATCH 11/64] remove docker image override on verify --- cmd/soroban-cli/src/commands/contract/verify.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 9a7c72e976..c15237da9b 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -27,10 +27,6 @@ pub struct Cmd { #[arg(long, default_value =
".")] pub source: PathBuf, - /// Override the docker image read from the contract metadata. For debugging only. - #[arg(long, value_name = "IMAGE", help_heading = "Advanced")] - pub docker_image: Option, - #[command(flatten)] pub container_args: ContainerArgs, } @@ -93,10 +89,7 @@ impl Cmd { actual: running, }); } - let bldimg = match &self.docker_image { - Some(image) => image.clone(), - None => find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?, - }; + let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; let manifest_path = self.source.join("Cargo.toml"); From 956487a3c5311e47b0b8bb54d055ca42279f50a7 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:13:31 +0000 Subject: [PATCH 12/64] verify: print cliver/rsver/bldimg from meta --- cmd/soroban-cli/src/commands/contract/verify.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index c15237da9b..bcf7ae6448 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -82,6 +82,12 @@ impl Cmd { let spec = Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; + let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; + let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; + print.infoln(format!("cliver: {cliver}")); + print.infoln(format!("rsver: {rsver}")); + print.infoln(format!("bldimg: {bldimg}")); + let running = version::one_line(); if cliver != running { return Err(Error::CliVersionMismatch { @@ -89,8 +95,6 @@ impl Cmd { actual: running, }); } - let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; - let rsver = 
find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; let manifest_path = self.source.join("Cargo.toml"); if !manifest_path.exists() { From 79c5bb3156b3046e21e35ce7407c3f5a09680d07 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:14:25 +0000 Subject: [PATCH 13/64] verify: 'wasm sha256' -> 'wasm hash' --- cmd/soroban-cli/src/commands/contract/verify.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index bcf7ae6448..2f1a5d94ae 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -78,7 +78,7 @@ impl Cmd { Contract::StellarAssetContract => return Err(Error::StellarAssetContract), }; let original_hash = hex::encode(Sha256::digest(&wasm_bytes)); - print.infoln(format!("Original wasm sha256: {original_hash}")); + print.infoln(format!("Original wasm hash: {original_hash}")); let spec = Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; From d63894c74d929e4e4bb95a7507ffff72a6e95244 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:14:58 +0000 Subject: [PATCH 14/64] verify: friendlier labels for meta values --- cmd/soroban-cli/src/commands/contract/verify.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 2f1a5d94ae..131d09fd91 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -84,9 +84,9 @@ impl Cmd { let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, 
"rsver").ok_or(Error::MissingMeta("rsver"))?; - print.infoln(format!("cliver: {cliver}")); - print.infoln(format!("rsver: {rsver}")); - print.infoln(format!("bldimg: {bldimg}")); + print.infoln(format!("stellar-cli version: {cliver}")); + print.infoln(format!("rust version: {rsver}")); + print.infoln(format!("Docker image: {bldimg}")); let running = version::one_line(); if cliver != running { From 554c10c665f86311a4015dfec12f3de10c9cc886 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:20:29 +1000 Subject: [PATCH 15/64] refactor build backend handling to use container logic --- .../src/commands/contract/build.rs | 59 ++++++++++--------- .../{build_docker.rs => build_container.rs} | 48 +++++++-------- cmd/soroban-cli/src/commands/contract/mod.rs | 4 +- .../src/commands/contract/verify.rs | 8 +-- 4 files changed, 59 insertions(+), 60 deletions(-) rename cmd/soroban-cli/src/commands/contract/{build_docker.rs => build_container.rs} (90%) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index ff9ec6ad8e..a59a1e5843 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -19,7 +19,9 @@ use stellar_xdr::curr::{Limited, Limits, ScMetaEntry, ScMetaV0, StringM, WriteXd #[cfg(feature = "additional-libs")] use crate::commands::contract::optimize; use crate::{ - commands::{container::shared::Args as ContainerArgs, contract::build_docker, global, version}, + commands::{ + container::shared::Args as ContainerArgs, contract::build_container, global, version, + }, print::Print, wasm, }; @@ -98,12 +100,13 @@ pub struct Cmd { /// Build backend. /// /// - `local` (default): build using the host's rust toolchain. - /// - `docker`: build inside `docker.io/library/rust:latest` (linux/amd64). - /// The resolved image digest is recorded in contract metadata. 
- /// - `docker=`: build inside the specified Docker image. Pin via - /// `--backend docker=@sha256:...` for fully-reproducible builds. + /// - `container`: build inside `docker.io/library/rust:latest` (linux/amd64) + /// using the local container runtime. The resolved image digest is + /// recorded in contract metadata. + /// - `container=`: build inside the specified container image. Pin + /// via `--backend container=@sha256:...` for fully-reproducible builds. /// - /// Aborted docker builds may leave a stopped container; clean with `docker container prune`. + /// Aborted container builds may leave a stopped container; clean with `docker container prune`. #[arg( long, value_name = "BACKEND", @@ -131,39 +134,39 @@ pub enum Backend { /// Build with the host's rust toolchain. #[default] Local, - /// Build inside a Docker container with the given image. - Docker { image: String }, + /// Build inside a container with the given image. + Container { image: String }, } impl Backend { - /// Returns the docker image if the backend is `Docker`, else `None`. - pub fn docker_image(&self) -> Option<&str> { + /// Returns the container image if the backend is `Container`, else `None`. 
+ pub fn container_image(&self) -> Option<&str> { match self { - Self::Docker { image } => Some(image), + Self::Container { image } => Some(image), Self::Local => None, } } } -const DEFAULT_DOCKER_IMAGE: &str = "docker.io/library/rust:latest"; +const DEFAULT_CONTAINER_IMAGE: &str = "docker.io/library/rust:latest"; fn parse_backend(s: &str) -> Result { match s { "local" => Ok(Backend::Local), - "docker" => Ok(Backend::Docker { - image: DEFAULT_DOCKER_IMAGE.to_string(), + "container" => Ok(Backend::Container { + image: DEFAULT_CONTAINER_IMAGE.to_string(), }), _ => { - if let Some(image) = s.strip_prefix("docker=") { + if let Some(image) = s.strip_prefix("container=") { if image.is_empty() { - return Err("docker image cannot be empty; use `--backend docker` for the default image".to_string()); + return Err("container image cannot be empty; use `--backend container` for the default image".to_string()); } - Ok(Backend::Docker { + Ok(Backend::Container { image: image.to_string(), }) } else { Err(format!( - "unknown backend {s:?}; expected `local`, `docker`, or `docker=`" + "unknown backend {s:?}; expected `local`, `container`, or `container=`" )) } } @@ -263,7 +266,7 @@ pub enum Error { WasmParsing(String), #[error(transparent)] - Docker(#[from] build_docker::Error), + Container(#[from] build_container::Error), } const WASM_TARGET: &str = "wasm32v1-none"; @@ -328,14 +331,14 @@ impl Cmd { } cmd.arg("rustc"); // Force --locked when building inside Docker so the build is deterministic. - if self.locked || self.backend.docker_image().is_some() { + if self.locked || self.backend.container_image().is_some() { cmd.arg("--locked"); } - let manifest_path = if self.backend.docker_image().is_some() { + let manifest_path = if self.backend.container_image().is_some() { // Inside the container the workspace is mounted at /workspace. 
let rel = pathdiff::diff_paths(&p.manifest_path, workspace_root) .unwrap_or(p.manifest_path.clone().into()); - Path::new(build_docker::WORK_DIR).join(rel) + Path::new(build_container::WORK_DIR).join(rel) } else { pathdiff::diff_paths(&p.manifest_path, &working_dir) .unwrap_or(p.manifest_path.clone().into()) @@ -368,7 +371,7 @@ impl Cmd { if let Some(rustflags) = make_rustflags_to_remap_absolute_paths( &print, - self.backend.docker_image().is_some(), + self.backend.container_image().is_some(), )? { cmd.env("CARGO_BUILD_RUSTFLAGS", rustflags); } @@ -380,15 +383,15 @@ impl Cmd { let cmd_str = serialize_command(&cmd); if self.print_commands_only { - if let Some(image) = self.backend.docker_image() { - println!("# inside docker image: {image}"); + if let Some(image) = self.backend.container_image() { + println!("# inside container image: {image}"); } println!("{cmd_str}"); } else { - let bldimg = if let Some(image) = self.backend.docker_image() { - print.infoln(format!("docker[{image}] {cmd_str}")); + let bldimg = if let Some(image) = self.backend.container_image() { + print.infoln(format!("container[{image}] {cmd_str}")); Some( - build_docker::run_in_docker( + build_container::run_in_container( &cmd, image, workspace_root, diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs similarity index 90% rename from cmd/soroban-cli/src/commands/contract/build_docker.rs rename to cmd/soroban-cli/src/commands/contract/build_container.rs index 359c2a3ce1..f0a70314d2 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -26,29 +26,29 @@ const RUSTUP_DIR: &str = "/usr/local/rustup"; #[derive(thiserror::Error, Debug)] pub enum Error { - #[error("cannot connect to docker daemon; is the daemon running? ({0})")] - DockerNotRunning(ContainerError), + #[error("cannot connect to container runtime; is it running? 
({0})")] + RuntimeNotRunning(ContainerError), - #[error("pulling docker image {image}: {source}")] - DockerImagePull { + #[error("pulling container image {image}: {source}")] + ImagePull { image: String, source: bollard::errors::Error, }, - #[error("inspecting docker image {image}: {source}")] - DockerImageInspect { + #[error("inspecting container image {image}: {source}")] + ImageInspect { image: String, source: bollard::errors::Error, }, - #[error("docker image {image} has no repository digest; pin via --docker=/@sha256:...")] - DockerNoDigest { image: String }, + #[error("container image {image} has no repository digest; pin via --backend container=/@sha256:...")] + NoDigest { image: String }, - #[error("build failed inside docker container (exit {0})")] - DockerBuildExit(i64), + #[error("build failed inside container (exit {0})")] + BuildExit(i64), - #[error("docker run: {0}")] - DockerRun(#[from] bollard::errors::Error), + #[error("container runtime: {0}")] + Runtime(#[from] bollard::errors::Error), #[error("resolving CARGO_HOME / RUSTUP_HOME: {0}")] CargoHome(std::io::Error), @@ -56,7 +56,7 @@ pub enum Error { /// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, /// returning the resolved `name@sha256:...` reference for embedding into meta. -pub async fn run_in_docker( +pub async fn run_in_container( cmd: &Command, image: &str, workspace_root: &Path, @@ -67,7 +67,7 @@ pub async fn run_in_docker( let docker: Docker = container_args .connect_to_docker(print) .await - .map_err(Error::DockerNotRunning)?; + .map_err(Error::RuntimeNotRunning)?; pull_image(&docker, image, print).await?; let resolved = resolve_image_digest(&docker, image).await?; @@ -170,11 +170,11 @@ async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Res match res { Ok(r) => exit_code = r.status_code, Err(bollard::errors::Error::DockerContainerWaitError { code, .. 
}) => exit_code = code, - Err(e) => return Err(Error::DockerRun(e)), + Err(e) => return Err(Error::Runtime(e)), } } if exit_code != 0 { - return Err(Error::DockerBuildExit(exit_code)); + return Err(Error::BuildExit(exit_code)); } Ok(()) } @@ -189,14 +189,10 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E None, None, ); - while let Some(item) = stream - .try_next() - .await - .map_err(|e| Error::DockerImagePull { - image: image.to_string(), - source: e, - })? - { + while let Some(item) = stream.try_next().await.map_err(|e| Error::ImagePull { + image: image.to_string(), + source: e, + })? { if let Some(status) = item.status { if status.contains("Pulling from") || status.contains("Digest") @@ -222,7 +218,7 @@ async fn resolve_image_digest(docker: &Docker, image: &str) -> Result Result Date: Wed, 29 Apr 2026 01:23:45 +0000 Subject: [PATCH 16/64] verify: indent meta lines; drop blank line after build complete --- cmd/soroban-cli/src/commands/contract/build.rs | 2 +- cmd/soroban-cli/src/commands/contract/verify.rs | 9 ++++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index a59a1e5843..85bd95ee95 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -700,7 +700,7 @@ impl Cmd { } } - print.checkln("Build Complete\n"); + print.checkln("Build Complete"); } } diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 87e2dcea61..883969699b 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -78,15 +78,14 @@ impl Cmd { Contract::StellarAssetContract => return Err(Error::StellarAssetContract), }; let original_hash = hex::encode(Sha256::digest(&wasm_bytes)); - print.infoln(format!("Original wasm hash: {original_hash}")); - let spec = 
Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; - print.infoln(format!("stellar-cli version: {cliver}")); - print.infoln(format!("rust version: {rsver}")); - print.infoln(format!("Container image: {bldimg}")); + print.infoln(format!("Original wasm hash: {original_hash}")); + print.blankln(format!("stellar-cli version: {cliver}")); + print.blankln(format!("rust version: {rsver}")); + print.blankln(format!("Container image: {bldimg}")); let running = version::one_line(); if cliver != running { From ab6b9b3bbb5f2f8f891325added631b9477e8231 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:25:57 +0000 Subject: [PATCH 17/64] build: blank line between successive contract builds --- cmd/soroban-cli/src/commands/contract/build.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 85bd95ee95..056daf664f 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -320,7 +320,11 @@ impl Cmd { let wasm_target = get_wasm_target()?; let mut built_contracts = Vec::new(); - for p in packages { + for (i, p) in packages.iter().enumerate() { + if i > 0 { + // Blank line separating successive contract builds in a workspace. 
+ eprintln!(); + } let mut cmd = Command::new("cargo"); cmd.stdout(Stdio::piped()); // `+` is rustup's explicit toolchain selector and overrides From 0022b2d41044c3af365d7c6371898a40f4eae111 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:36:50 +0000 Subject: [PATCH 18/64] container: emit cargo output raw, force CARGO_TERM_COLOR=always --- .../src/commands/contract/build_container.rs | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index f0a70314d2..c6f3a34615 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -69,7 +69,7 @@ pub async fn run_in_container( .await .map_err(Error::RuntimeNotRunning)?; - pull_image(&docker, image, print).await?; + pull_image(&docker, image).await?; let resolved = resolve_image_digest(&docker, image).await?; // Bind-mount the host's cargo registry and rustup state. Bind mounts @@ -96,6 +96,9 @@ pub async fn run_in_container( "SOURCE_DATE_EPOCH={}", source_date_epoch(workspace_root) )); + // Force cargo to emit color (otherwise cargo detects the non-TTY stdout + // and falls back to monochrome). Matches what users see for local builds. + env.push("CARGO_TERM_COLOR=always".to_string()); let container_cmd: Vec = std::iter::once(cmd.get_program()) .chain(cmd.get_args()) @@ -126,7 +129,7 @@ pub async fn run_in_container( .await? 
.id; - let result = run_and_wait(&docker, &container_id, print).await; + let result = run_and_wait(&docker, &container_id).await; let _ = docker .remove_container( @@ -142,7 +145,7 @@ pub async fn run_in_container( Ok(resolved) } -async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Result<(), Error> { +async fn run_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> { docker .start_container(container_id, None::) .await?; @@ -160,7 +163,9 @@ async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Res let s = item?.to_string(); let s = s.trim_end_matches('\n'); if !s.is_empty() { - print.infoln(s); + // Emit container output raw (no `ℹ️` prefix) so it looks like + // cargo running locally. + eprintln!("{s}"); } } @@ -179,7 +184,7 @@ async fn run_and_wait(docker: &Docker, container_id: &str, print: &Print) -> Res Ok(()) } -async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { +async fn pull_image(docker: &Docker, image: &str) -> Result<(), Error> { let mut stream = docker.create_image( Some(CreateImageOptions { from_image: Some(image.to_string()), @@ -198,7 +203,7 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E || status.contains("Digest") || status.contains("Status") { - print.infoln(status); + eprintln!("{status}"); } } } From 18332ef6016b2df1f02ee2a9162c7b61c1b12834 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:57:13 +0000 Subject: [PATCH 19/64] container: re-add info prefix on docker pull progress lines --- cmd/soroban-cli/src/commands/contract/build_container.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index c6f3a34615..26ab244553 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ 
b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -69,7 +69,7 @@ pub async fn run_in_container( .await .map_err(Error::RuntimeNotRunning)?; - pull_image(&docker, image).await?; + pull_image(&docker, image, print).await?; let resolved = resolve_image_digest(&docker, image).await?; // Bind-mount the host's cargo registry and rustup state. Bind mounts @@ -184,7 +184,7 @@ async fn run_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> Ok(()) } -async fn pull_image(docker: &Docker, image: &str) -> Result<(), Error> { +async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { let mut stream = docker.create_image( Some(CreateImageOptions { from_image: Some(image.to_string()), @@ -203,7 +203,7 @@ async fn pull_image(docker: &Docker, image: &str) -> Result<(), Error> { || status.contains("Digest") || status.contains("Status") { - eprintln!("{status}"); + print.infoln(status); } } } From 1adb2b268c78ecefda01fe54e2ab5b513d20cbee Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 01:58:19 +0000 Subject: [PATCH 20/64] container: emoji on first pull line, blankln continuations --- cmd/soroban-cli/src/commands/contract/build_container.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index 26ab244553..ad14b8e1ff 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -194,6 +194,7 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E None, None, ); + let mut first = true; while let Some(item) = stream.try_next().await.map_err(|e| Error::ImagePull { image: image.to_string(), source: e, @@ -203,7 +204,12 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E || status.contains("Digest") || 
status.contains("Status") { - print.infoln(status); + if first { + print.infoln(status); + first = false; + } else { + print.blankln(status); + } } } } From 7ff2f48b6e54ff5f8abd2ad048aae2c841c4a734 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 02:15:22 +0000 Subject: [PATCH 21/64] verify: add 'Loading meta from contract' header --- cmd/soroban-cli/src/commands/contract/verify.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 883969699b..07b8270bcf 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -82,7 +82,8 @@ impl Cmd { let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; - print.infoln(format!("Original wasm hash: {original_hash}")); + print.infoln("Loading meta from contract..."); + print.blankln(format!("Original wasm hash: {original_hash}")); print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); print.blankln(format!("Container image: {bldimg}")); From b285a68af68687a944f33e8fc79d504e641b3743 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 12:17:52 +1000 Subject: [PATCH 22/64] update logging messages for contracts --- cmd/soroban-cli/src/commands/contract/info/shared.rs | 6 +++--- cmd/soroban-cli/src/commands/contract/verify.rs | 1 - 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/info/shared.rs b/cmd/soroban-cli/src/commands/contract/info/shared.rs index b06d6eb4dc..f9a8e23635 100644 --- a/cmd/soroban-cli/src/commands/contract/info/shared.rs +++ 
b/cmd/soroban-cli/src/commands/contract/info/shared.rs @@ -122,7 +122,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { // Check if a local WASM file path is provided if let Some(path) = &args.wasm { // Read the WASM file and return its contents - print.infoln("Loading contract spec from file..."); + print.infoln("Loading contract from file..."); let wasm_bytes = wasm::Args { wasm: path.clone() }.read()?; return Ok(Fetched { contract: Contract::Wasm { wasm_bytes }, @@ -149,7 +149,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { .await?; print.globeln(format!( - "Downloading contract spec for wasm hash: {wasm_hash}" + "Downloading contract for wasm hash: {wasm_hash}" )); let wasm_bytes = get_remote_wasm_from_hash(&client, &hash).await?; Ok(Fetched { @@ -164,7 +164,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { contract_id.resolve_contract_id(&args.locator, &network.network_passphrase)?; let derived_address = xdr::ScAddress::Contract(ContractId(xdr::Hash(contract_id.0))).to_string(); - print.globeln(format!("Downloading contract spec: {derived_address}")); + print.globeln(format!("Downloading contract: {derived_address}")); let res = wasm::fetch_from_contract(&contract_id, network).await; if let Some(ContractIsStellarAsset) = res.as_ref().err() { return Ok(Fetched { diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 07b8270bcf..fde9095bb0 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -82,7 +82,6 @@ impl Cmd { let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; - print.infoln("Loading meta from contract..."); print.blankln(format!("Original wasm hash: {original_hash}")); 
print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); From 216fed1616bacc51d614c0ce79dabbe4f4d6ab67 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 02:27:12 +0000 Subject: [PATCH 23/64] container: always install wasm target before build --- .../src/commands/contract/build.rs | 2 ++ .../src/commands/contract/build_container.rs | 19 ++++++++++++++++++- .../src/commands/contract/info/shared.rs | 4 +--- 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 056daf664f..d64d67ce16 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -400,6 +400,8 @@ impl Cmd { image, workspace_root, target_dir.as_std_path(), + &wasm_target, + self.rustup_toolchain.as_deref(), &self.container_args, &print, ) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index ad14b8e1ff..59f6a96ea4 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -61,6 +61,8 @@ pub async fn run_in_container( image: &str, workspace_root: &Path, target_dir: &Path, + wasm_target: &str, + pin_toolchain: Option<&str>, container_args: &ContainerArgs, print: &Print, ) -> Result { @@ -100,11 +102,26 @@ pub async fn run_in_container( // and falls back to monochrome). Matches what users see for local builds. 
env.push("CARGO_TERM_COLOR=always".to_string()); - let container_cmd: Vec = std::iter::once(cmd.get_program()) + let argv: Vec = std::iter::once(cmd.get_program()) .chain(cmd.get_args()) .map(OsStr::to_string_lossy) .map(std::borrow::Cow::into_owned) .collect(); + // Always install the wasm target before the build so we don't depend on + // the workspace's `rust-toolchain.toml` having configured it. When pinning, + // install for the override toolchain; otherwise the rustup default applies. + // Args pass through `$@` so we don't have to shell-escape. + let target_install = match pin_toolchain { + Some(toolchain) => format!("rustup target add --toolchain {toolchain} {wasm_target}"), + None => format!("rustup target add {wasm_target}"), + }; + let mut container_cmd = vec![ + "sh".to_string(), + "-c".to_string(), + format!("{target_install} && exec \"$@\""), + "sh".to_string(), + ]; + container_cmd.extend(argv); let config = ContainerCreateBody { image: Some(resolved.clone()), diff --git a/cmd/soroban-cli/src/commands/contract/info/shared.rs b/cmd/soroban-cli/src/commands/contract/info/shared.rs index f9a8e23635..183ce9dcad 100644 --- a/cmd/soroban-cli/src/commands/contract/info/shared.rs +++ b/cmd/soroban-cli/src/commands/contract/info/shared.rs @@ -148,9 +148,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { .verify_network_passphrase(Some(&network.network_passphrase)) .await?; - print.globeln(format!( - "Downloading contract for wasm hash: {wasm_hash}" - )); + print.globeln(format!("Downloading contract for wasm hash: {wasm_hash}")); let wasm_bytes = get_remote_wasm_from_hash(&client, &hash).await?; Ok(Fetched { contract: Contract::Wasm { wasm_bytes }, From 1f168b65ef48df816d49e6979fd5ef718e66c3e4 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 02:27:48 +0000 Subject: [PATCH 24/64] container: allow too_many_arguments on run_in_container --- 
cmd/soroban-cli/src/commands/contract/build_container.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index 59f6a96ea4..4afe2b4a49 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -56,6 +56,7 @@ pub enum Error { /// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, /// returning the resolved `name@sha256:...` reference for embedding into meta. +#[allow(clippy::too_many_arguments)] pub async fn run_in_container( cmd: &Command, image: &str, From 0455f17c3f6f3589af64d8c237491fe69aa4f265 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 12:40:20 +1000 Subject: [PATCH 25/64] update build container commands --- cmd/soroban-cli/src/commands/contract/build_container.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index 4afe2b4a49..56a66096a0 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -113,8 +113,8 @@ pub async fn run_in_container( // install for the override toolchain; otherwise the rustup default applies. // Args pass through `$@` so we don't have to shell-escape. 
let target_install = match pin_toolchain { - Some(toolchain) => format!("rustup target add --toolchain {toolchain} {wasm_target}"), - None => format!("rustup target add {wasm_target}"), + Some(toolchain) => format!("rustup --quiet target add --toolchain {toolchain} {wasm_target}"), + None => format!("rustup --quiet target add {wasm_target}"), }; let mut container_cmd = vec![ "sh".to_string(), From af097d1236adf17734c4e69135e0042019df5cfd Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 02:43:45 +0000 Subject: [PATCH 26/64] verify: use Print for verdict lines --- cmd/soroban-cli/src/commands/contract/verify.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index fde9095bb0..416941ef04 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -120,12 +120,13 @@ impl Cmd { produced.push((c.name.clone(), hash, c.path.clone())); } - // Verdict bypasses --quiet because pass/fail is this command's primary output. 
if let Some(name) = matched { - eprintln!("✅ Verified: rebuilt wasm matches (sha256 {original_hash}) — {name}"); + print.checkln(format!( + "Verified: rebuilt wasm matches (sha256 {original_hash}) — {name}" + )); Ok(()) } else { - eprintln!("⚠ Verification failed: rebuilt wasm does not match original."); + print.warnln("Verification failed: rebuilt wasm does not match original."); Err(Error::Mismatch { expected: original_hash, produced, From 0b886a639ddf52f48b8d3421ce5c6461672d98e4 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 03:05:39 +0000 Subject: [PATCH 27/64] verify: enforce single contract per invocation; pre-check workspace --- .../src/commands/contract/verify.rs | 96 +++++++++++++------ 1 file changed, 69 insertions(+), 27 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 416941ef04..a0d9138a8c 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -1,7 +1,7 @@ -use std::fmt::Write as _; use std::fs; use std::path::PathBuf; +use cargo_metadata::MetadataCommand; use clap::Parser; use sha2::{Digest, Sha256}; use soroban_spec_tools::contract::{self, Spec}; @@ -45,10 +45,11 @@ pub enum Error { MissingMeta(&'static str), #[error("CLI version mismatch: contract says '{expected}', running CLI is '{actual}'. 
Install the matching CLI version and re-run.")] CliVersionMismatch { expected: String, actual: String }, - #[error("{}", format_mismatch(expected, produced))] + #[error("verification failed: rebuilt {name} (sha256 {actual}) does not match original (sha256 {expected})")] Mismatch { + name: String, expected: String, - produced: Vec<(String, String, PathBuf)>, + actual: String, }, #[error( "no Cargo.toml found at {0}; pass --source to point at the contract's source tree" @@ -56,16 +57,12 @@ pub enum Error { SourceNotFound(PathBuf), #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), -} - -fn format_mismatch(expected: &str, produced: &[(String, String, PathBuf)]) -> String { - let mut s = format!( - "verification failed: rebuilt wasm does not match (expected sha256 {expected}).\nproduced:" - ); - for (name, hash, path) in produced { - let _ = write!(s, "\n {name} sha256:{hash} {}", path.display()); - } - s + #[error("expected source to produce exactly one cdylib contract, found {found:?}; verify only supports a single contract per invocation")] + ExpectedSingleContract { found: Vec }, + #[error("reading cargo metadata: {0}")] + Metadata(#[from] cargo_metadata::Error), + #[error("resolving source path: {0}")] + AbsolutePath(std::io::Error), } impl Cmd { @@ -100,6 +97,14 @@ impl Cmd { return Err(Error::SourceNotFound(self.source.clone())); } + // Verify takes a single wasm input, so the source must produce exactly + // one cdylib contract. Detect this up-front via cargo metadata so we + // don't waste a build cycle on a workspace with multiple contracts. 
+ let cdylibs = single_cdylib_or_workspace_cdylibs(&manifest_path)?; + if cdylibs.len() != 1 { + return Err(Error::ExpectedSingleContract { found: cdylibs }); + } + let build_cmd = build::Cmd { manifest_path: Some(manifest_path), backend: build::Backend::Container { image: bldimg }, @@ -108,33 +113,70 @@ impl Cmd { ..build::Cmd::default() }; let built = build_cmd.run(global_args).await?; - - let mut produced = Vec::with_capacity(built.len()); - let mut matched = None; - for c in &built { - let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; - let hash = hex::encode(Sha256::digest(&bytes)); - if hash == original_hash { - matched = Some(c.name.clone()); + let c = match built.as_slice() { + [c] => c, + other => { + return Err(Error::ExpectedSingleContract { + found: other.iter().map(|c| c.name.clone()).collect(), + }); } - produced.push((c.name.clone(), hash, c.path.clone())); - } + }; - if let Some(name) = matched { + let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; + let hash = hex::encode(Sha256::digest(&bytes)); + if hash == original_hash { print.checkln(format!( - "Verified: rebuilt wasm matches (sha256 {original_hash}) — {name}" + "Verified: rebuilt {} wasm matches {original_hash}", + c.name )); Ok(()) } else { - print.warnln("Verification failed: rebuilt wasm does not match original."); + print.warnln(format!( + "Verification failed: rebuilt {} does not match original", + c.name + )); Err(Error::Mismatch { + name: c.name.clone(), expected: original_hash, - produced, + actual: hash, }) } } } +/// Mirror what `build::Cmd::packages` selects: if `manifest_path` points at a +/// specific package, return that package's name iff it is a cdylib; otherwise +/// (workspace root) return the names of all workspace-member cdylibs. 
+fn single_cdylib_or_workspace_cdylibs(manifest_path: &PathBuf) -> Result, Error> { + let metadata = MetadataCommand::new() + .manifest_path(manifest_path) + .no_deps() + .exec()?; + let manifest_abs = std::path::absolute(manifest_path).map_err(Error::AbsolutePath)?; + let is_cdylib = |p: &cargo_metadata::Package| { + p.targets + .iter() + .any(|t| t.crate_types.iter().any(|c| c == "cdylib")) + }; + Ok( + match metadata + .packages + .iter() + .find(|p| p.manifest_path == manifest_abs) + { + Some(p) if is_cdylib(p) => vec![p.name.to_string()], + Some(_) => vec![], + None => metadata + .packages + .iter() + .filter(|p| metadata.workspace_members.contains(&p.id)) + .filter(|p| is_cdylib(p)) + .map(|p| p.name.to_string()) + .collect(), + }, + ) +} + fn find_meta(meta: &[ScMetaEntry], key: &str) -> Option { meta.iter().find_map(|entry| { let ScMetaEntry::ScMetaV0(ScMetaV0 { key: k, val }) = entry; From e29046cc635e76537554320e38b60b0c3ffc264e Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 03:07:55 +0000 Subject: [PATCH 28/64] verify: clippy fix for implicit clone --- cmd/soroban-cli/src/commands/contract/verify.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index a0d9138a8c..b1fdea43ad 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -164,14 +164,14 @@ fn single_cdylib_or_workspace_cdylibs(manifest_path: &PathBuf) -> Result vec![p.name.to_string()], + Some(p) if is_cdylib(p) => vec![p.name.clone()], Some(_) => vec![], None => metadata .packages .iter() .filter(|p| metadata.workspace_members.contains(&p.id)) .filter(|p| is_cdylib(p)) - .map(|p| p.name.to_string()) + .map(|p| p.name.clone()) .collect(), }, ) From f49d288d1ab5903f6af1505207f17897e3772799 Mon Sep 17 00:00:00 2001 From: Leigh 
<351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 13:18:17 +1000 Subject: [PATCH 29/64] refactor build command string formatting --- cmd/soroban-cli/src/commands/contract/build_container.rs | 4 +++- cmd/soroban-cli/src/commands/contract/verify.rs | 8 ++------ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_container.rs index 56a66096a0..fc0212b1c7 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_container.rs @@ -113,7 +113,9 @@ pub async fn run_in_container( // install for the override toolchain; otherwise the rustup default applies. // Args pass through `$@` so we don't have to shell-escape. let target_install = match pin_toolchain { - Some(toolchain) => format!("rustup --quiet target add --toolchain {toolchain} {wasm_target}"), + Some(toolchain) => { + format!("rustup --quiet target add --toolchain {toolchain} {wasm_target}") + } None => format!("rustup --quiet target add {wasm_target}"), }; let mut container_cmd = vec![ diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index b1fdea43ad..4cee94ade2 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -43,9 +43,9 @@ pub enum Error { StellarAssetContract, #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --backend container` to make it verifiable")] MissingMeta(&'static str), - #[error("CLI version mismatch: contract says '{expected}', running CLI is '{actual}'. Install the matching CLI version and re-run.")] + #[error("stellar-cli version mismatch: contract was built with '{expected}', running stellar-cli is '{actual}'. 
Install the matching CLI version and re-run.")] CliVersionMismatch { expected: String, actual: String }, - #[error("verification failed: rebuilt {name} (sha256 {actual}) does not match original (sha256 {expected})")] + #[error("verification failed: rebuilt {name} ({actual}) does not match original ({expected})")] Mismatch { name: String, expected: String, @@ -131,10 +131,6 @@ impl Cmd { )); Ok(()) } else { - print.warnln(format!( - "Verification failed: rebuilt {} does not match original", - c.name - )); Err(Error::Mismatch { name: c.name.clone(), expected: original_hash, From 4ab95bf92fa00d119b325749dad5071d0254042e Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 03:27:06 +0000 Subject: [PATCH 30/64] verify: drop --source, use --manifest-path like build --- .../src/commands/contract/verify.rs | 69 +++++++++---------- 1 file changed, 33 insertions(+), 36 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 4cee94ade2..c24fcbe64c 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -23,9 +23,10 @@ pub struct Cmd { #[command(flatten)] pub common: shared::Args, - /// Source tree (Cargo.toml directory) to rebuild from. Defaults to cwd. - #[arg(long, default_value = ".")] - pub source: PathBuf, + /// Path to Cargo.toml of the source to rebuild. Defaults to the nearest + /// Cargo.toml in the current directory or its parents. 
+ #[arg(long)] + pub manifest_path: Option, #[command(flatten)] pub container_args: ContainerArgs, @@ -51,10 +52,6 @@ pub enum Error { expected: String, actual: String, }, - #[error( - "no Cargo.toml found at {0}; pass --source to point at the contract's source tree" - )] - SourceNotFound(PathBuf), #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), #[error("expected source to produce exactly one cdylib contract, found {found:?}; verify only supports a single contract per invocation")] @@ -92,21 +89,16 @@ impl Cmd { }); } - let manifest_path = self.source.join("Cargo.toml"); - if !manifest_path.exists() { - return Err(Error::SourceNotFound(self.source.clone())); - } - // Verify takes a single wasm input, so the source must produce exactly // one cdylib contract. Detect this up-front via cargo metadata so we // don't waste a build cycle on a workspace with multiple contracts. - let cdylibs = single_cdylib_or_workspace_cdylibs(&manifest_path)?; + let cdylibs = single_cdylib_or_workspace_cdylibs(self.manifest_path.as_deref())?; if cdylibs.len() != 1 { return Err(Error::ExpectedSingleContract { found: cdylibs }); } let build_cmd = build::Cmd { - manifest_path: Some(manifest_path), + manifest_path: self.manifest_path.clone(), backend: build::Backend::Container { image: bldimg }, container_args: self.container_args.clone(), rustup_toolchain: Some(rsver), @@ -142,35 +134,40 @@ impl Cmd { /// Mirror what `build::Cmd::packages` selects: if `manifest_path` points at a /// specific package, return that package's name iff it is a cdylib; otherwise -/// (workspace root) return the names of all workspace-member cdylibs. 
-fn single_cdylib_or_workspace_cdylibs(manifest_path: &PathBuf) -> Result, Error> { - let metadata = MetadataCommand::new() - .manifest_path(manifest_path) - .no_deps() - .exec()?; - let manifest_abs = std::path::absolute(manifest_path).map_err(Error::AbsolutePath)?; +/// (workspace root, or no manifest given) return the names of all +/// workspace-member cdylibs. +fn single_cdylib_or_workspace_cdylibs( + manifest_path: Option<&std::path::Path>, +) -> Result, Error> { + let mut cmd = MetadataCommand::new(); + cmd.no_deps(); + if let Some(p) = manifest_path { + cmd.manifest_path(p); + } + let metadata = cmd.exec()?; + let manifest_abs = match manifest_path { + Some(p) => Some(std::path::absolute(p).map_err(Error::AbsolutePath)?), + None => None, + }; let is_cdylib = |p: &cargo_metadata::Package| { p.targets .iter() .any(|t| t.crate_types.iter().any(|c| c == "cdylib")) }; - Ok( - match metadata + let specific = manifest_abs + .as_ref() + .and_then(|abs| metadata.packages.iter().find(|p| p.manifest_path == *abs)); + Ok(match specific { + Some(p) if is_cdylib(p) => vec![p.name.clone()], + Some(_) => vec![], + None => metadata .packages .iter() - .find(|p| p.manifest_path == manifest_abs) - { - Some(p) if is_cdylib(p) => vec![p.name.clone()], - Some(_) => vec![], - None => metadata - .packages - .iter() - .filter(|p| metadata.workspace_members.contains(&p.id)) - .filter(|p| is_cdylib(p)) - .map(|p| p.name.clone()) - .collect(), - }, - ) + .filter(|p| metadata.workspace_members.contains(&p.id)) + .filter(|p| is_cdylib(p)) + .map(|p| p.name.clone()) + .collect(), + }) } fn find_meta(meta: &[ScMetaEntry], key: &str) -> Option { From 127a8ac197f3841bb9cff490217c49f2eaa1b116 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 03:51:20 +0000 Subject: [PATCH 31/64] move verify under build as a subcommand --- .../src/commands/contract/build.rs | 23 +++++++++++++++++++ cmd/soroban-cli/src/commands/contract/mod.rs | 7 
------ 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index d64d67ce16..00f2f5123a 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -126,6 +126,16 @@ pub struct Cmd { #[command(flatten)] pub build_args: BuildArgs, + + #[command(subcommand)] + pub action: Option, +} + +/// Subcommands of `stellar contract build`. +#[derive(clap::Subcommand, Debug, Clone)] +pub enum Action { + /// Verify a wasm by rebuilding it inside the container image recorded in its metadata. + Verify(super::verify::Cmd), } /// Build backend selector for `--backend`. @@ -267,6 +277,10 @@ pub enum Error { #[error(transparent)] Container(#[from] build_container::Error), + + // Boxed to break the cycle between verify::Error::Build and build::Error::Verify. + #[error(transparent)] + Verify(Box), } const WASM_TARGET: &str = "wasm32v1-none"; @@ -289,6 +303,7 @@ impl Default for Cmd { container_args: ContainerArgs { docker_host: None }, rustup_toolchain: None, build_args: BuildArgs::default(), + action: None, } } } @@ -297,6 +312,14 @@ impl Cmd { /// Builds the project and returns the built WASM artifacts. #[allow(clippy::too_many_lines)] pub async fn run(&self, global_args: &global::Args) -> Result, Error> { + if let Some(Action::Verify(verify)) = &self.action { + // Box::pin breaks the recursion: verify.run() calls build::Cmd::run() + // for the rebuild, so the future would otherwise have infinite size. 
+ Box::pin(verify.run(global_args)) + .await + .map_err(|e| Error::Verify(Box::new(e)))?; + return Ok(Vec::new()); + } let print = Print::new(global_args.quiet); let working_dir = env::current_dir().map_err(Error::GettingCurrentDir)?; let metadata = self.metadata()?; diff --git a/cmd/soroban-cli/src/commands/contract/mod.rs b/cmd/soroban-cli/src/commands/contract/mod.rs index f5cffe3bf3..3014ffdd36 100644 --- a/cmd/soroban-cli/src/commands/contract/mod.rs +++ b/cmd/soroban-cli/src/commands/contract/mod.rs @@ -102,9 +102,6 @@ pub enum Cmd { // run as part of `contract build` so for a general user this is not needed. #[command(name = "spec-verify", hide = true)] SpecVerify(spec_verify::Cmd), - - /// Verify a wasm by rebuilding from source in the recorded container image. - Verify(verify::Cmd), } #[derive(thiserror::Error, Debug)] @@ -159,9 +156,6 @@ pub enum Error { #[error(transparent)] SpecVerify(#[from] spec_verify::Error), - - #[error(transparent)] - Verify(#[from] verify::Error), } impl Cmd { @@ -210,7 +204,6 @@ impl Cmd { Cmd::Read(read) => read.run().await?, Cmd::Restore(restore) => restore.run(global_args).await?, Cmd::SpecVerify(spec_verify) => spec_verify.run(global_args)?, - Cmd::Verify(verify) => verify.run(global_args).await?, } Ok(()) } From 9ed74d84eaf881fdb0510145c7eb0d3ceea6747a Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 14:04:50 +1000 Subject: [PATCH 32/64] change help heading to build backends --- cmd/soroban-cli/src/commands/contract/build.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 00f2f5123a..159a4fc743 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -112,7 +112,7 @@ pub struct Cmd { value_name = "BACKEND", default_value = "local", value_parser = parse_backend, - help_heading = 
"Reproducible Build", + help_heading = "Build Backends", )] pub backend: Backend, From a4d85994d11520328215c11597b81b385df3dadf Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 04:13:48 +0000 Subject: [PATCH 33/64] rename backend container -> docker --- .../src/commands/contract/build.rs | 60 +++++++++---------- .../{build_container.rs => build_docker.rs} | 14 ++--- cmd/soroban-cli/src/commands/contract/mod.rs | 2 +- .../src/commands/contract/verify.rs | 8 +-- 4 files changed, 41 insertions(+), 43 deletions(-) rename cmd/soroban-cli/src/commands/contract/{build_container.rs => build_docker.rs} (96%) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 159a4fc743..1241f1f0bd 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -19,9 +19,7 @@ use stellar_xdr::curr::{Limited, Limits, ScMetaEntry, ScMetaV0, StringM, WriteXd #[cfg(feature = "additional-libs")] use crate::commands::contract::optimize; use crate::{ - commands::{ - container::shared::Args as ContainerArgs, contract::build_container, global, version, - }, + commands::{container::shared::Args as ContainerArgs, contract::build_docker, global, version}, print::Print, wasm, }; @@ -100,13 +98,13 @@ pub struct Cmd { /// Build backend. /// /// - `local` (default): build using the host's rust toolchain. - /// - `container`: build inside `docker.io/library/rust:latest` (linux/amd64) - /// using the local container runtime. The resolved image digest is + /// - `docker`: build inside `docker.io/library/rust:latest` (linux/amd64) + /// using the local docker daemon. The resolved image digest is /// recorded in contract metadata. - /// - `container=`: build inside the specified container image. Pin - /// via `--backend container=@sha256:...` for fully-reproducible builds. + /// - `docker=`: build inside the specified docker image. 
Pin + /// via `--backend docker=@sha256:...` for fully-reproducible builds. /// - /// Aborted container builds may leave a stopped container; clean with `docker container prune`. + /// Aborted docker builds may leave a stopped container; clean with `docker container prune`. #[arg( long, value_name = "BACKEND", @@ -134,7 +132,7 @@ pub struct Cmd { /// Subcommands of `stellar contract build`. #[derive(clap::Subcommand, Debug, Clone)] pub enum Action { - /// Verify a wasm by rebuilding it inside the container image recorded in its metadata. + /// Verify a wasm by rebuilding it inside the Docker image recorded in its metadata. Verify(super::verify::Cmd), } @@ -144,39 +142,39 @@ pub enum Backend { /// Build with the host's rust toolchain. #[default] Local, - /// Build inside a container with the given image. - Container { image: String }, + /// Build inside a Docker container with the given image. + Docker { image: String }, } impl Backend { - /// Returns the container image if the backend is `Container`, else `None`. - pub fn container_image(&self) -> Option<&str> { + /// Returns the docker image if the backend is `Docker`, else `None`. 
+ pub fn docker_image(&self) -> Option<&str> { match self { - Self::Container { image } => Some(image), + Self::Docker { image } => Some(image), Self::Local => None, } } } -const DEFAULT_CONTAINER_IMAGE: &str = "docker.io/library/rust:latest"; +const DEFAULT_DOCKER_IMAGE: &str = "docker.io/library/rust:latest"; fn parse_backend(s: &str) -> Result { match s { "local" => Ok(Backend::Local), - "container" => Ok(Backend::Container { - image: DEFAULT_CONTAINER_IMAGE.to_string(), + "docker" => Ok(Backend::Docker { + image: DEFAULT_DOCKER_IMAGE.to_string(), }), _ => { - if let Some(image) = s.strip_prefix("container=") { + if let Some(image) = s.strip_prefix("docker=") { if image.is_empty() { - return Err("container image cannot be empty; use `--backend container` for the default image".to_string()); + return Err("docker image cannot be empty; use `--backend docker` for the default image".to_string()); } - Ok(Backend::Container { + Ok(Backend::Docker { image: image.to_string(), }) } else { Err(format!( - "unknown backend {s:?}; expected `local`, `container`, or `container=`" + "unknown backend {s:?}; expected `local`, `docker`, or `docker=`" )) } } @@ -276,7 +274,7 @@ pub enum Error { WasmParsing(String), #[error(transparent)] - Container(#[from] build_container::Error), + Container(#[from] build_docker::Error), // Boxed to break the cycle between verify::Error::Build and build::Error::Verify. #[error(transparent)] @@ -358,14 +356,14 @@ impl Cmd { } cmd.arg("rustc"); // Force --locked when building inside Docker so the build is deterministic. - if self.locked || self.backend.container_image().is_some() { + if self.locked || self.backend.docker_image().is_some() { cmd.arg("--locked"); } - let manifest_path = if self.backend.container_image().is_some() { + let manifest_path = if self.backend.docker_image().is_some() { // Inside the container the workspace is mounted at /workspace. 
let rel = pathdiff::diff_paths(&p.manifest_path, workspace_root) .unwrap_or(p.manifest_path.clone().into()); - Path::new(build_container::WORK_DIR).join(rel) + Path::new(build_docker::WORK_DIR).join(rel) } else { pathdiff::diff_paths(&p.manifest_path, &working_dir) .unwrap_or(p.manifest_path.clone().into()) @@ -398,7 +396,7 @@ impl Cmd { if let Some(rustflags) = make_rustflags_to_remap_absolute_paths( &print, - self.backend.container_image().is_some(), + self.backend.docker_image().is_some(), )? { cmd.env("CARGO_BUILD_RUSTFLAGS", rustflags); } @@ -410,15 +408,15 @@ impl Cmd { let cmd_str = serialize_command(&cmd); if self.print_commands_only { - if let Some(image) = self.backend.container_image() { - println!("# inside container image: {image}"); + if let Some(image) = self.backend.docker_image() { + println!("# inside docker image: {image}"); } println!("{cmd_str}"); } else { - let bldimg = if let Some(image) = self.backend.container_image() { - print.infoln(format!("container[{image}] {cmd_str}")); + let bldimg = if let Some(image) = self.backend.docker_image() { + print.infoln(format!("docker[{image}] {cmd_str}")); Some( - build_container::run_in_container( + build_docker::run_in_docker( &cmd, image, workspace_root, diff --git a/cmd/soroban-cli/src/commands/contract/build_container.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs similarity index 96% rename from cmd/soroban-cli/src/commands/contract/build_container.rs rename to cmd/soroban-cli/src/commands/contract/build_docker.rs index fc0212b1c7..60496d2ffc 100644 --- a/cmd/soroban-cli/src/commands/contract/build_container.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -26,28 +26,28 @@ const RUSTUP_DIR: &str = "/usr/local/rustup"; #[derive(thiserror::Error, Debug)] pub enum Error { - #[error("cannot connect to container runtime; is it running? ({0})")] + #[error("cannot connect to docker daemon; is it running? 
({0})")] RuntimeNotRunning(ContainerError), - #[error("pulling container image {image}: {source}")] + #[error("pulling docker image {image}: {source}")] ImagePull { image: String, source: bollard::errors::Error, }, - #[error("inspecting container image {image}: {source}")] + #[error("inspecting docker image {image}: {source}")] ImageInspect { image: String, source: bollard::errors::Error, }, - #[error("container image {image} has no repository digest; pin via --backend container=/@sha256:...")] + #[error("docker image {image} has no repository digest; pin via --backend docker=/@sha256:...")] NoDigest { image: String }, - #[error("build failed inside container (exit {0})")] + #[error("build failed inside docker container (exit {0})")] BuildExit(i64), - #[error("container runtime: {0}")] + #[error("docker run: {0}")] Runtime(#[from] bollard::errors::Error), #[error("resolving CARGO_HOME / RUSTUP_HOME: {0}")] @@ -57,7 +57,7 @@ pub enum Error { /// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, /// returning the resolved `name@sha256:...` reference for embedding into meta. 
#[allow(clippy::too_many_arguments)] -pub async fn run_in_container( +pub async fn run_in_docker( cmd: &Command, image: &str, workspace_root: &Path, diff --git a/cmd/soroban-cli/src/commands/contract/mod.rs b/cmd/soroban-cli/src/commands/contract/mod.rs index 3014ffdd36..602e107972 100644 --- a/cmd/soroban-cli/src/commands/contract/mod.rs +++ b/cmd/soroban-cli/src/commands/contract/mod.rs @@ -3,7 +3,7 @@ pub mod arg_parsing; pub mod asset; pub mod bindings; pub mod build; -pub mod build_container; +pub mod build_docker; pub mod deploy; pub mod extend; pub mod fetch; diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index c24fcbe64c..bb98368b37 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -13,7 +13,7 @@ use crate::commands::container::shared::Args as ContainerArgs; use crate::commands::{global, version}; use crate::print::Print; -/// Verify a wasm by rebuilding it inside the container image recorded in its metadata. +/// Verify a wasm by rebuilding it inside the Docker image recorded in its metadata. /// /// Succeeds only if the rebuilt artifact is byte-identical to the input. /// User is responsible for checking out the matching commit before running. @@ -42,7 +42,7 @@ pub enum Error { Build(#[from] build::Error), #[error("stellar asset contract has no source to verify")] StellarAssetContract, - #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --backend container` to make it verifiable")] + #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --backend docker` to make it verifiable")] MissingMeta(&'static str), #[error("stellar-cli version mismatch: contract was built with '{expected}', running stellar-cli is '{actual}'. 
Install the matching CLI version and re-run.")] CliVersionMismatch { expected: String, actual: String }, @@ -79,7 +79,7 @@ impl Cmd { print.blankln(format!("Original wasm hash: {original_hash}")); print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); - print.blankln(format!("Container image: {bldimg}")); + print.blankln(format!("Docker image: {bldimg}")); let running = version::one_line(); if cliver != running { @@ -99,7 +99,7 @@ impl Cmd { let build_cmd = build::Cmd { manifest_path: self.manifest_path.clone(), - backend: build::Backend::Container { image: bldimg }, + backend: build::Backend::Docker { image: bldimg }, container_args: self.container_args.clone(), rustup_toolchain: Some(rsver), ..build::Cmd::default() From a8f683aa0e5f2428ddbd34fe686160f573bb601d Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 04:49:35 +0000 Subject: [PATCH 34/64] embed source_repo/source_rev meta; deploy mainnet check; verify multi-contract --- .../src/commands/contract/build.rs | 168 ++++++++++++++++-- .../src/commands/contract/deploy/wasm.rs | 37 ++++ .../src/commands/contract/verify.rs | 91 ++-------- 3 files changed, 212 insertions(+), 84 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 1241f1f0bd..5ef5fc42b9 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -341,6 +341,21 @@ impl Cmd { let wasm_target = get_wasm_target()?; let mut built_contracts = Vec::new(); + // Detect git state once for the build. Embed source_repo/source_rev + // when the workspace is a clean git checkout with an origin remote; + // warn (but proceed) otherwise so users know the wasm won't be + // reproducible against a public source. 
+ let (source_repo, source_rev) = match detect_git_state(workspace_root) { + GitState::Clean { repo, rev } => (Some(repo), Some(rev)), + GitState::Dirty => { + print.warnln( + "git working tree has uncommitted changes; source_repo/source_rev not embedded in contract metadata. Commit changes for a reproducible build.", + ); + (None, None) + } + GitState::NotARepo => (None, None), + }; + for (i, p) in packages.iter().enumerate() { if i > 0 { // Blank line separating successive contract builds in a workspace. @@ -444,7 +459,14 @@ impl Cmd { .join(&self.profile) .join(&file); - self.inject_meta(&target_file_path, bldimg.as_deref())?; + self.inject_meta( + &target_file_path, + &ExtraMeta { + bldimg: bldimg.clone(), + source_repo: source_repo.clone(), + source_rev: source_rev.clone(), + }, + )?; Self::filter_spec(&target_file_path)?; let final_path = if let Some(out_dir) = &self.out_dir { @@ -561,9 +583,9 @@ impl Cmd { cmd.exec() } - fn inject_meta(&self, target_file_path: &PathBuf, bldimg: Option<&str>) -> Result<(), Error> { + fn inject_meta(&self, target_file_path: &PathBuf, extra: &ExtraMeta) -> Result<(), Error> { let mut wasm_bytes = fs::read(target_file_path).map_err(Error::ReadingWasmFile)?; - let xdr = self.encoded_new_meta(bldimg)?; + let xdr = self.encoded_new_meta(extra)?; wasm_gen::write_custom_section(&mut wasm_bytes, META_CUSTOM_SECTION_NAME, &xdr); // Deleting .wasm file effectively unlinking it from /release/deps/.wasm preventing from overwrite @@ -612,7 +634,7 @@ impl Cmd { fs::write(target_file_path, new_wasm).map_err(Error::WritingWasmFile) } - fn encoded_new_meta(&self, bldimg: Option<&str>) -> Result, Error> { + fn encoded_new_meta(&self, extra: &ExtraMeta) -> Result, Error> { let mut new_meta: Vec = Vec::new(); // Always inject CLI version @@ -622,16 +644,25 @@ impl Cmd { }); new_meta.push(cli_meta_entry); - // Reproducible build image (only when --docker was used). The matching - // rustc version is recorded as `rsver` by soroban-sdk itself. 
- if let Some(image) = bldimg { - let key: StringM = "bldimg" + // Reproducible-build meta. `rsver` (rustc version) is recorded by + // soroban-sdk itself; here we add `bldimg` when --backend docker + // was used, and source_repo/source_rev when the workspace was a + // clean git checkout. + let kvs = [ + ("bldimg", extra.bldimg.as_deref()), + ("source_repo", extra.source_repo.as_deref()), + ("source_rev", extra.source_rev.as_deref()), + ]; + for (k, v) in kvs { + let Some(v) = v else { continue }; + let key: StringM = k .to_string() .try_into() - .map_err(|e| Error::MetaArg(format!("bldimg is an invalid metadata key: {e}")))?; - let val: StringM = image.to_string().try_into().map_err(|e| { - Error::MetaArg(format!("{image} is an invalid metadata value: {e}")) - })?; + .map_err(|e| Error::MetaArg(format!("{k} is an invalid metadata key: {e}")))?; + let val: StringM = v + .to_string() + .try_into() + .map_err(|e| Error::MetaArg(format!("{v} is an invalid metadata value: {e}")))?; new_meta.push(ScMetaEntry::ScMetaV0(ScMetaV0 { key, val })); } @@ -731,6 +762,98 @@ impl Cmd { } } +/// Extra meta entries to embed in the wasm's `contractmetav0` custom section. +/// `cliver` is always embedded (separately). `rsver` is embedded by soroban-sdk. +#[derive(Default, Debug, Clone)] +struct ExtraMeta { + /// `bldimg`: fully-qualified container image used to build (e.g. + /// `docker.io/library/rust@sha256:...`). Set when `--backend docker`. + bldimg: Option, + /// `source_repo`: HTTPS URL of the workspace's git origin remote. + /// Set only when the workspace is a clean git checkout. + source_repo: Option, + /// `source_rev`: full SHA of the workspace's git HEAD commit. + /// Set only when the workspace is a clean git checkout. 
+ source_rev: Option, +} + +enum GitState { + NotARepo, + Dirty, + Clean { repo: String, rev: String }, +} + +fn detect_git_state(workspace_root: &Path) -> GitState { + let in_repo = Command::new("git") + .arg("-C") + .arg(workspace_root) + .args(["rev-parse", "--git-dir"]) + .output() + .map(|o| o.status.success()) + .unwrap_or(false); + if !in_repo { + return GitState::NotARepo; + } + let dirty = Command::new("git") + .arg("-C") + .arg(workspace_root) + .args(["status", "--porcelain"]) + .output() + .map(|o| !o.stdout.is_empty()) + .unwrap_or(true); + if dirty { + return GitState::Dirty; + } + let Some(repo) = git_output(workspace_root, &["remote", "get-url", "origin"]) + .as_deref() + .and_then(remote_to_https) + else { + return GitState::Dirty; + }; + let Some(rev) = git_output(workspace_root, &["rev-parse", "HEAD"]) else { + return GitState::Dirty; + }; + GitState::Clean { repo, rev } +} + +fn git_output(workspace_root: &Path, args: &[&str]) -> Option { + let out = Command::new("git") + .arg("-C") + .arg(workspace_root) + .args(args) + .output() + .ok()?; + out.status + .success() + .then(|| String::from_utf8_lossy(&out.stdout).trim().to_string()) + .filter(|s| !s.is_empty()) +} + +/// Convert a git remote URL to a canonical `https://...` form. 
+fn remote_to_https(url: &str) -> Option { + let url = url.trim(); + let canonical = if url.starts_with("https://") || url.starts_with("http://") { + url.to_string() + } else if let Some(rest) = url.strip_prefix("git@") { + let (host, path) = rest.split_once(':')?; + format!("https://{host}/{path}") + } else if let Some(rest) = url.strip_prefix("ssh://git@") { + format!("https://{rest}") + } else if let Some(rest) = url.strip_prefix("ssh://") { + format!("https://{rest}") + } else if let Some(rest) = url.strip_prefix("git://") { + format!("https://{rest}") + } else { + return None; + }; + Some( + canonical + .strip_suffix(".git") + .unwrap_or(&canonical) + .to_string(), + ) +} + fn serialize_command(cmd: &Command) -> String { let mut parts = Vec::::new(); parts.extend(cmd.get_envs().map(|(key, val)| { @@ -1017,4 +1140,25 @@ mod tests { "shlex round-trip failed: {raw_arg:?} not found as a single token in {tokens:?}" ); } + + #[test] + fn remote_to_https_normalizes_common_forms() { + let cases = [ + ("https://github.com/x/y", "https://github.com/x/y"), + ("https://github.com/x/y.git", "https://github.com/x/y"), + ("http://example.com/x/y.git", "http://example.com/x/y"), + ("git@github.com:x/y.git", "https://github.com/x/y"), + ("ssh://git@github.com/x/y.git", "https://github.com/x/y"), + ("ssh://git.example.com/x/y", "https://git.example.com/x/y"), + ("git://github.com/x/y.git", "https://github.com/x/y"), + ]; + for (input, want) in cases { + assert_eq!( + remote_to_https(input).as_deref(), + Some(want), + "input: {input}" + ); + } + assert_eq!(remote_to_https("notaurl"), None); + } } diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index d9658d900c..356e2afe11 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -370,6 +370,7 @@ impl Cmd { } get_remote_wasm_from_hash(&client, &wasm_hash).await? 
}; + warn_if_mainnet_wasm_not_reproducible(&raw_wasm, &network.network_passphrase, &print); let entries = soroban_spec_tools::contract::Spec::new(&raw_wasm)?.spec; let res = soroban_spec_tools::Spec::new(entries.clone().as_slice()); let constructor_params = if let Ok(func) = res.find_function(CONSTRUCTOR_FUNCTION_NAME) { @@ -472,6 +473,42 @@ fn build_create_contract_tx( Ok(tx) } +/// On mainnet, warn if the wasm is missing the meta entries that indicate +/// a reproducible build (`bldimg`, `rsver`, `cliver`, `source_repo`, +/// `source_rev`). Skipped on other networks. Best-effort: parse failures are +/// silently ignored. +fn warn_if_mainnet_wasm_not_reproducible( + wasm_bytes: &[u8], + network_passphrase: &str, + print: &Print, +) { + if network_passphrase != crate::config::network::passphrase::MAINNET { + return; + } + let Ok(spec) = soroban_spec_tools::contract::Spec::new(wasm_bytes) else { + return; + }; + let required = ["cliver", "bldimg", "rsver", "source_repo", "source_rev"]; + let missing: Vec<&str> = required + .iter() + .filter(|k| { + !spec.meta.iter().any(|e| { + let crate::xdr::ScMetaEntry::ScMetaV0(crate::xdr::ScMetaV0 { key, .. }) = e; + key.to_string() == **k + }) + }) + .copied() + .collect(); + if missing.is_empty() { + return; + } + print.warnln(format!( + "the wasm being deployed is missing reproducibility meta entries: {missing:?}. \ + The deployed wasm may not be independently verifiable. To make it reproducible, \ + build with `stellar contract build --backend docker` in a clean git repository." 
+ )); +} + #[cfg(test)] mod tests { use super::*; diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index bb98368b37..e4160b175a 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -1,7 +1,6 @@ use std::fs; use std::path::PathBuf; -use cargo_metadata::MetadataCommand; use clap::Parser; use sha2::{Digest, Sha256}; use soroban_spec_tools::contract::{self, Spec}; @@ -15,8 +14,9 @@ use crate::print::Print; /// Verify a wasm by rebuilding it inside the Docker image recorded in its metadata. /// -/// Succeeds only if the rebuilt artifact is byte-identical to the input. -/// User is responsible for checking out the matching commit before running. +/// All cdylib contracts in the workspace are rebuilt; verification succeeds if +/// any rebuilt artifact is byte-identical to the input. The user is responsible +/// for checking out the matching commit before running. #[derive(Parser, Debug, Clone)] #[group(skip)] pub struct Cmd { @@ -46,20 +46,13 @@ pub enum Error { MissingMeta(&'static str), #[error("stellar-cli version mismatch: contract was built with '{expected}', running stellar-cli is '{actual}'. 
Install the matching CLI version and re-run.")] CliVersionMismatch { expected: String, actual: String }, - #[error("verification failed: rebuilt {name} ({actual}) does not match original ({expected})")] + #[error("verification failed: none of the rebuilt artifacts ({}) match original ({expected})", produced.iter().map(|(n, h)| format!("{n}={h}")).collect::>().join(", "))] Mismatch { - name: String, expected: String, - actual: String, + produced: Vec<(String, String)>, }, #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), - #[error("expected source to produce exactly one cdylib contract, found {found:?}; verify only supports a single contract per invocation")] - ExpectedSingleContract { found: Vec }, - #[error("reading cargo metadata: {0}")] - Metadata(#[from] cargo_metadata::Error), - #[error("resolving source path: {0}")] - AbsolutePath(std::io::Error), } impl Cmd { @@ -89,14 +82,6 @@ impl Cmd { }); } - // Verify takes a single wasm input, so the source must produce exactly - // one cdylib contract. Detect this up-front via cargo metadata so we - // don't waste a build cycle on a workspace with multiple contracts. - let cdylibs = single_cdylib_or_workspace_cdylibs(self.manifest_path.as_deref())?; - if cdylibs.len() != 1 { - return Err(Error::ExpectedSingleContract { found: cdylibs }); - } - let build_cmd = build::Cmd { manifest_path: self.manifest_path.clone(), backend: build::Backend::Docker { image: bldimg }, @@ -105,71 +90,33 @@ impl Cmd { ..build::Cmd::default() }; let built = build_cmd.run(global_args).await?; - let c = match built.as_slice() { - [c] => c, - other => { - return Err(Error::ExpectedSingleContract { - found: other.iter().map(|c| c.name.clone()).collect(), - }); + + // Hash every rebuilt artifact and find one that matches. 
+ let mut produced = Vec::with_capacity(built.len()); + let mut matched = None; + for c in &built { + let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; + let hash = hex::encode(Sha256::digest(&bytes)); + if matched.is_none() && hash == original_hash { + matched = Some(c.name.clone()); } - }; + produced.push((c.name.clone(), hash)); + } - let bytes = fs::read(&c.path).map_err(Error::ReadingRebuilt)?; - let hash = hex::encode(Sha256::digest(&bytes)); - if hash == original_hash { + if let Some(name) = matched { print.checkln(format!( - "Verified: rebuilt {} wasm matches {original_hash}", - c.name + "Verified: rebuilt {name} wasm matches {original_hash}" )); Ok(()) } else { Err(Error::Mismatch { - name: c.name.clone(), expected: original_hash, - actual: hash, + produced, }) } } } -/// Mirror what `build::Cmd::packages` selects: if `manifest_path` points at a -/// specific package, return that package's name iff it is a cdylib; otherwise -/// (workspace root, or no manifest given) return the names of all -/// workspace-member cdylibs. 
-fn single_cdylib_or_workspace_cdylibs( - manifest_path: Option<&std::path::Path>, -) -> Result, Error> { - let mut cmd = MetadataCommand::new(); - cmd.no_deps(); - if let Some(p) = manifest_path { - cmd.manifest_path(p); - } - let metadata = cmd.exec()?; - let manifest_abs = match manifest_path { - Some(p) => Some(std::path::absolute(p).map_err(Error::AbsolutePath)?), - None => None, - }; - let is_cdylib = |p: &cargo_metadata::Package| { - p.targets - .iter() - .any(|t| t.crate_types.iter().any(|c| c == "cdylib")) - }; - let specific = manifest_abs - .as_ref() - .and_then(|abs| metadata.packages.iter().find(|p| p.manifest_path == *abs)); - Ok(match specific { - Some(p) if is_cdylib(p) => vec![p.name.clone()], - Some(_) => vec![], - None => metadata - .packages - .iter() - .filter(|p| metadata.workspace_members.contains(&p.id)) - .filter(|p| is_cdylib(p)) - .map(|p| p.name.clone()) - .collect(), - }) -} - fn find_meta(meta: &[ScMetaEntry], key: &str) -> Option { meta.iter().find_map(|entry| { let ScMetaEntry::ScMetaV0(ScMetaV0 { key: k, val }) = entry; From ec14369de04d5fe6d2a1e86859514558d40d84a1 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:05:23 +0000 Subject: [PATCH 35/64] minimise diff: revert unrelated info/shared.rs edits; tighten target install --- .../src/commands/contract/build_docker.rs | 46 ++++++++----------- .../src/commands/contract/info/shared.rs | 8 ++-- 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 60496d2ffc..32615bfc22 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -109,19 +109,15 @@ pub async fn run_in_docker( .map(std::borrow::Cow::into_owned) .collect(); // Always install the wasm target before the build so we don't depend on - // the workspace's 
`rust-toolchain.toml` having configured it. When pinning, - // install for the override toolchain; otherwise the rustup default applies. - // Args pass through `$@` so we don't have to shell-escape. - let target_install = match pin_toolchain { - Some(toolchain) => { - format!("rustup --quiet target add --toolchain {toolchain} {wasm_target}") - } - None => format!("rustup --quiet target add {wasm_target}"), - }; + // the workspace's `rust-toolchain.toml` having configured it. Args pass + // through `$@` so we don't have to shell-escape. + let toolchain_arg = pin_toolchain + .map(|t| format!("--toolchain {t} ")) + .unwrap_or_default(); let mut container_cmd = vec![ "sh".to_string(), "-c".to_string(), - format!("{target_install} && exec \"$@\""), + format!("rustup --quiet target add {toolchain_arg}{wasm_target} && exec \"$@\""), "sh".to_string(), ]; container_cmd.extend(argv); @@ -243,8 +239,8 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E // local registry config. async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { let canonical = fully_qualify(strip_tag(image)); - let digest = if let Some((_, digest)) = parse_pinned_digest(image) { - digest.to_string() + let digest = if let Some(d) = sha256_digest(image) { + d.to_string() } else { docker .inspect_image(image) @@ -256,7 +252,7 @@ async fn resolve_image_digest(docker: &Docker, image: &str) -> Result Result Option<(&str, &str)> { - let (name, after) = image.rsplit_once('@')?; - after.starts_with("sha256:").then_some((name, after)) +/// Returns the `sha256:...` portion of a `@sha256:...` reference, if present. +fn sha256_digest(image: &str) -> Option<&str> { + let (_, after) = image.rsplit_once('@')?; + after.starts_with("sha256:").then_some(after) } /// Strip any `@sha256:...` and `:tag` suffix, leaving only the repository name. 
@@ -333,17 +330,14 @@ mod tests { use super::*; #[test] - fn parse_pinned_digest_cases() { - assert_eq!(parse_pinned_digest("name"), None); - assert_eq!(parse_pinned_digest("name:tag"), None); - assert_eq!(parse_pinned_digest("name@md5:abc"), None); - assert_eq!( - parse_pinned_digest("name@sha256:abc"), - Some(("name", "sha256:abc")) - ); + fn sha256_digest_cases() { + assert_eq!(sha256_digest("name"), None); + assert_eq!(sha256_digest("name:tag"), None); + assert_eq!(sha256_digest("name@md5:abc"), None); + assert_eq!(sha256_digest("name@sha256:abc"), Some("sha256:abc")); assert_eq!( - parse_pinned_digest("host:5000/name:tag@sha256:abc"), - Some(("host:5000/name:tag", "sha256:abc")) + sha256_digest("host:5000/name:tag@sha256:abc"), + Some("sha256:abc") ); } diff --git a/cmd/soroban-cli/src/commands/contract/info/shared.rs b/cmd/soroban-cli/src/commands/contract/info/shared.rs index 183ce9dcad..b06d6eb4dc 100644 --- a/cmd/soroban-cli/src/commands/contract/info/shared.rs +++ b/cmd/soroban-cli/src/commands/contract/info/shared.rs @@ -122,7 +122,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { // Check if a local WASM file path is provided if let Some(path) = &args.wasm { // Read the WASM file and return its contents - print.infoln("Loading contract from file..."); + print.infoln("Loading contract spec from file..."); let wasm_bytes = wasm::Args { wasm: path.clone() }.read()?; return Ok(Fetched { contract: Contract::Wasm { wasm_bytes }, @@ -148,7 +148,9 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { .verify_network_passphrase(Some(&network.network_passphrase)) .await?; - print.globeln(format!("Downloading contract for wasm hash: {wasm_hash}")); + print.globeln(format!( + "Downloading contract spec for wasm hash: {wasm_hash}" + )); let wasm_bytes = get_remote_wasm_from_hash(&client, &hash).await?; Ok(Fetched { contract: Contract::Wasm { wasm_bytes }, @@ -162,7 +164,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { 
contract_id.resolve_contract_id(&args.locator, &network.network_passphrase)?; let derived_address = xdr::ScAddress::Contract(ContractId(xdr::Hash(contract_id.0))).to_string(); - print.globeln(format!("Downloading contract: {derived_address}")); + print.globeln(format!("Downloading contract spec: {derived_address}")); let res = wasm::fetch_from_contract(&contract_id, network).await; if let Some(ContractIsStellarAsset) = res.as_ref().err() { return Ok(Fetched { From 5a2c5fd30ba6b8e5047ef96fa37455fb769b0d20 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:08:00 +0000 Subject: [PATCH 36/64] info: drop 'spec' from log messages (loading the contract, not just its spec) --- cmd/soroban-cli/src/commands/contract/info/shared.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/info/shared.rs b/cmd/soroban-cli/src/commands/contract/info/shared.rs index b06d6eb4dc..183ce9dcad 100644 --- a/cmd/soroban-cli/src/commands/contract/info/shared.rs +++ b/cmd/soroban-cli/src/commands/contract/info/shared.rs @@ -122,7 +122,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { // Check if a local WASM file path is provided if let Some(path) = &args.wasm { // Read the WASM file and return its contents - print.infoln("Loading contract spec from file..."); + print.infoln("Loading contract from file..."); let wasm_bytes = wasm::Args { wasm: path.clone() }.read()?; return Ok(Fetched { contract: Contract::Wasm { wasm_bytes }, @@ -148,9 +148,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { .verify_network_passphrase(Some(&network.network_passphrase)) .await?; - print.globeln(format!( - "Downloading contract spec for wasm hash: {wasm_hash}" - )); + print.globeln(format!("Downloading contract for wasm hash: {wasm_hash}")); let wasm_bytes = get_remote_wasm_from_hash(&client, &hash).await?; Ok(Fetched { contract: Contract::Wasm { wasm_bytes }, @@ 
-164,7 +162,7 @@ pub async fn fetch(args: &Args, print: &Print) -> Result { contract_id.resolve_contract_id(&args.locator, &network.network_passphrase)?; let derived_address = xdr::ScAddress::Contract(ContractId(xdr::Hash(contract_id.0))).to_string(); - print.globeln(format!("Downloading contract spec: {derived_address}")); + print.globeln(format!("Downloading contract: {derived_address}")); let res = wasm::fetch_from_contract(&contract_id, network).await; if let Some(ContractIsStellarAsset) = res.as_ref().err() { return Ok(Fetched { From f729cec80efd9b3ac5aeddaa7bda67487d5290e3 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:12:23 +0000 Subject: [PATCH 37/64] build: print cargo invocation after pull progress in docker mode --- cmd/soroban-cli/src/commands/contract/build.rs | 2 +- cmd/soroban-cli/src/commands/contract/build_docker.rs | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 5ef5fc42b9..a8887538e9 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -429,10 +429,10 @@ impl Cmd { println!("{cmd_str}"); } else { let bldimg = if let Some(image) = self.backend.docker_image() { - print.infoln(format!("docker[{image}] {cmd_str}")); Some( build_docker::run_in_docker( &cmd, + &cmd_str, image, workspace_root, target_dir.as_std_path(), diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 32615bfc22..10d7383405 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -59,6 +59,7 @@ pub enum Error { #[allow(clippy::too_many_arguments)] pub async fn run_in_docker( cmd: &Command, + cmd_str: &str, image: &str, workspace_root: &Path, target_dir: &Path, @@ -74,6 +75,9 @@ pub async fn 
run_in_docker( pull_image(&docker, image, print).await?; let resolved = resolve_image_digest(&docker, image).await?; + // Print the cargo invocation after the pull progress so the on-screen + // order matches execution: pull → cargo → cargo output. + print.infoln(format!("docker[{image}] {cmd_str}")); // Bind-mount the host's cargo registry and rustup state. Bind mounts // preserve host ownership, so the container (running as the host user) From 11f590b167df0c37f4b0f87985c9d55ebb7c2bf7 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:12:45 +0000 Subject: [PATCH 38/64] build: drop docker[image] prefix from cargo invocation line --- cmd/soroban-cli/src/commands/contract/build_docker.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 10d7383405..1229b90a94 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -77,7 +77,7 @@ pub async fn run_in_docker( let resolved = resolve_image_digest(&docker, image).await?; // Print the cargo invocation after the pull progress so the on-screen // order matches execution: pull → cargo → cargo output. - print.infoln(format!("docker[{image}] {cmd_str}")); + print.infoln(cmd_str); // Bind-mount the host's cargo registry and rustup state. 
Bind mounts // preserve host ownership, so the container (running as the host user) From a232fd561795e538aa45bc853eca842d4f76d990 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:19:09 +0000 Subject: [PATCH 39/64] build: pull docker image only once across multi-contract workspaces --- .../src/commands/contract/build.rs | 30 +++++++++++-------- .../src/commands/contract/build_docker.rs | 13 ++++++-- 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index a8887538e9..23554e7fab 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -340,6 +340,9 @@ impl Cmd { let wasm_target = get_wasm_target()?; let mut built_contracts = Vec::new(); + // Cache the resolved image digest across multi-contract workspace + // builds so the docker pull only runs once. + let mut resolved_image: Option = None; // Detect git state once for the build. 
Embed source_repo/source_rev // when the workspace is a clean git checkout with an origin remote; @@ -429,20 +432,21 @@ impl Cmd { println!("{cmd_str}"); } else { let bldimg = if let Some(image) = self.backend.docker_image() { - Some( - build_docker::run_in_docker( - &cmd, - &cmd_str, - image, - workspace_root, - target_dir.as_std_path(), - &wasm_target, - self.rustup_toolchain.as_deref(), - &self.container_args, - &print, - ) - .await?, + let r = build_docker::run_in_docker( + &cmd, + &cmd_str, + image, + resolved_image.as_deref(), + workspace_root, + target_dir.as_std_path(), + &wasm_target, + self.rustup_toolchain.as_deref(), + &self.container_args, + &print, ) + .await?; + resolved_image = Some(r.clone()); + Some(r) } else { print.infoln(cmd_str); let status = cmd.status().map_err(Error::CargoCmd)?; diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 1229b90a94..8c226651fb 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -61,6 +61,7 @@ pub async fn run_in_docker( cmd: &Command, cmd_str: &str, image: &str, + pre_resolved: Option<&str>, workspace_root: &Path, target_dir: &Path, wasm_target: &str, @@ -73,8 +74,16 @@ pub async fn run_in_docker( .await .map_err(Error::RuntimeNotRunning)?; - pull_image(&docker, image, print).await?; - let resolved = resolve_image_digest(&docker, image).await?; + // Pull and resolve only on the first call; subsequent invocations within + // the same build (e.g. workspace with multiple contracts) reuse the + // already-resolved digest and skip the pull progress output. + let resolved = match pre_resolved { + Some(r) => r.to_string(), + None => { + pull_image(&docker, image, print).await?; + resolve_image_digest(&docker, image).await? + } + }; // Print the cargo invocation after the pull progress so the on-screen // order matches execution: pull → cargo → cargo output. 
print.infoln(cmd_str); From 8401052f089b61f338f9c16cb0951de707f70176 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:19:52 +0000 Subject: [PATCH 40/64] build_docker: prefer if-let over single-pattern match --- cmd/soroban-cli/src/commands/contract/build_docker.rs | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 8c226651fb..2885096315 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -77,12 +77,11 @@ pub async fn run_in_docker( // Pull and resolve only on the first call; subsequent invocations within // the same build (e.g. workspace with multiple contracts) reuse the // already-resolved digest and skip the pull progress output. - let resolved = match pre_resolved { - Some(r) => r.to_string(), - None => { - pull_image(&docker, image, print).await?; - resolve_image_digest(&docker, image).await? - } + let resolved = if let Some(r) = pre_resolved { + r.to_string() + } else { + pull_image(&docker, image, print).await?; + resolve_image_digest(&docker, image).await? }; // Print the cargo invocation after the pull progress so the on-screen // order matches execution: pull → cargo → cargo output. 
From fa4a1f5b3f9c228125e6cf417e797bd81d045556 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:21:54 +0000 Subject: [PATCH 41/64] verify: blank line before verdict when multiple contracts were built --- cmd/soroban-cli/src/commands/contract/verify.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index e4160b175a..2f3180e9fc 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -103,6 +103,11 @@ impl Cmd { produced.push((c.name.clone(), hash)); } + // For multi-contract workspaces, separate the per-contract build + // output from the final verdict with a blank line. + if built.len() > 1 { + eprintln!(); + } if let Some(name) = matched { print.checkln(format!( "Verified: rebuilt {name} wasm matches {original_hash}" From 018f76eb5800b08dda917880ea61e47e2fce8d0a Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 11:44:58 +0000 Subject: [PATCH 42/64] container: stop mounting host's ~/.rustup (platform-mismatched binaries) --- .../src/commands/contract/build_docker.rs | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 2885096315..5ebbbb606b 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -22,7 +22,6 @@ const PLATFORM: &str = "linux/amd64"; pub const WORK_DIR: &str = "/workspace"; const TARGET_DIR: &str = "/target"; const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; -const RUSTUP_DIR: &str = "/usr/local/rustup"; #[derive(thiserror::Error, Debug)] pub enum Error { @@ -87,17 +86,19 @@ pub async fn run_in_docker( // order matches execution: pull → cargo → 
cargo output. print.infoln(cmd_str); - // Bind-mount the host's cargo registry and rustup state. Bind mounts - // preserve host ownership, so the container (running as the host user) - // can write to them. This caches crate downloads and installed - // toolchains across runs. + // Bind-mount the host's cargo registry to cache crate downloads across + // runs. Crate sources are platform-agnostic so this is safe. + // + // We deliberately do not mount the host's `~/.rustup`: it contains + // toolchain binaries built for the host's OS/arch (e.g. Mach-O on macOS), + // which the linux/amd64 container cannot exec. The image's pre-installed + // rustup state is used instead; the wasm target is installed on each + // container run. let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; - let rustup_home = home::rustup_home().map_err(Error::CargoHome)?; let binds = vec![ format!("{}:{}", workspace_root.display(), WORK_DIR), format!("{}:{}", target_dir.display(), TARGET_DIR), format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), - format!("{}:{}", rustup_home.display(), RUSTUP_DIR), ]; let mut env: Vec = cmd From d5fb1cb0226dfe4a1c79fd49a71667befa06b558 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 12:01:15 +0000 Subject: [PATCH 43/64] deploy/upload: support --backend and --docker-host for auto-build path --- cmd/soroban-cli/src/commands/contract/build.rs | 2 +- .../src/commands/contract/deploy/wasm.rs | 18 ++++++++++++++++++ .../src/commands/contract/upload.rs | 14 ++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 23554e7fab..c57b770b4e 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -158,7 +158,7 @@ impl Backend { const DEFAULT_DOCKER_IMAGE: &str = "docker.io/library/rust:latest"; -fn 
parse_backend(s: &str) -> Result { +pub fn parse_backend(s: &str) -> Result { match s { "local" => Ok(Backend::Local), "docker" => Ok(Backend::Docker { diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index 356e2afe11..64bb564050 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -74,6 +74,18 @@ pub struct Cmd { /// Package to build when auto-building without --wasm #[arg(long, help_heading = "Build Options", conflicts_with = "wasm_src")] pub package: Option, + /// Build backend; see `stellar contract build --help` for values. + #[arg( + long, + value_name = "BACKEND", + default_value = "local", + value_parser = build::parse_backend, + help_heading = "Build Options", + conflicts_with = "wasm_src", + )] + pub backend: build::Backend, + #[command(flatten)] + pub container_args: crate::commands::container::shared::Args, #[command(flatten)] pub build_args: build::BuildArgs, } @@ -279,6 +291,8 @@ impl Cmd { // Neither provided: auto-build let build_cmd = build::Cmd { package: self.package.clone(), + backend: self.backend.clone(), + container_args: self.container_args.clone(), build_args: self.build_args.clone(), ..build::Cmd::default() }; @@ -316,6 +330,10 @@ impl Cmd { ignore_checks: self.ignore_checks, build_only: is_build, package: None, + backend: build::Backend::Local, + container_args: crate::commands::container::shared::Args { + docker_host: None, + }, build_args: build::BuildArgs::default(), } .execute(config, quiet, no_cache) diff --git a/cmd/soroban-cli/src/commands/contract/upload.rs b/cmd/soroban-cli/src/commands/contract/upload.rs index de521531ce..f1ecc7fd30 100644 --- a/cmd/soroban-cli/src/commands/contract/upload.rs +++ b/cmd/soroban-cli/src/commands/contract/upload.rs @@ -56,6 +56,18 @@ pub struct Cmd { /// Package to build when --wasm is not provided #[arg(long, help_heading = "Build Options", 
conflicts_with = "wasm")] pub package: Option, + /// Build backend; see `stellar contract build --help` for values. + #[arg( + long, + value_name = "BACKEND", + default_value = "local", + value_parser = build::parse_backend, + help_heading = "Build Options", + conflicts_with = "wasm", + )] + pub backend: build::Backend, + #[command(flatten)] + pub container_args: crate::commands::container::shared::Args, #[command(flatten)] pub build_args: build::BuildArgs, } @@ -178,6 +190,8 @@ impl Cmd { } else { let build_cmd = build::Cmd { package: self.package.clone(), + backend: self.backend.clone(), + container_args: self.container_args.clone(), build_args: self.build_args.clone(), ..build::Cmd::default() }; From eef7e7ce4a48f3743cff69482925f2e837081009 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 12:36:06 +0000 Subject: [PATCH 44/64] embed source_path meta (manifest path relative to git root) --- .../src/commands/contract/build.rs | 39 ++++++++++++++----- .../src/commands/contract/deploy/wasm.rs | 9 ++++- .../src/commands/contract/verify.rs | 4 ++ 3 files changed, 41 insertions(+), 11 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index c57b770b4e..80f4a3b30d 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -345,18 +345,18 @@ impl Cmd { let mut resolved_image: Option = None; // Detect git state once for the build. Embed source_repo/source_rev - // when the workspace is a clean git checkout with an origin remote; - // warn (but proceed) otherwise so users know the wasm won't be - // reproducible against a public source. 
- let (source_repo, source_rev) = match detect_git_state(workspace_root) { - GitState::Clean { repo, rev } => (Some(repo), Some(rev)), + // (and per-package source_path) when the workspace is a clean git + // checkout with an origin remote; warn (but proceed) otherwise so + // users know the wasm won't be reproducible against a public source. + let (source_repo, source_rev, git_root) = match detect_git_state(workspace_root) { + GitState::Clean { repo, rev, root } => (Some(repo), Some(rev), Some(root)), GitState::Dirty => { print.warnln( - "git working tree has uncommitted changes; source_repo/source_rev not embedded in contract metadata. Commit changes for a reproducible build.", + "git working tree has uncommitted changes; source_repo/source_rev/source_path not embedded in contract metadata. Commit changes for a reproducible build.", ); - (None, None) + (None, None, None) } - GitState::NotARepo => (None, None), + GitState::NotARepo => (None, None, None), }; for (i, p) in packages.iter().enumerate() { @@ -463,12 +463,17 @@ impl Cmd { .join(&self.profile) .join(&file); + let source_path = git_root.as_deref().and_then(|gr| { + pathdiff::diff_paths(&p.manifest_path, gr) + .map(|p| p.to_string_lossy().into_owned()) + }); self.inject_meta( &target_file_path, &ExtraMeta { bldimg: bldimg.clone(), source_repo: source_repo.clone(), source_rev: source_rev.clone(), + source_path, }, )?; Self::filter_spec(&target_file_path)?; @@ -656,6 +661,7 @@ impl Cmd { ("bldimg", extra.bldimg.as_deref()), ("source_repo", extra.source_repo.as_deref()), ("source_rev", extra.source_rev.as_deref()), + ("source_path", extra.source_path.as_deref()), ]; for (k, v) in kvs { let Some(v) = v else { continue }; @@ -779,12 +785,20 @@ struct ExtraMeta { /// `source_rev`: full SHA of the workspace's git HEAD commit. /// Set only when the workspace is a clean git checkout. source_rev: Option, + /// `source_path`: the package's `Cargo.toml` path relative to the git + /// repo root, e.g. 
`contracts/foo/Cargo.toml`. Set only when the + /// workspace is a clean git checkout. + source_path: Option, } enum GitState { NotARepo, Dirty, - Clean { repo: String, rev: String }, + Clean { + repo: String, + rev: String, + root: PathBuf, + }, } fn detect_git_state(workspace_root: &Path) -> GitState { @@ -817,7 +831,12 @@ fn detect_git_state(workspace_root: &Path) -> GitState { let Some(rev) = git_output(workspace_root, &["rev-parse", "HEAD"]) else { return GitState::Dirty; }; - GitState::Clean { repo, rev } + let Some(root) = + git_output(workspace_root, &["rev-parse", "--show-toplevel"]).map(PathBuf::from) + else { + return GitState::Dirty; + }; + GitState::Clean { repo, rev, root } } fn git_output(workspace_root: &Path, args: &[&str]) -> Option { diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index 64bb564050..c0bd82841a 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -506,7 +506,14 @@ fn warn_if_mainnet_wasm_not_reproducible( let Ok(spec) = soroban_spec_tools::contract::Spec::new(wasm_bytes) else { return; }; - let required = ["cliver", "bldimg", "rsver", "source_repo", "source_rev"]; + let required = [ + "cliver", + "bldimg", + "rsver", + "source_repo", + "source_rev", + "source_path", + ]; let missing: Vec<&str> = required .iter() .filter(|k| { diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 2f3180e9fc..936af29084 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -69,10 +69,14 @@ impl Cmd { let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; + let source_path = 
find_meta(&spec.meta, "source_path"); print.blankln(format!("Original wasm hash: {original_hash}")); print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); print.blankln(format!("Docker image: {bldimg}")); + if let Some(p) = &source_path { + print.blankln(format!("Source path: {p}")); + } let running = version::one_line(); if cliver != running { From c319deabcc859648d90accd22159305fd23d16e5 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Wed, 29 Apr 2026 12:42:49 +0000 Subject: [PATCH 45/64] split bldopts into per-field meta entries: bldopt_manifest_path, bldopt_package, bldopt_profile, bldopt_optimize --- .../src/commands/contract/build.rs | 37 ++++++++++--- .../src/commands/contract/deploy/wasm.rs | 4 +- .../src/commands/contract/verify.rs | 52 +++++++++++++++---- 3 files changed, 75 insertions(+), 18 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 80f4a3b30d..07893d1d64 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -352,7 +352,7 @@ impl Cmd { GitState::Clean { repo, rev, root } => (Some(repo), Some(rev), Some(root)), GitState::Dirty => { print.warnln( - "git working tree has uncommitted changes; source_repo/source_rev/source_path not embedded in contract metadata. Commit changes for a reproducible build.", + "git working tree has uncommitted changes; source_repo/source_rev/bldopt_* not embedded in contract metadata. 
Commit changes for a reproducible build.", ); (None, None, None) } @@ -463,7 +463,7 @@ impl Cmd { .join(&self.profile) .join(&file); - let source_path = git_root.as_deref().and_then(|gr| { + let bldopt_manifest_path = git_root.as_deref().and_then(|gr| { pathdiff::diff_paths(&p.manifest_path, gr) .map(|p| p.to_string_lossy().into_owned()) }); @@ -473,7 +473,10 @@ impl Cmd { bldimg: bldimg.clone(), source_repo: source_repo.clone(), source_rev: source_rev.clone(), - source_path, + bldopt_manifest_path, + bldopt_package: Some(p.name.clone()), + bldopt_profile: Some(self.profile.clone()), + bldopt_optimize: self.build_args.optimize, }, )?; Self::filter_spec(&target_file_path)?; @@ -661,7 +664,20 @@ impl Cmd { ("bldimg", extra.bldimg.as_deref()), ("source_repo", extra.source_repo.as_deref()), ("source_rev", extra.source_rev.as_deref()), - ("source_path", extra.source_path.as_deref()), + ( + "bldopt_manifest_path", + extra.bldopt_manifest_path.as_deref(), + ), + ("bldopt_package", extra.bldopt_package.as_deref()), + ("bldopt_profile", extra.bldopt_profile.as_deref()), + ( + "bldopt_optimize", + if extra.bldopt_optimize { + Some("true") + } else { + None + }, + ), ]; for (k, v) in kvs { let Some(v) = v else { continue }; @@ -785,10 +801,15 @@ struct ExtraMeta { /// `source_rev`: full SHA of the workspace's git HEAD commit. /// Set only when the workspace is a clean git checkout. source_rev: Option, - /// `source_path`: the package's `Cargo.toml` path relative to the git - /// repo root, e.g. `contracts/foo/Cargo.toml`. Set only when the - /// workspace is a clean git checkout. - source_path: Option, + /// `bldopt_manifest_path`: package's `Cargo.toml` path relative to the + /// git repo root. Set only when the workspace is a clean git checkout. + bldopt_manifest_path: Option, + /// `bldopt_package`: cargo package name being built. + bldopt_package: Option, + /// `bldopt_profile`: cargo profile (e.g. `release`). 
+ bldopt_profile: Option, + /// `bldopt_optimize`: present (with value `true`) iff `--optimize` was used. + bldopt_optimize: bool, } enum GitState { diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index c0bd82841a..46606737b4 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -512,7 +512,9 @@ fn warn_if_mainnet_wasm_not_reproducible( "rsver", "source_repo", "source_rev", - "source_path", + "bldopt_manifest_path", + "bldopt_package", + "bldopt_profile", ]; let missing: Vec<&str> = required .iter() diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 936af29084..b6d089f74e 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -23,11 +23,6 @@ pub struct Cmd { #[command(flatten)] pub common: shared::Args, - /// Path to Cargo.toml of the source to rebuild. Defaults to the nearest - /// Cargo.toml in the current directory or its parents. 
- #[arg(long)] - pub manifest_path: Option, - #[command(flatten)] pub container_args: ContainerArgs, } @@ -69,13 +64,23 @@ impl Cmd { let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; - let source_path = find_meta(&spec.meta, "source_path"); + let bldopt_manifest_path = find_meta(&spec.meta, "bldopt_manifest_path") + .ok_or(Error::MissingMeta("bldopt_manifest_path"))?; + let bldopt_package = + find_meta(&spec.meta, "bldopt_package").ok_or(Error::MissingMeta("bldopt_package"))?; + let bldopt_profile = + find_meta(&spec.meta, "bldopt_profile").ok_or(Error::MissingMeta("bldopt_profile"))?; + let bldopt_optimize = find_meta(&spec.meta, "bldopt_optimize").is_some(); + print.blankln(format!("Original wasm hash: {original_hash}")); print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); print.blankln(format!("Docker image: {bldimg}")); - if let Some(p) = &source_path { - print.blankln(format!("Source path: {p}")); + print.blankln(format!("Manifest path: {bldopt_manifest_path}")); + print.blankln(format!("Package: {bldopt_package}")); + print.blankln(format!("Profile: {bldopt_profile}")); + if bldopt_optimize { + print.blankln("Optimize: true"); } let running = version::one_line(); @@ -86,11 +91,30 @@ impl Cmd { }); } + // Resolve the manifest path relative to the cwd's git top-level so + // verify works from anywhere inside the checkout. 
+ let manifest_path = { + let p = PathBuf::from(&bldopt_manifest_path); + if p.is_absolute() { + p + } else if let Some(root) = git_top_level() { + root.join(p) + } else { + p + } + }; + let build_cmd = build::Cmd { - manifest_path: self.manifest_path.clone(), + manifest_path: Some(manifest_path), + package: Some(bldopt_package), + profile: bldopt_profile, backend: build::Backend::Docker { image: bldimg }, container_args: self.container_args.clone(), rustup_toolchain: Some(rsver), + build_args: build::BuildArgs { + optimize: bldopt_optimize, + ..build::BuildArgs::default() + }, ..build::Cmd::default() }; let built = build_cmd.run(global_args).await?; @@ -126,6 +150,16 @@ impl Cmd { } } +fn git_top_level() -> Option { + let out = std::process::Command::new("git") + .args(["rev-parse", "--show-toplevel"]) + .output() + .ok()?; + out.status + .success() + .then(|| PathBuf::from(String::from_utf8_lossy(&out.stdout).trim())) +} + fn find_meta(meta: &[ScMetaEntry], key: &str) -> Option { meta.iter().find_map(|entry| { let ScMetaEntry::ScMetaV0(ScMetaV0 { key: k, val }) = entry; From fe07b3678833e07c43235a6caaeccff81e146856 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Thu, 30 Apr 2026 13:09:13 +0000 Subject: [PATCH 46/64] container: mount git repo root when available, fall back to workspace_root --- cmd/soroban-cli/src/commands/contract/build.rs | 10 +++++++--- cmd/soroban-cli/src/commands/contract/build_docker.rs | 10 +++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 07893d1d64..7f29964edb 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -358,6 +358,10 @@ impl Cmd { } GitState::NotARepo => (None, None, None), }; + // Mount the git repo root when available (so cross-workspace path + // dependencies and shared repo files are visible 
inside the + // container). Fall back to the cargo workspace root otherwise. + let mount_root = git_root.as_deref().unwrap_or(workspace_root); for (i, p) in packages.iter().enumerate() { if i > 0 { @@ -378,8 +382,8 @@ impl Cmd { cmd.arg("--locked"); } let manifest_path = if self.backend.docker_image().is_some() { - // Inside the container the workspace is mounted at /workspace. - let rel = pathdiff::diff_paths(&p.manifest_path, workspace_root) + // Inside the container the mount root is at /workspace. + let rel = pathdiff::diff_paths(&p.manifest_path, mount_root) .unwrap_or(p.manifest_path.clone().into()); Path::new(build_docker::WORK_DIR).join(rel) } else { @@ -437,7 +441,7 @@ impl Cmd { &cmd_str, image, resolved_image.as_deref(), - workspace_root, + mount_root, target_dir.as_std_path(), &wasm_target, self.rustup_toolchain.as_deref(), diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 5ebbbb606b..0fdc7b7c3b 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -61,7 +61,7 @@ pub async fn run_in_docker( cmd_str: &str, image: &str, pre_resolved: Option<&str>, - workspace_root: &Path, + mount_root: &Path, target_dir: &Path, wasm_target: &str, pin_toolchain: Option<&str>, @@ -96,7 +96,7 @@ pub async fn run_in_docker( // container run. 
let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; let binds = vec![ - format!("{}:{}", workspace_root.display(), WORK_DIR), + format!("{}:{}", mount_root.display(), WORK_DIR), format!("{}:{}", target_dir.display(), TARGET_DIR), format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), ]; @@ -110,7 +110,7 @@ pub async fn run_in_docker( env.push(format!("CARGO_TARGET_DIR={TARGET_DIR}")); env.push(format!( "SOURCE_DATE_EPOCH={}", - source_date_epoch(workspace_root) + source_date_epoch(mount_root) )); // Force cargo to emit color (otherwise cargo detects the non-TTY stdout // and falls back to monochrome). Matches what users see for local builds. @@ -325,10 +325,10 @@ fn current_uid_gid() -> Option { /// Best-effort SOURCE_DATE_EPOCH from the workspace's HEAD commit time; /// falls back to `"0"` when not in a git repo. -fn source_date_epoch(workspace_root: &Path) -> String { +fn source_date_epoch(mount_root: &Path) -> String { Command::new("git") .arg("-C") - .arg(workspace_root) + .arg(mount_root) .args(["log", "-1", "--format=%ct"]) .output() .ok() From edc5397642bb6e53a4eb6c96348493df105ffa69 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:42:43 +1000 Subject: [PATCH 47/64] add tar and http-body-util dependencies --- Cargo.lock | 14 + cmd/crates/soroban-test/tests/it/build.rs | 20 +- cmd/soroban-cli/Cargo.toml | 3 + .../src/commands/contract/build.rs | 206 ++++++++- .../src/commands/contract/build_docker.rs | 19 +- .../src/commands/contract/build_docker_all.rs | 396 ++++++++++++++++++ .../contract/build_docker_all/Dockerfile | 23 + cmd/soroban-cli/src/commands/contract/mod.rs | 1 + .../src/commands/contract/verify.rs | 42 +- 9 files changed, 700 insertions(+), 24 deletions(-) create mode 100644 cmd/soroban-cli/src/commands/contract/build_docker_all.rs create mode 100644 cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile diff --git a/Cargo.lock b/Cargo.lock index 
b769eb65ec..dba0b2e888 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5377,6 +5377,7 @@ dependencies = [ "async-trait", "base64 0.21.7", "bollard", + "bytes", "bytesize", "cargo_metadata", "chrono", @@ -5396,6 +5397,7 @@ dependencies = [ "heck 0.5.0", "hex", "home", + "http-body-util", "humantime", "indexmap 2.11.0", "itertools 0.10.5", @@ -5437,6 +5439,7 @@ dependencies = [ "strsim", "strum 0.17.1", "strum_macros 0.17.1", + "tar", "tempfile", "termcolor", "termcolor_output", @@ -6086,6 +6089,17 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tar" +version = "0.4.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22692a6476a21fa75fdfc11d452fda482af402c008cdbaf3476414e122040973" +dependencies = [ + "filetime", + "libc", + "xattr", +] + [[package]] name = "temp-dir" version = "0.1.16" diff --git a/cmd/crates/soroban-test/tests/it/build.rs b/cmd/crates/soroban-test/tests/it/build.rs index 1863af976b..aaa749e153 100644 --- a/cmd/crates/soroban-test/tests/it/build.rs +++ b/cmd/crates/soroban-test/tests/it/build.rs @@ -594,12 +594,24 @@ fn get_entries(fixture_path: &Path, outdir: &Path) -> Vec { Limits::none(), )) .filter(|entry| match entry { - // Ignore the meta entries that the SDK embeds that capture the SDK and - // Rust version, since these will change often and are not really - // relevant to this test. + // Ignore SDK-embedded keys (rsver, rssdkver) and stellar-cli-embedded + // build-record keys (bldbkd, bldimg, bldopt_*, source_*) — these + // change often and aren't what these tests are asserting on. Ok(ScMetaEntry::ScMetaV0(ScMetaV0 { key, .. 
})) => { let key = key.to_string(); - !matches!(key.as_str(), "rsver" | "rssdkver") + !matches!( + key.as_str(), + "rsver" + | "rssdkver" + | "bldbkd" + | "bldimg" + | "bldopt_manifest_path" + | "bldopt_package" + | "bldopt_profile" + | "bldopt_optimize" + | "source_repo" + | "source_rev" + ) } _ => true, }) diff --git a/cmd/soroban-cli/Cargo.toml b/cmd/soroban-cli/Cargo.toml index 56a1b88351..8d8fd11a09 100644 --- a/cmd/soroban-cli/Cargo.toml +++ b/cmd/soroban-cli/Cargo.toml @@ -108,6 +108,9 @@ tempfile = "3.8.1" toml_edit = { workspace = true } rust-embed = { version = "8.2.0", features = ["debug-embed"] } bollard = { workspace = true } +bytes = { workspace = true } +tar = "0.4" +http-body-util = "0.1" futures-util = "0.3.30" futures = "0.3.30" home = "0.5.9" diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 7f29964edb..9ea3279924 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -19,7 +19,11 @@ use stellar_xdr::curr::{Limited, Limits, ScMetaEntry, ScMetaV0, StringM, WriteXd #[cfg(feature = "additional-libs")] use crate::commands::contract::optimize; use crate::{ - commands::{container::shared::Args as ContainerArgs, contract::build_docker, global, version}, + commands::{ + container::shared::Args as ContainerArgs, + contract::{build_docker, build_docker_all}, + global, version, + }, print::Print, wasm, }; @@ -103,6 +107,11 @@ pub struct Cmd { /// recorded in contract metadata. /// - `docker=`: build inside the specified docker image. Pin /// via `--backend docker=@sha256:...` for fully-reproducible builds. + /// - `docker-all` / `docker-all=`: like `docker`, but layers a + /// stellar-cli install on top of the base image and runs the entire + /// `stellar contract build` (including post-build steps) inside the + /// container. 
Captures the whole build pipeline in the image so the + /// host stellar-cli version is irrelevant for `verify`. /// /// Aborted docker builds may leave a stopped container; clean with `docker container prune`. #[arg( @@ -119,9 +128,20 @@ pub struct Cmd { /// Run cargo via `cargo +` to pin the rust toolchain. Set by /// `verify` from the wasm's `rsver` meta entry; not user-facing. - #[arg(skip)] + #[arg(long, hide = true)] pub rustup_toolchain: Option, + /// Override `bldimg` meta. Set by `--backend docker-all` when invoking + /// the in-container stellar-cli; not user-facing. + #[arg(long, hide = true)] + pub bldimg: Option, + + /// Override `bldbkd` meta (build backend identifier). Set by + /// `--backend docker-all` when invoking the in-container stellar-cli; + /// not user-facing. + #[arg(long, hide = true)] + pub bldbkd: Option, + #[command(flatten)] pub build_args: BuildArgs, @@ -142,15 +162,19 @@ pub enum Backend { /// Build with the host's rust toolchain. #[default] Local, - /// Build inside a Docker container with the given image. + /// Build cargo inside a Docker container with the given image; post-build + /// steps run on the host. Docker { image: String }, + /// Build inside a stellar-cli image layered on top of the given base + /// image; the entire build pipeline runs inside the container. + DockerAll { image: String }, } impl Backend { - /// Returns the docker image if the backend is `Docker`, else `None`. + /// Returns the docker image if the backend uses one, else `None`. 
pub fn docker_image(&self) -> Option<&str> { match self { - Self::Docker { image } => Some(image), + Self::Docker { image } | Self::DockerAll { image } => Some(image), Self::Local => None, } } @@ -164,6 +188,9 @@ pub fn parse_backend(s: &str) -> Result { "docker" => Ok(Backend::Docker { image: DEFAULT_DOCKER_IMAGE.to_string(), }), + "docker-all" => Ok(Backend::DockerAll { + image: DEFAULT_DOCKER_IMAGE.to_string(), + }), _ => { if let Some(image) = s.strip_prefix("docker=") { if image.is_empty() { @@ -172,9 +199,16 @@ pub fn parse_backend(s: &str) -> Result { Ok(Backend::Docker { image: image.to_string(), }) + } else if let Some(image) = s.strip_prefix("docker-all=") { + if image.is_empty() { + return Err("docker-all image cannot be empty; use `--backend docker-all` for the default image".to_string()); + } + Ok(Backend::DockerAll { + image: image.to_string(), + }) } else { Err(format!( - "unknown backend {s:?}; expected `local`, `docker`, or `docker=`" + "unknown backend {s:?}; expected `local`, `docker`, `docker=`, `docker-all`, or `docker-all=`" )) } } @@ -300,6 +334,8 @@ impl Default for Cmd { backend: Backend::Local, container_args: ContainerArgs { docker_host: None }, rustup_toolchain: None, + bldimg: None, + bldbkd: None, build_args: BuildArgs::default(), action: None, } @@ -363,6 +399,24 @@ impl Cmd { // container). Fall back to the cargo workspace root otherwise. let mount_root = git_root.as_deref().unwrap_or(workspace_root); + // `--backend docker-all` orchestrates a *layered* image build (base + // rust image + wasm target + stellar-cli installed at the host's + // commit sha) and runs the entire build pipeline inside it. The host + // skips the cargo + post-processing loop below. 
+ if let Backend::DockerAll { image } = &self.backend { + return self + .run_docker_all( + image, + &print, + &packages, + target_dir.as_std_path(), + workspace_root, + mount_root, + &wasm_target, + ) + .await; + } + for (i, p) in packages.iter().enumerate() { if i > 0 { // Blank line separating successive contract builds in a workspace. @@ -457,7 +511,11 @@ impl Cmd { if !status.success() { return Err(Error::Exit(status)); } - None + // When invoked from inside a `--backend docker-all` + // container, the outer host passes `--bldimg` so the + // in-container build still records the original base + // image digest in meta. + self.bldimg.clone() }; let wasm_name = p.name.replace('-', "_"); @@ -471,10 +529,26 @@ impl Cmd { pathdiff::diff_paths(&p.manifest_path, gr) .map(|p| p.to_string_lossy().into_owned()) }); + let bldbkd = self.bldbkd.clone().unwrap_or_else(|| { + match &self.backend { + Backend::Local => "local", + Backend::Docker { .. } => "docker", + // `Backend::DockerAll` returns early in `run` before + // reaching this loop; it never injects meta itself. + // The in-container `Backend::Local` invocation is + // what actually writes the wasm, with `--bldbkd + // docker-all` set so `self.bldbkd` is `Some` above. + Backend::DockerAll { .. } => { + unreachable!("DockerAll returns before inject_meta") + } + } + .to_string() + }); self.inject_meta( &target_file_path, &ExtraMeta { bldimg: bldimg.clone(), + bldbkd: Some(bldbkd), source_repo: source_repo.clone(), source_rev: source_rev.clone(), bldopt_manifest_path, @@ -532,6 +606,95 @@ impl Cmd { Ok(built_contracts) } + /// Orchestrate a `--backend docker-all` build: build a stellar-cli image + /// layered on top of the chosen base, then run the full + /// `stellar contract build --backend local` pipeline inside it. + /// + /// The host doesn't run cargo or post-processing itself — those are done + /// by the in-container stellar-cli, against the bind-mounted source and + /// target directories. 
+ #[allow(clippy::too_many_arguments)] + async fn run_docker_all( + &self, + image: &str, + print: &Print, + packages: &[Package], + target_dir: &Path, + workspace_root: &Path, + mount_root: &Path, + wasm_target: &str, + ) -> Result, Error> { + // Pick the stellar-cli rev to install in the layered image. We + // require the host CLI to have been built from a commit so the same + // version can be installed inside the container — guaranteeing + // `cliver` (recorded by the in-container build) matches the host's. + let cli_rev = build_docker_all::extract_full_sha(version::git())?; + + // The user's --manifest-path (if any) is a host path; translate to + // the in-container `/workspace/...` form. If absent, fall back to + // the workspace root's Cargo.toml. + let host_manifest = self + .manifest_path + .clone() + .unwrap_or_else(|| workspace_root.join("Cargo.toml")); + let rel = pathdiff::diff_paths(&host_manifest, mount_root) + .unwrap_or_else(|| host_manifest.clone()); + let in_container_manifest = Path::new(build_docker::WORK_DIR) + .join(rel) + .to_string_lossy() + .into_owned(); + + let inner = build_docker_all::InnerBuildArgs { + manifest_path: in_container_manifest, + package: self.package.as_deref(), + profile: &self.profile, + features: self.features.as_deref(), + all_features: self.all_features, + no_default_features: self.no_default_features, + optimize: self.build_args.optimize, + meta: &self.build_args.meta, + rustup_toolchain: self.rustup_toolchain.as_deref(), + }; + + build_docker_all::run_in_docker_all( + image, + &cli_rev, + mount_root, + target_dir, + wasm_target, + &inner, + &self.container_args, + print, + ) + .await?; + + // The in-container build wrote its outputs to the bind-mounted + // target dir (and optionally copied to --out-dir, but the inner + // doesn't see --out-dir; we copy here on the host side). 
+ let mut built = Vec::with_capacity(packages.len()); + for p in packages { + let wasm_name = p.name.replace('-', "_"); + let file = format!("{wasm_name}.wasm"); + let target_file_path = target_dir.join(wasm_target).join(&self.profile).join(&file); + + let final_path = if let Some(out_dir) = &self.out_dir { + fs::create_dir_all(out_dir).map_err(Error::CreatingOutDir)?; + let out_file_path = out_dir.join(&file); + fs::copy(&target_file_path, &out_file_path) + .map_err(Error::CopyingWasmFile)?; + out_file_path + } else { + target_file_path + }; + + built.push(BuiltContract { + name: p.name.clone(), + path: final_path, + }); + } + Ok(built) + } + fn features(&self) -> Option> { self.features .as_ref() @@ -666,6 +829,7 @@ impl Cmd { // clean git checkout. let kvs = [ ("bldimg", extra.bldimg.as_deref()), + ("bldbkd", extra.bldbkd.as_deref()), ("source_repo", extra.source_repo.as_deref()), ("source_rev", extra.source_rev.as_deref()), ( @@ -799,6 +963,10 @@ struct ExtraMeta { /// `bldimg`: fully-qualified container image used to build (e.g. /// `docker.io/library/rust@sha256:...`). Set when `--backend docker`. bldimg: Option, + /// `bldbkd`: build backend identifier — one of `local`, `docker`, or + /// `docker-all`. Always recorded so anyone inspecting the wasm can see + /// which build path produced it. + bldbkd: Option, /// `source_repo`: HTTPS URL of the workspace's git origin remote. /// Set only when the workspace is a clean git checkout. source_repo: Option, @@ -1209,4 +1377,28 @@ mod tests { } assert_eq!(remote_to_https("notaurl"), None); } + + #[test] + fn parse_backend_recognizes_docker_all() { + assert!(matches!(parse_backend("local"), Ok(Backend::Local))); + assert!(matches!( + parse_backend("docker"), + Ok(Backend::Docker { .. }) + )); + assert!(matches!( + parse_backend("docker=quay.io/foo/bar:tag"), + Ok(Backend::Docker { .. }) + )); + assert!(matches!( + parse_backend("docker-all"), + Ok(Backend::DockerAll { .. 
}) + )); + let parsed = parse_backend("docker-all=quay.io/foo/bar:tag").unwrap(); + match parsed { + Backend::DockerAll { image } => assert_eq!(image, "quay.io/foo/bar:tag"), + other => panic!("expected DockerAll, got {other:?}"), + } + assert!(parse_backend("docker-all=").is_err()); + assert!(parse_backend("nonsense").is_err()); + } } diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 0fdc7b7c3b..ab8c572350 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -51,6 +51,15 @@ pub enum Error { #[error("resolving CARGO_HOME / RUSTUP_HOME: {0}")] CargoHome(std::io::Error), + + #[error("building stellar-cli image: {0}")] + ImageBuild(String), + + #[error("packaging Dockerfile context: {0}")] + Tar(std::io::Error), + + #[error("host stellar-cli has no commit sha to install in container; rebuild from a git checkout (or install via `cargo install --git ... --rev `)")] + NoHostCliRev, } /// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, @@ -213,7 +222,7 @@ async fn run_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> Ok(()) } -async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { +pub(super) async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { let mut stream = docker.create_image( Some(CreateImageOptions { from_image: Some(image.to_string()), @@ -250,7 +259,7 @@ async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), E // Returns a fully-qualified `/@sha256:` reference so // that `verify` on a different machine can resolve it without depending on // local registry config. 
-async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { +pub(super) async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { let canonical = fully_qualify(strip_tag(image)); let digest = if let Some(d) = sha256_digest(image) { d.to_string() @@ -311,7 +320,7 @@ fn fully_qualify(name: &str) -> String { #[allow(clippy::unnecessary_wraps)] #[cfg(unix)] -fn current_uid_gid() -> Option { +pub(super) fn current_uid_gid() -> Option { // SAFETY: getuid/getgid are infallible POSIX calls. Some(format!("{}:{}", unsafe { libc::getuid() }, unsafe { libc::getgid() @@ -319,13 +328,13 @@ fn current_uid_gid() -> Option { } #[cfg(not(unix))] -fn current_uid_gid() -> Option { +pub(super) fn current_uid_gid() -> Option { None } /// Best-effort SOURCE_DATE_EPOCH from the workspace's HEAD commit time; /// falls back to `"0"` when not in a git repo. -fn source_date_epoch(mount_root: &Path) -> String { +pub(super) fn source_date_epoch(mount_root: &Path) -> String { Command::new("git") .arg("-C") .arg(mount_root) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs new file mode 100644 index 0000000000..2397bc419f --- /dev/null +++ b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs @@ -0,0 +1,396 @@ +//! `--backend docker-all` build backend. +//! +//! Layers a stellar-cli install on top of the user's chosen rust base image, +//! then runs the entire `stellar contract build --backend local` pipeline +//! inside the container — including the post-build steps (meta injection, +//! spec filtering, optimization). The host-side stellar-cli only orchestrates. +//! +//! Reproducibility: `bldimg` records the *base* rust image digest (same as +//! `--backend docker`), `cliver` records the stellar-cli version installed +//! into the layered image, and `rsver` is recorded by soroban-sdk. Together +//! these three reconstruct the layered image on `verify`. 
+ +use std::collections::HashMap; +use std::path::Path; + +use bollard::{ + models::ContainerCreateBody, + query_parameters::{ + BuildImageOptionsBuilder, CreateContainerOptions, LogsOptions, RemoveContainerOptions, + StartContainerOptions, WaitContainerOptions, + }, + service::HostConfig, + Docker, +}; +use bytes::Bytes; +use futures_util::StreamExt; +use http_body_util::{Either, Full}; + +use super::build_docker::{ + current_uid_gid, pull_image, resolve_image_digest, source_date_epoch, Error, +}; +use crate::{commands::container::shared::Args as ContainerArgs, print::Print}; + +const DOCKERFILE: &str = include_str!("build_docker_all/Dockerfile"); +const STELLAR_CLI_REPO: &str = "https://github.com/stellar/stellar-cli"; + +/// Where the workspace and target are mounted inside the container, and where +/// the cargo registry cache lives. The first two are bind mounts shared with +/// the host; the registry cache is a bind mount of the host's cargo registry. +const WORK_DIR: &str = "/workspace"; +const TARGET_DIR: &str = "/target"; +const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; +const PLATFORM: &str = "linux/amd64"; + +/// Forwarded host build args used to construct the inner +/// `stellar contract build --backend local` invocation. `manifest_path` is +/// expected to already be in container-relative form (`/workspace/...`). +pub struct InnerBuildArgs<'a> { + pub manifest_path: String, + pub package: Option<&'a str>, + pub profile: &'a str, + pub features: Option<&'a str>, + pub all_features: bool, + pub no_default_features: bool, + pub optimize: bool, + pub meta: &'a [(String, String)], + pub rustup_toolchain: Option<&'a str>, +} + +/// Pull the base image, build a layered stellar-cli image on top of it, then +/// run `stellar contract build --backend local --bldimg --bldbkd docker-all` +/// inside that layered image. Returns the resolved base image reference for +/// embedding into `bldimg`. 
+#[allow(clippy::too_many_arguments)] +pub async fn run_in_docker_all( + base_image: &str, + cli_rev: &str, + mount_root: &Path, + target_dir: &Path, + wasm_target: &str, + inner: &InnerBuildArgs<'_>, + container_args: &ContainerArgs, + print: &Print, +) -> Result { + let docker: Docker = container_args + .connect_to_docker(print) + .await + .map_err(Error::RuntimeNotRunning)?; + + pull_image(&docker, base_image, print).await?; + let base_resolved = resolve_image_digest(&docker, base_image).await?; + + let layered_tag = format!("stellar-cli-build:{}", short_hash(&base_resolved, cli_rev)); + print.infoln(format!( + "Building stellar-cli build image {layered_tag} (base {base_resolved}, stellar-cli {cli_rev})" + )); + build_layered_image( + &docker, + &base_resolved, + cli_rev, + wasm_target, + &layered_tag, + print, + ) + .await?; + + print.infoln(format_inner_cmd(inner, &base_resolved)); + + run_inner_build( + &docker, + &layered_tag, + &base_resolved, + inner, + mount_root, + target_dir, + ) + .await?; + + Ok(base_resolved) +} + +/// Build the layered image (FROM base + rustup target + cargo install stellar-cli). 
+async fn build_layered_image( + docker: &Docker, + base_image: &str, + cli_rev: &str, + wasm_target: &str, + tag: &str, + print: &Print, +) -> Result<(), Error> { + let context = build_tar_context()?; + + let mut buildargs: HashMap = HashMap::new(); + buildargs.insert("BASE_IMAGE".to_string(), base_image.to_string()); + buildargs.insert("WASM_TARGET".to_string(), wasm_target.to_string()); + buildargs.insert( + "STELLAR_CLI_REPO".to_string(), + STELLAR_CLI_REPO.to_string(), + ); + buildargs.insert("STELLAR_CLI_REV".to_string(), cli_rev.to_string()); + + let options = BuildImageOptionsBuilder::default() + .dockerfile("Dockerfile") + .t(tag) + .platform(PLATFORM) + .buildargs(&buildargs) + .rm(true) + .build(); + + let body = Either::Left(Full::new(context)); + let mut stream = docker.build_image(options, None, Some(body)); + while let Some(item) = stream.next().await { + let info = item?; + if let Some(s) = info.stream { + let s = s.trim_end_matches('\n'); + if !s.is_empty() { + print.blankln(s); + } + } + if let Some(detail) = info.error_detail { + return Err(Error::ImageBuild( + detail.message.unwrap_or_else(|| "unknown".to_string()), + )); + } + } + Ok(()) +} + +/// Construct an in-memory tar containing only the embedded Dockerfile. +fn build_tar_context() -> Result { + let dockerfile = DOCKERFILE.as_bytes(); + let mut buf = Vec::new(); + { + let mut builder = tar::Builder::new(&mut buf); + let mut header = tar::Header::new_gnu(); + header.set_path("Dockerfile").map_err(Error::Tar)?; + header.set_size(dockerfile.len() as u64); + header.set_mode(0o644); + header.set_cksum(); + builder.append(&header, dockerfile).map_err(Error::Tar)?; + builder.finish().map_err(Error::Tar)?; + } + Ok(Bytes::from(buf)) +} + +/// Run the in-container `stellar contract build --backend local --bldimg ... --bldbkd docker-all`. 
+async fn run_inner_build( + docker: &Docker, + layered_tag: &str, + base_resolved: &str, + inner: &InnerBuildArgs<'_>, + mount_root: &Path, + target_dir: &Path, +) -> Result<(), Error> { + let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; + let binds = vec![ + format!("{}:{}", mount_root.display(), WORK_DIR), + format!("{}:{}", target_dir.display(), TARGET_DIR), + format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), + ]; + + let env = vec![ + format!("CARGO_TARGET_DIR={TARGET_DIR}"), + format!("SOURCE_DATE_EPOCH={}", source_date_epoch(mount_root)), + "CARGO_TERM_COLOR=always".to_string(), + ]; + + let argv = build_inner_argv(inner, base_resolved); + + let config = ContainerCreateBody { + image: Some(layered_tag.to_string()), + cmd: Some(argv), + env: Some(env), + working_dir: Some(WORK_DIR.to_string()), + user: current_uid_gid(), + attach_stdout: Some(true), + attach_stderr: Some(true), + host_config: Some(HostConfig { + binds: Some(binds), + auto_remove: Some(false), + ..Default::default() + }), + ..Default::default() + }; + + let container_id = docker + .create_container(None::, config) + .await? 
+ .id; + + let result = stream_and_wait(docker, &container_id).await; + + let _ = docker + .remove_container( + &container_id, + Some(RemoveContainerOptions { + force: true, + ..Default::default() + }), + ) + .await; + + result +} + +async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> { + docker + .start_container(container_id, None::) + .await?; + + let mut log_stream = docker.logs( + container_id, + Some(LogsOptions { + follow: true, + stdout: true, + stderr: true, + ..Default::default() + }), + ); + while let Some(item) = log_stream.next().await { + let s = item?.to_string(); + let s = s.trim_end_matches('\n'); + if !s.is_empty() { + eprintln!("{s}"); + } + } + + let mut wait_stream = docker.wait_container(container_id, None::); + let mut exit_code: i64 = 0; + while let Some(res) = wait_stream.next().await { + match res { + Ok(r) => exit_code = r.status_code, + Err(bollard::errors::Error::DockerContainerWaitError { code, .. }) => exit_code = code, + Err(e) => return Err(Error::Runtime(e)), + } + } + if exit_code != 0 { + return Err(Error::BuildExit(exit_code)); + } + Ok(()) +} + +/// Build the argv for the in-container `stellar contract build --backend local`. +fn build_inner_argv(inner: &InnerBuildArgs<'_>, base_resolved: &str) -> Vec { + let mut argv: Vec = vec![ + "stellar".to_string(), + "contract".to_string(), + "build".to_string(), + "--backend".to_string(), + "local".to_string(), + "--bldimg".to_string(), + base_resolved.to_string(), + "--bldbkd".to_string(), + "docker-all".to_string(), + "--manifest-path".to_string(), + inner.manifest_path.clone(), + "--profile".to_string(), + inner.profile.to_string(), + // Always --locked so the inner build is deterministic. 
+ "--locked".to_string(), + ]; + if let Some(p) = inner.package { + argv.push("--package".to_string()); + argv.push(p.to_string()); + } + if let Some(f) = inner.features { + argv.push("--features".to_string()); + argv.push(f.to_string()); + } + if inner.all_features { + argv.push("--all-features".to_string()); + } + if inner.no_default_features { + argv.push("--no-default-features".to_string()); + } + if inner.optimize { + argv.push("--optimize".to_string()); + } + for (k, v) in inner.meta { + argv.push("--meta".to_string()); + argv.push(format!("{k}={v}")); + } + if let Some(t) = inner.rustup_toolchain { + argv.push("--rustup-toolchain".to_string()); + argv.push(t.to_string()); + } + argv +} + +/// Stable short tag suffix from `(base_image, cli_rev)`. +fn short_hash(base_image: &str, cli_rev: &str) -> String { + use sha2::{Digest, Sha256}; + let mut h = Sha256::new(); + h.update(base_image.as_bytes()); + h.update(b"\0"); + h.update(cli_rev.as_bytes()); + let digest = h.finalize(); + hex::encode(&digest[..8]) +} + +/// One-line preview of the inner cargo command, for the `ℹ︎ ...` info log. +fn format_inner_cmd(inner: &InnerBuildArgs<'_>, base_resolved: &str) -> String { + build_inner_argv(inner, base_resolved).join(" ") +} + +/// Reduce a host stellar-cli git revision string to a 40-char commit sha. +/// +/// `crate_git_revision` produces three shapes depending on how the host CLI +/// was installed (homebrew/crates.io/cargo-git). Only the full sha can be +/// passed to `cargo install --rev` reliably. (See issue #2535 for the +/// in-progress normalization of this rendering.) 
+pub fn extract_full_sha(git: &str) -> Result { + if git.is_empty() { + return Err(Error::NoHostCliRev); + } + if is_full_sha(git) { + return Ok(git.to_string()); + } + if let Some(idx) = git.rfind("-g") { + let candidate = &git[idx + 2..]; + if is_full_sha(candidate) { + return Ok(candidate.to_string()); + } + } + Err(Error::NoHostCliRev) +} + +fn is_full_sha(s: &str) -> bool { + s.len() == 40 && s.chars().all(|c| c.is_ascii_hexdigit()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn extract_full_sha_full() { + let sha = "60f7458e7ecffddf2f2d91dc6d0d2db4fab03ecc"; + assert_eq!(extract_full_sha(sha).unwrap(), sha); + } + + #[test] + fn extract_full_sha_describe() { + let s = "v20.0.0-836-gfe07b3678833e07c43235a6caaeccff81e146856"; + let want = "fe07b3678833e07c43235a6caaeccff81e146856"; + assert_eq!(extract_full_sha(s).unwrap(), want); + } + + #[test] + fn extract_full_sha_empty_errors() { + assert!(matches!(extract_full_sha(""), Err(Error::NoHostCliRev))); + } + + #[test] + fn extract_full_sha_short_errors() { + assert!(matches!(extract_full_sha("abc"), Err(Error::NoHostCliRev))); + } + + #[test] + fn short_hash_is_deterministic_and_short() { + let a = short_hash("docker.io/library/rust@sha256:abc", "deadbeef"); + let b = short_hash("docker.io/library/rust@sha256:abc", "deadbeef"); + assert_eq!(a, b); + assert_eq!(a.len(), 16); + } +} diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile b/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile new file mode 100644 index 0000000000..35ae89f5ab --- /dev/null +++ b/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile @@ -0,0 +1,23 @@ +# syntax=docker/dockerfile:1 +# +# Build image used by `stellar contract build --backend docker-all`. +# +# Layered on top of a user-chosen rust base image so the rust toolchain stays +# pinnable to whatever the user trusts. 
The stellar-cli is installed inside +# at the same git revision the host stellar-cli was built from, so the entire +# build pipeline (cargo + meta injection + spec filtering + optimize) runs +# inside the container and the image captures everything except the source. + +ARG BASE_IMAGE +FROM ${BASE_IMAGE} + +ARG WASM_TARGET +RUN rustup target add ${WASM_TARGET} + +ARG STELLAR_CLI_REPO +ARG STELLAR_CLI_REV +RUN cargo install \ + --locked \ + --git ${STELLAR_CLI_REPO} \ + --rev ${STELLAR_CLI_REV} \ + stellar-cli diff --git a/cmd/soroban-cli/src/commands/contract/mod.rs b/cmd/soroban-cli/src/commands/contract/mod.rs index 602e107972..3cb344de6e 100644 --- a/cmd/soroban-cli/src/commands/contract/mod.rs +++ b/cmd/soroban-cli/src/commands/contract/mod.rs @@ -4,6 +4,7 @@ pub mod asset; pub mod bindings; pub mod build; pub mod build_docker; +pub mod build_docker_all; pub mod deploy; pub mod extend; pub mod fetch; diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index b6d089f74e..61e3a9fb0c 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -48,6 +48,10 @@ pub enum Error { }, #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), + #[error("contract was built with `--backend local` (bldbkd=local); local builds are not reproducible. 
Rebuild with `--backend docker` or `--backend docker-all` to make it verifiable.")] + LocalBackend, + #[error("unknown bldbkd value '{0}'")] + UnknownBackend(String), } impl Cmd { @@ -63,6 +67,7 @@ impl Cmd { let spec = Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; + let bldbkd = find_meta(&spec.meta, "bldbkd"); let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; let bldopt_manifest_path = find_meta(&spec.meta, "bldopt_manifest_path") .ok_or(Error::MissingMeta("bldopt_manifest_path"))?; @@ -76,6 +81,10 @@ impl Cmd { print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); print.blankln(format!("Docker image: {bldimg}")); + print.blankln(format!( + "Build backend: {}", + bldbkd.as_deref().unwrap_or("docker") + )); print.blankln(format!("Manifest path: {bldopt_manifest_path}")); print.blankln(format!("Package: {bldopt_package}")); print.blankln(format!("Profile: {bldopt_profile}")); @@ -83,13 +92,30 @@ impl Cmd { print.blankln("Optimize: true"); } - let running = version::one_line(); - if cliver != running { - return Err(Error::CliVersionMismatch { - expected: cliver, - actual: running, - }); - } + // For docker-all builds the in-container stellar-cli is what built + // the wasm, so the host version doesn't have to match. For docker + // (host post-processing), the host stellar-cli is part of the build + // pipeline and a mismatch means the rebuild will diverge. Legacy + // wasms with no bldbkd are treated as `docker`. 
+ let backend = match bldbkd.as_deref().unwrap_or("docker") { + "docker-all" => build::Backend::DockerAll { + image: bldimg.clone(), + }, + "docker" => { + let running = version::one_line(); + if cliver != running { + return Err(Error::CliVersionMismatch { + expected: cliver, + actual: running, + }); + } + build::Backend::Docker { + image: bldimg.clone(), + } + } + "local" => return Err(Error::LocalBackend), + other => return Err(Error::UnknownBackend(other.to_string())), + }; // Resolve the manifest path relative to the cwd's git top-level so // verify works from anywhere inside the checkout. @@ -108,7 +134,7 @@ impl Cmd { manifest_path: Some(manifest_path), package: Some(bldopt_package), profile: bldopt_profile, - backend: build::Backend::Docker { image: bldimg }, + backend, container_args: self.container_args.clone(), rustup_toolchain: Some(rsver), build_args: build::BuildArgs { From 42351b15886f5d9bcccee10f26c66ab16f80b5b7 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 13:58:58 +1000 Subject: [PATCH 48/64] update crate git revision handling --- Cargo.lock | 24 +++++--- cmd/soroban-cli/Cargo.toml | 2 +- .../src/commands/contract/build.rs | 14 ++++- .../src/commands/contract/build_docker_all.rs | 57 ++++++++++++------- .../src/commands/contract/verify.rs | 1 + 5 files changed, 66 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dba0b2e888..4033c3a083 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1297,6 +1297,16 @@ dependencies = [ "serde_json", ] +[[package]] +name = "crate-git-revision" +version = "0.0.6" +source = "git+https://github.com/stellar/crate-git-revision?branch=dirty-untracked-files#43cb856d8f136c1cb5b9ab232e441b9fd776e849" +dependencies = [ + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -5383,7 +5393,7 @@ dependencies = [ "chrono", "clap", "clap_complete", - "crate-git-revision", + "crate-git-revision 0.0.6 
(git+https://github.com/stellar/crate-git-revision?branch=dirty-untracked-files)", "csv", "directories", "dotenvy", @@ -5469,7 +5479,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08582c2c21bd3f7b737bcb76db9d4ca473f8349d65f8952a50eeed8823f44aef" dependencies = [ "arbitrary", - "crate-git-revision", + "crate-git-revision 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "ethnum", "num-derive", "num-traits", @@ -5569,7 +5579,7 @@ checksum = "bdc156a0183eb584e57d45f63f3bd7023165980131d6eecc939fe5cda2490c63" dependencies = [ "arbitrary", "bytes-lit", - "crate-git-revision", + "crate-git-revision 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "ctor", "derive_arbitrary", "ed25519-dalek", @@ -5863,7 +5873,7 @@ version = "0.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee1832fb50c651ad10f734aaf5d31ca5acdfb197a6ecda64d93fcdb8885af913" dependencies = [ - "crate-git-revision", + "crate-git-revision 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "data-encoding", ] @@ -5874,7 +5884,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97e1a364048067bfcd24e6f1a93bba43eeb79c16b854596c841c3e8bab0bfa0c" dependencies = [ "clap", - "crate-git-revision", + "crate-git-revision 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "data-encoding", "serde", "serde_json", @@ -5888,7 +5898,7 @@ version = "0.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "084afcb0d458c3d5d5baa2d294b18f881e62cc258ef539d8fdf68be7dbe45520" dependencies = [ - "crate-git-revision", + "crate-git-revision 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "data-encoding", "heapless", ] @@ -5903,7 +5913,7 @@ dependencies = [ "base64 0.22.1", "cfg_eval", "clap", - "crate-git-revision", + "crate-git-revision 0.0.6 (registry+https://github.com/rust-lang/crates.io-index)", "escape-bytes", "ethnum", "hex", diff --git 
a/cmd/soroban-cli/Cargo.toml b/cmd/soroban-cli/Cargo.toml index 8d8fd11a09..688c614761 100644 --- a/cmd/soroban-cli/Cargo.toml +++ b/cmd/soroban-cli/Cargo.toml @@ -134,7 +134,7 @@ rustc_version = "0.4.1" libc = "0.2" [build-dependencies] -crate-git-revision = "0.0.6" +crate-git-revision = { git = "https://github.com/stellar/crate-git-revision", branch = "dirty-untracked-files" } serde.workspace = true thiserror.workspace = true diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 9ea3279924..c1684736a5 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -626,9 +626,17 @@ impl Cmd { ) -> Result, Error> { // Pick the stellar-cli rev to install in the layered image. We // require the host CLI to have been built from a commit so the same - // version can be installed inside the container — guaranteeing - // `cliver` (recorded by the in-container build) matches the host's. - let cli_rev = build_docker_all::extract_full_sha(version::git())?; + // version can be installed inside the container — the in-container + // build records that exact rev in `cliver`. If the host CLI was + // itself built from a dirty working tree, warn but proceed: the + // wasm will reflect the *clean* commit (whatever's pushed to + // origin), not the host's local diff. + let (cli_rev, host_dirty) = build_docker_all::extract_full_sha(version::git())?; + if host_dirty { + print.warnln(format!( + "host stellar-cli was built from a dirty working tree at {cli_rev}; the layered image will install the clean commit and the resulting wasm will not match a build from the host's local diff" + )); + } // The user's --manifest-path (if any) is a host path; translate to // the in-container `/workspace/...` form. 
If absent, fall back to diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs index 2397bc419f..43ebc691f4 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs @@ -333,26 +333,30 @@ fn format_inner_cmd(inner: &InnerBuildArgs<'_>, base_resolved: &str) -> String { build_inner_argv(inner, base_resolved).join(" ") } -/// Reduce a host stellar-cli git revision string to a 40-char commit sha. +/// Reduce a host stellar-cli git revision string to a 40-char commit sha +/// plus a dirty flag. /// -/// `crate_git_revision` produces three shapes depending on how the host CLI -/// was installed (homebrew/crates.io/cargo-git). Only the full sha can be -/// passed to `cargo install --rev` reliably. (See issue #2535 for the -/// in-progress normalization of this rendering.) -pub fn extract_full_sha(git: &str) -> Result { +/// `crate_git_revision` (stellar's fork) emits one of two shapes: +/// - `<40-char-sha>` for a clean working tree +/// - `<40-char-sha>-dirty` for a working tree with uncommitted changes +/// +/// The bare sha is what `cargo install --git --rev` needs. Callers should +/// warn the user when the dirty flag is set: the layered image will install +/// the *clean* commit, so the resulting wasm won't match what a clean host +/// CLI would have produced. 
+pub fn extract_full_sha(git: &str) -> Result<(String, bool), Error> { if git.is_empty() { return Err(Error::NoHostCliRev); } - if is_full_sha(git) { - return Ok(git.to_string()); - } - if let Some(idx) = git.rfind("-g") { - let candidate = &git[idx + 2..]; - if is_full_sha(candidate) { - return Ok(candidate.to_string()); - } + let (sha, dirty) = match git.strip_suffix("-dirty") { + Some(s) => (s, true), + None => (git, false), + }; + if is_full_sha(sha) { + Ok((sha.to_string(), dirty)) + } else { + Err(Error::NoHostCliRev) } - Err(Error::NoHostCliRev) } fn is_full_sha(s: &str) -> bool { @@ -364,16 +368,19 @@ mod tests { use super::*; #[test] - fn extract_full_sha_full() { + fn extract_full_sha_clean() { let sha = "60f7458e7ecffddf2f2d91dc6d0d2db4fab03ecc"; - assert_eq!(extract_full_sha(sha).unwrap(), sha); + assert_eq!(extract_full_sha(sha).unwrap(), (sha.to_string(), false)); } #[test] - fn extract_full_sha_describe() { - let s = "v20.0.0-836-gfe07b3678833e07c43235a6caaeccff81e146856"; - let want = "fe07b3678833e07c43235a6caaeccff81e146856"; - assert_eq!(extract_full_sha(s).unwrap(), want); + fn extract_full_sha_dirty() { + let sha = "edc5397642bb6e53a4eb6c96348493df105ffa69"; + let input = format!("{sha}-dirty"); + assert_eq!( + extract_full_sha(&input).unwrap(), + (sha.to_string(), true) + ); } #[test] @@ -386,6 +393,14 @@ mod tests { assert!(matches!(extract_full_sha("abc"), Err(Error::NoHostCliRev))); } + #[test] + fn extract_full_sha_describe_form_no_longer_supported() { + // `crate_git_revision` (stellar's fork) emits only `` or + // `-dirty` now; the legacy git-describe form should error. 
+ let s = "v20.0.0-836-gfe07b3678833e07c43235a6caaeccff81e146856"; + assert!(matches!(extract_full_sha(s), Err(Error::NoHostCliRev))); + } + #[test] fn short_hash_is_deterministic_and_short() { let a = short_hash("docker.io/library/rust@sha256:abc", "deadbeef"); diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 61e3a9fb0c..871469d5be 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -55,6 +55,7 @@ pub enum Error { } impl Cmd { + #[allow(clippy::too_many_lines)] pub async fn run(&self, global_args: &global::Args) -> Result<(), Error> { let print = Print::new(global_args.quiet); From c5e982a88809785226f2372b7bb741dd9e2733ad Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 15:30:28 +1000 Subject: [PATCH 49/64] stream build output directly --- .../src/commands/contract/build_docker_all.rs | 21 +++++++------------ .../contract/build_docker_all/Dockerfile | 18 ++++++++++++++++ 2 files changed, 25 insertions(+), 14 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs index 43ebc691f4..35d34cecf2 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs @@ -84,15 +84,7 @@ pub async fn run_in_docker_all( print.infoln(format!( "Building stellar-cli build image {layered_tag} (base {base_resolved}, stellar-cli {cli_rev})" )); - build_layered_image( - &docker, - &base_resolved, - cli_rev, - wasm_target, - &layered_tag, - print, - ) - .await?; + build_layered_image(&docker, &base_resolved, cli_rev, wasm_target, &layered_tag).await?; print.infoln(format_inner_cmd(inner, &base_resolved)); @@ -116,7 +108,6 @@ async fn build_layered_image( cli_rev: &str, wasm_target: &str, tag: &str, - print: &Print, ) -> Result<(), Error> { 
let context = build_tar_context()?; @@ -141,11 +132,13 @@ async fn build_layered_image( let mut stream = docker.build_image(options, None, Some(body)); while let Some(item) = stream.next().await { let info = item?; + // Stream the build output verbatim. Docker emits each step header, + // each cargo progress line, and each compile message as its own + // chunk with its own line ending — re-formatting (trim, indent, + // print.blankln) makes the cargo install output unreadable. Match + // what `run_inner_build` does and trust the chunks. if let Some(s) = info.stream { - let s = s.trim_end_matches('\n'); - if !s.is_empty() { - print.blankln(s); - } + eprint!("{s}"); } if let Some(detail) = info.error_detail { return Err(Error::ImageBuild( diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile b/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile index 35ae89f5ab..e545ca1b45 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile +++ b/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile @@ -11,6 +11,24 @@ ARG BASE_IMAGE FROM ${BASE_IMAGE} +# stellar-cli's default `additional-libs` feature bundles `wasm-opt`, +# `keyring`, and `stellar-ledger`. On Linux these pull in C deps: +# - keyring → libdbus-sys → libdbus-1-dev +# - stellar-ledger → hidapi → libudev-dev +# - common → openssl-sys → libssl-dev +# We don't use keyring or ledger inside this container — it only runs +# `stellar contract build` — but the binary still has to compile. Installing +# the dev headers is the simplest path; a future cleanup is to split +# `additional-libs` into separate features so docker-all can install with +# just `--features wasm-opt`. 
+RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + libdbus-1-dev \ + libudev-dev \ + libssl-dev \ + pkg-config && \ + rm -rf /var/lib/apt/lists/* + ARG WASM_TARGET RUN rustup target add ${WASM_TARGET} From c8a10008648e04582d89e5a1c016ad48b779bdea Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 16:00:11 +1000 Subject: [PATCH 50/64] refactor verify to use backend metadata --- .../src/commands/contract/verify.rs | 55 ++++++++++++------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 871469d5be..bd22088482 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -12,11 +12,17 @@ use crate::commands::container::shared::Args as ContainerArgs; use crate::commands::{global, version}; use crate::print::Print; -/// Verify a wasm by rebuilding it inside the Docker image recorded in its metadata. +/// Verify a wasm by rebuilding it with the same backend recorded in its metadata. /// /// All cdylib contracts in the workspace are rebuilt; verification succeeds if /// any rebuilt artifact is byte-identical to the input. The user is responsible /// for checking out the matching commit before running. +/// +/// `bldbkd: docker` and `bldbkd: docker-all` rebuild inside the recorded image. +/// `bldbkd: local` rebuilds with the host rust toolchain pinned to the wasm's +/// `rsver` — this is best-effort: local builds depend on environment factors +/// (system libs, paths, env vars) that aren't captured in meta, so a verify +/// match is informative but not as strong a guarantee as the docker backends. 
#[derive(Parser, Debug, Clone)] #[group(skip)] pub struct Cmd { @@ -48,8 +54,6 @@ pub enum Error { }, #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), - #[error("contract was built with `--backend local` (bldbkd=local); local builds are not reproducible. Rebuild with `--backend docker` or `--backend docker-all` to make it verifiable.")] - LocalBackend, #[error("unknown bldbkd value '{0}'")] UnknownBackend(String), } @@ -67,7 +71,7 @@ impl Cmd { let original_hash = hex::encode(Sha256::digest(&wasm_bytes)); let spec = Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; - let bldimg = find_meta(&spec.meta, "bldimg").ok_or(Error::MissingMeta("bldimg"))?; + let bldimg = find_meta(&spec.meta, "bldimg"); let bldbkd = find_meta(&spec.meta, "bldbkd"); let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; let bldopt_manifest_path = find_meta(&spec.meta, "bldopt_manifest_path") @@ -81,7 +85,9 @@ impl Cmd { print.blankln(format!("Original wasm hash: {original_hash}")); print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); - print.blankln(format!("Docker image: {bldimg}")); + if let Some(b) = &bldimg { + print.blankln(format!("Docker image: {b}")); + } print.blankln(format!( "Build backend: {}", bldbkd.as_deref().unwrap_or("docker") @@ -93,28 +99,25 @@ impl Cmd { print.blankln("Optimize: true"); } - // For docker-all builds the in-container stellar-cli is what built - // the wasm, so the host version doesn't have to match. For docker - // (host post-processing), the host stellar-cli is part of the build - // pipeline and a mismatch means the rebuild will diverge. Legacy - // wasms with no bldbkd are treated as `docker`. + // Pick the rebuild backend. For docker-all the in-container CLI did + // the build, so the host CLI version doesn't have to match. 
For + // docker and local, the host CLI is part of the build pipeline, so + // a cliver mismatch means the rebuild will diverge. Legacy wasms + // with no bldbkd are treated as docker. let backend = match bldbkd.as_deref().unwrap_or("docker") { "docker-all" => build::Backend::DockerAll { - image: bldimg.clone(), + image: bldimg.ok_or(Error::MissingMeta("bldimg"))?, }, "docker" => { - let running = version::one_line(); - if cliver != running { - return Err(Error::CliVersionMismatch { - expected: cliver, - actual: running, - }); - } + require_cliver_match(&cliver)?; build::Backend::Docker { - image: bldimg.clone(), + image: bldimg.ok_or(Error::MissingMeta("bldimg"))?, } } - "local" => return Err(Error::LocalBackend), + "local" => { + require_cliver_match(&cliver)?; + build::Backend::Local + } other => return Err(Error::UnknownBackend(other.to_string())), }; @@ -177,6 +180,18 @@ impl Cmd { } } +fn require_cliver_match(expected: &str) -> Result<(), Error> { + let running = version::one_line(); + if expected == running { + Ok(()) + } else { + Err(Error::CliVersionMismatch { + expected: expected.to_string(), + actual: running, + }) + } +} + fn git_top_level() -> Option { let out = std::process::Command::new("git") .args(["rev-parse", "--show-toplevel"]) From 8c0864359806fe40d66137eb1f6ac2e37f9d443c Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 11:36:27 +0000 Subject: [PATCH 51/64] docker backend: run stellar inside stellar-cli image --- Cargo.lock | 14 - cmd/soroban-cli/Cargo.toml | 3 - .../src/commands/contract/build.rs | 248 +++-------- .../src/commands/contract/build_docker.rs | 255 +++++++---- .../src/commands/contract/build_docker_all.rs | 404 ------------------ .../contract/build_docker_all/Dockerfile | 41 -- cmd/soroban-cli/src/commands/contract/mod.rs | 1 - .../src/commands/contract/verify.rs | 67 +-- 8 files changed, 238 insertions(+), 795 deletions(-) delete mode 100644 
cmd/soroban-cli/src/commands/contract/build_docker_all.rs delete mode 100644 cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile diff --git a/Cargo.lock b/Cargo.lock index 4033c3a083..e55e8acf06 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5387,7 +5387,6 @@ dependencies = [ "async-trait", "base64 0.21.7", "bollard", - "bytes", "bytesize", "cargo_metadata", "chrono", @@ -5407,7 +5406,6 @@ dependencies = [ "heck 0.5.0", "hex", "home", - "http-body-util", "humantime", "indexmap 2.11.0", "itertools 0.10.5", @@ -5449,7 +5447,6 @@ dependencies = [ "strsim", "strum 0.17.1", "strum_macros 0.17.1", - "tar", "tempfile", "termcolor", "termcolor_output", @@ -6099,17 +6096,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" -[[package]] -name = "tar" -version = "0.4.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22692a6476a21fa75fdfc11d452fda482af402c008cdbaf3476414e122040973" -dependencies = [ - "filetime", - "libc", - "xattr", -] - [[package]] name = "temp-dir" version = "0.1.16" diff --git a/cmd/soroban-cli/Cargo.toml b/cmd/soroban-cli/Cargo.toml index 688c614761..0dd6933e1d 100644 --- a/cmd/soroban-cli/Cargo.toml +++ b/cmd/soroban-cli/Cargo.toml @@ -108,9 +108,6 @@ tempfile = "3.8.1" toml_edit = { workspace = true } rust-embed = { version = "8.2.0", features = ["debug-embed"] } bollard = { workspace = true } -bytes = { workspace = true } -tar = "0.4" -http-body-util = "0.1" futures-util = "0.3.30" futures = "0.3.30" home = "0.5.9" diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index c1684736a5..9e9bace59f 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -19,11 +19,7 @@ use stellar_xdr::curr::{Limited, Limits, ScMetaEntry, ScMetaV0, StringM, WriteXd #[cfg(feature = "additional-libs")] 
use crate::commands::contract::optimize; use crate::{ - commands::{ - container::shared::Args as ContainerArgs, - contract::{build_docker, build_docker_all}, - global, version, - }, + commands::{container::shared::Args as ContainerArgs, contract::build_docker, global, version}, print::Print, wasm, }; @@ -102,16 +98,13 @@ pub struct Cmd { /// Build backend. /// /// - `local` (default): build using the host's rust toolchain. - /// - `docker`: build inside `docker.io/library/rust:latest` (linux/amd64) - /// using the local docker daemon. The resolved image digest is - /// recorded in contract metadata. - /// - `docker=`: build inside the specified docker image. Pin - /// via `--backend docker=@sha256:...` for fully-reproducible builds. - /// - `docker-all` / `docker-all=`: like `docker`, but layers a - /// stellar-cli install on top of the base image and runs the entire - /// `stellar contract build` (including post-build steps) inside the - /// container. Captures the whole build pipeline in the image so the - /// host stellar-cli version is irrelevant for `verify`. + /// - `docker`: build inside `docker.io/stellar/stellar-cli:latest` + /// (linux/amd64) using the local docker daemon. The entire build + /// pipeline runs inside the container; the host orchestrates only. + /// The resolved image digest is recorded in contract metadata. + /// - `docker=`: build inside the specified image (must have + /// `stellar` as its entrypoint). Pin via + /// `--backend docker=@sha256:...` for fully-reproducible builds. /// /// Aborted docker builds may leave a stopped container; clean with `docker container prune`. #[arg( @@ -128,20 +121,9 @@ pub struct Cmd { /// Run cargo via `cargo +` to pin the rust toolchain. Set by /// `verify` from the wasm's `rsver` meta entry; not user-facing. - #[arg(long, hide = true)] + #[arg(skip)] pub rustup_toolchain: Option, - /// Override `bldimg` meta. 
Set by `--backend docker-all` when invoking - /// the in-container stellar-cli; not user-facing. - #[arg(long, hide = true)] - pub bldimg: Option, - - /// Override `bldbkd` meta (build backend identifier). Set by - /// `--backend docker-all` when invoking the in-container stellar-cli; - /// not user-facing. - #[arg(long, hide = true)] - pub bldbkd: Option, - #[command(flatten)] pub build_args: BuildArgs, @@ -162,25 +144,21 @@ pub enum Backend { /// Build with the host's rust toolchain. #[default] Local, - /// Build cargo inside a Docker container with the given image; post-build - /// steps run on the host. + /// Build inside a Docker container whose entrypoint is `stellar`. Docker { image: String }, - /// Build inside a stellar-cli image layered on top of the given base - /// image; the entire build pipeline runs inside the container. - DockerAll { image: String }, } impl Backend { /// Returns the docker image if the backend uses one, else `None`. pub fn docker_image(&self) -> Option<&str> { match self { - Self::Docker { image } | Self::DockerAll { image } => Some(image), + Self::Docker { image } => Some(image), Self::Local => None, } } } -const DEFAULT_DOCKER_IMAGE: &str = "docker.io/library/rust:latest"; +const DEFAULT_DOCKER_IMAGE: &str = "docker.io/stellar/stellar-cli:latest"; pub fn parse_backend(s: &str) -> Result { match s { @@ -188,9 +166,6 @@ pub fn parse_backend(s: &str) -> Result { "docker" => Ok(Backend::Docker { image: DEFAULT_DOCKER_IMAGE.to_string(), }), - "docker-all" => Ok(Backend::DockerAll { - image: DEFAULT_DOCKER_IMAGE.to_string(), - }), _ => { if let Some(image) = s.strip_prefix("docker=") { if image.is_empty() { @@ -199,16 +174,9 @@ pub fn parse_backend(s: &str) -> Result { Ok(Backend::Docker { image: image.to_string(), }) - } else if let Some(image) = s.strip_prefix("docker-all=") { - if image.is_empty() { - return Err("docker-all image cannot be empty; use `--backend docker-all` for the default image".to_string()); - } - 
Ok(Backend::DockerAll { - image: image.to_string(), - }) } else { Err(format!( - "unknown backend {s:?}; expected `local`, `docker`, `docker=`, `docker-all`, or `docker-all=`" + "unknown backend {s:?}; expected `local`, `docker`, or `docker=`" )) } } @@ -334,8 +302,6 @@ impl Default for Cmd { backend: Backend::Local, container_args: ContainerArgs { docker_host: None }, rustup_toolchain: None, - bldimg: None, - bldbkd: None, build_args: BuildArgs::default(), action: None, } @@ -376,9 +342,6 @@ impl Cmd { let wasm_target = get_wasm_target()?; let mut built_contracts = Vec::new(); - // Cache the resolved image digest across multi-contract workspace - // builds so the docker pull only runs once. - let mut resolved_image: Option = None; // Detect git state once for the build. Embed source_repo/source_rev // (and per-package source_path) when the workspace is a clean git @@ -399,13 +362,12 @@ impl Cmd { // container). Fall back to the cargo workspace root otherwise. let mount_root = git_root.as_deref().unwrap_or(workspace_root); - // `--backend docker-all` orchestrates a *layered* image build (base - // rust image + wasm target + stellar-cli installed at the host's - // commit sha) and runs the entire build pipeline inside it. The host - // skips the cargo + post-processing loop below. - if let Backend::DockerAll { image } = &self.backend { + // `--backend docker` runs the entire build pipeline (cargo + meta + // injection + spec filtering + optional wasm-opt) inside a container + // whose entrypoint is `stellar`. The host orchestrates only. + if let Backend::Docker { image } = &self.backend { return self - .run_docker_all( + .run_docker( image, &print, &packages, @@ -431,19 +393,11 @@ impl Cmd { cmd.arg(format!("+{toolchain}")); } cmd.arg("rustc"); - // Force --locked when building inside Docker so the build is deterministic. 
- if self.locked || self.backend.docker_image().is_some() { + if self.locked { cmd.arg("--locked"); } - let manifest_path = if self.backend.docker_image().is_some() { - // Inside the container the mount root is at /workspace. - let rel = pathdiff::diff_paths(&p.manifest_path, mount_root) - .unwrap_or(p.manifest_path.clone().into()); - Path::new(build_docker::WORK_DIR).join(rel) - } else { - pathdiff::diff_paths(&p.manifest_path, &working_dir) - .unwrap_or(p.manifest_path.clone().into()) - }; + let manifest_path = pathdiff::diff_paths(&p.manifest_path, &working_dir) + .unwrap_or(p.manifest_path.clone().into()); cmd.arg(format!( "--manifest-path={}", manifest_path.to_string_lossy() @@ -470,10 +424,7 @@ impl Cmd { } } - if let Some(rustflags) = make_rustflags_to_remap_absolute_paths( - &print, - self.backend.docker_image().is_some(), - )? { + if let Some(rustflags) = make_rustflags_to_remap_absolute_paths(&print)? { cmd.env("CARGO_BUILD_RUSTFLAGS", rustflags); } @@ -484,39 +435,13 @@ impl Cmd { let cmd_str = serialize_command(&cmd); if self.print_commands_only { - if let Some(image) = self.backend.docker_image() { - println!("# inside docker image: {image}"); - } println!("{cmd_str}"); } else { - let bldimg = if let Some(image) = self.backend.docker_image() { - let r = build_docker::run_in_docker( - &cmd, - &cmd_str, - image, - resolved_image.as_deref(), - mount_root, - target_dir.as_std_path(), - &wasm_target, - self.rustup_toolchain.as_deref(), - &self.container_args, - &print, - ) - .await?; - resolved_image = Some(r.clone()); - Some(r) - } else { - print.infoln(cmd_str); - let status = cmd.status().map_err(Error::CargoCmd)?; - if !status.success() { - return Err(Error::Exit(status)); - } - // When invoked from inside a `--backend docker-all` - // container, the outer host passes `--bldimg` so the - // in-container build still records the original base - // image digest in meta. 
- self.bldimg.clone() - }; + print.infoln(cmd_str); + let status = cmd.status().map_err(Error::CargoCmd)?; + if !status.success() { + return Err(Error::Exit(status)); + } let wasm_name = p.name.replace('-', "_"); let file = format!("{wasm_name}.wasm"); @@ -529,26 +454,10 @@ impl Cmd { pathdiff::diff_paths(&p.manifest_path, gr) .map(|p| p.to_string_lossy().into_owned()) }); - let bldbkd = self.bldbkd.clone().unwrap_or_else(|| { - match &self.backend { - Backend::Local => "local", - Backend::Docker { .. } => "docker", - // `Backend::DockerAll` returns early in `run` before - // reaching this loop; it never injects meta itself. - // The in-container `Backend::Local` invocation is - // what actually writes the wasm, with `--bldbkd - // docker-all` set so `self.bldbkd` is `Some` above. - Backend::DockerAll { .. } => { - unreachable!("DockerAll returns before inject_meta") - } - } - .to_string() - }); self.inject_meta( &target_file_path, &ExtraMeta { - bldimg: bldimg.clone(), - bldbkd: Some(bldbkd), + bldimg: None, source_repo: source_repo.clone(), source_rev: source_rev.clone(), bldopt_manifest_path, @@ -606,15 +515,13 @@ impl Cmd { Ok(built_contracts) } - /// Orchestrate a `--backend docker-all` build: build a stellar-cli image - /// layered on top of the chosen base, then run the full - /// `stellar contract build --backend local` pipeline inside it. - /// - /// The host doesn't run cargo or post-processing itself — those are done - /// by the in-container stellar-cli, against the bind-mounted source and - /// target directories. + /// Orchestrate a `--backend docker` build: pull the requested stellar-cli + /// image and run `stellar contract build --backend local` inside it + /// against the bind-mounted source. The in-container cli does cargo + + /// meta injection + spec filtering + optional wasm-opt itself; the host + /// only orchestrates and copies outputs to `--out-dir` if requested. 
#[allow(clippy::too_many_arguments)] - async fn run_docker_all( + async fn run_docker( &self, image: &str, print: &Print, @@ -624,35 +531,21 @@ impl Cmd { mount_root: &Path, wasm_target: &str, ) -> Result, Error> { - // Pick the stellar-cli rev to install in the layered image. We - // require the host CLI to have been built from a commit so the same - // version can be installed inside the container — the in-container - // build records that exact rev in `cliver`. If the host CLI was - // itself built from a dirty working tree, warn but proceed: the - // wasm will reflect the *clean* commit (whatever's pushed to - // origin), not the host's local diff. - let (cli_rev, host_dirty) = build_docker_all::extract_full_sha(version::git())?; - if host_dirty { - print.warnln(format!( - "host stellar-cli was built from a dirty working tree at {cli_rev}; the layered image will install the clean commit and the resulting wasm will not match a build from the host's local diff" - )); - } - // The user's --manifest-path (if any) is a host path; translate to - // the in-container `/workspace/...` form. If absent, fall back to - // the workspace root's Cargo.toml. + // the in-container `/source/...` form. If absent, fall back to the + // workspace root's Cargo.toml. 
let host_manifest = self .manifest_path .clone() .unwrap_or_else(|| workspace_root.join("Cargo.toml")); let rel = pathdiff::diff_paths(&host_manifest, mount_root) .unwrap_or_else(|| host_manifest.clone()); - let in_container_manifest = Path::new(build_docker::WORK_DIR) + let in_container_manifest = Path::new(build_docker::SOURCE_DIR) .join(rel) .to_string_lossy() .into_owned(); - let inner = build_docker_all::InnerBuildArgs { + let inner = build_docker::InnerBuildArgs { manifest_path: in_container_manifest, package: self.package.as_deref(), profile: &self.profile, @@ -661,15 +554,13 @@ impl Cmd { no_default_features: self.no_default_features, optimize: self.build_args.optimize, meta: &self.build_args.meta, - rustup_toolchain: self.rustup_toolchain.as_deref(), }; - build_docker_all::run_in_docker_all( + build_docker::run_in_docker( image, - &cli_rev, + None, + self.rustup_toolchain.as_deref(), mount_root, - target_dir, - wasm_target, &inner, &self.container_args, print, @@ -677,8 +568,7 @@ impl Cmd { .await?; // The in-container build wrote its outputs to the bind-mounted - // target dir (and optionally copied to --out-dir, but the inner - // doesn't see --out-dir; we copy here on the host side). + // target dir; copy to --out-dir if requested. let mut built = Vec::with_capacity(packages.len()); for p in packages { let wasm_name = p.name.replace('-', "_"); @@ -837,7 +727,6 @@ impl Cmd { // clean git checkout. let kvs = [ ("bldimg", extra.bldimg.as_deref()), - ("bldbkd", extra.bldbkd.as_deref()), ("source_repo", extra.source_repo.as_deref()), ("source_rev", extra.source_rev.as_deref()), ( @@ -969,12 +858,10 @@ impl Cmd { #[derive(Default, Debug, Clone)] struct ExtraMeta { /// `bldimg`: fully-qualified container image used to build (e.g. - /// `docker.io/library/rust@sha256:...`). Set when `--backend docker`. + /// `docker.io/stellar/stellar-cli@sha256:...`). 
Set when `--backend + /// docker`; injected by the in-container cli via `--meta bldimg=...`, + /// not by this struct (which is only used on the local code path). bldimg: Option, - /// `bldbkd`: build backend identifier — one of `local`, `docker`, or - /// `docker-all`. Always recorded so anyone inspecting the wasm can see - /// which build path produced it. - bldbkd: Option, /// `source_repo`: HTTPS URL of the workspace's git origin remote. /// Set only when the workspace is a clean git checkout. source_repo: Option, @@ -1143,22 +1030,7 @@ fn serialize_command(cmd: &Command) -> String { /// the absolute path replacement. Non-Unicode `CARGO_BUILD_RUSTFLAGS` will result in the /// existing rustflags being ignored, which is also the behavior of /// Cargo itself. -fn make_rustflags_to_remap_absolute_paths( - print: &Print, - in_docker: bool, -) -> Result, Error> { - // Inside the container the cargo registry is always mounted at - // /usr/local/cargo/registry and the workspace at /workspace, so the host's - // env vars (RUSTFLAGS, cargo_home) are irrelevant — the container does - // not inherit them. Use fixed container paths so two hosts produce the - // same wasm. - if in_docker { - return Ok(Some( - "--remap-path-prefix=/usr/local/cargo/registry/src/= --remap-path-prefix=/workspace=" - .to_string(), - )); - } - +fn make_rustflags_to_remap_absolute_paths(print: &Print) -> Result, Error> { let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; if format!("{}", cargo_home.display()) @@ -1387,26 +1259,20 @@ mod tests { } #[test] - fn parse_backend_recognizes_docker_all() { + fn parse_backend_cases() { assert!(matches!(parse_backend("local"), Ok(Backend::Local))); - assert!(matches!( - parse_backend("docker"), - Ok(Backend::Docker { .. }) - )); - assert!(matches!( - parse_backend("docker=quay.io/foo/bar:tag"), - Ok(Backend::Docker { .. }) - )); - assert!(matches!( - parse_backend("docker-all"), - Ok(Backend::DockerAll { .. 
}) - )); - let parsed = parse_backend("docker-all=quay.io/foo/bar:tag").unwrap(); + let parsed = parse_backend("docker").unwrap(); + match parsed { + Backend::Docker { image } => assert_eq!(image, DEFAULT_DOCKER_IMAGE), + other @ Backend::Local => panic!("expected Docker, got {other:?}"), + } + let parsed = parse_backend("docker=quay.io/foo/bar:tag").unwrap(); match parsed { - Backend::DockerAll { image } => assert_eq!(image, "quay.io/foo/bar:tag"), - other => panic!("expected DockerAll, got {other:?}"), + Backend::Docker { image } => assert_eq!(image, "quay.io/foo/bar:tag"), + other @ Backend::Local => panic!("expected Docker, got {other:?}"), } - assert!(parse_backend("docker-all=").is_err()); + assert!(parse_backend("docker=").is_err()); + assert!(parse_backend("docker-all").is_err()); assert!(parse_backend("nonsense").is_err()); } } diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index ab8c572350..7aba70da82 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -1,6 +1,25 @@ -use std::ffi::OsStr; +//! `--backend docker` build backend. +//! +//! Runs the entire `stellar contract build` pipeline inside a container +//! whose entrypoint is `stellar` (the official `stellar/stellar-cli` +//! image, or any user-supplied image with the same shape). The host +//! orchestrates only — pull, set up bind mounts, run, stream logs. +//! +//! The host CLI's version is irrelevant: whatever cli is in the image is +//! what builds the wasm and what records `cliver` / `rsver` / source meta +//! into the wasm. The host injects `bldimg` (the pulled image's resolved +//! digest) via the inner cli's `--meta` mechanism — no new flags. +//! +//! For `verify`, the recorded `bldimg` is pulled (so the same cli runs) +//! and `RUSTUP_TOOLCHAIN` is set from the wasm's `rsver` so the rust +//! 
toolchain matches whatever the original build used. +//! +//! User-supplied images must: +//! - Have `stellar` as their entrypoint +//! - Have `rustup` available with the `wasm32v1-none` target installed +//! (preflight-checked before the build runs) + use std::path::Path; -use std::process::Command; use bollard::{ models::ContainerCreateBody, @@ -19,8 +38,11 @@ use crate::{ }; const PLATFORM: &str = "linux/amd64"; -pub const WORK_DIR: &str = "/workspace"; -const TARGET_DIR: &str = "/target"; +/// Where the workspace gets bind-mounted inside the container. Matches the +/// official `stellar/stellar-cli` image's `WORKDIR`. Cargo writes its +/// target directory under this path, so the host reads the wasm via the +/// same bind mount — no separate `/target` mount needed. +pub const SOURCE_DIR: &str = "/source"; const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; #[derive(thiserror::Error, Debug)] @@ -49,31 +71,39 @@ pub enum Error { #[error("docker run: {0}")] Runtime(#[from] bollard::errors::Error), - #[error("resolving CARGO_HOME / RUSTUP_HOME: {0}")] + #[error("resolving CARGO_HOME: {0}")] CargoHome(std::io::Error), +} - #[error("building stellar-cli image: {0}")] - ImageBuild(String), - - #[error("packaging Dockerfile context: {0}")] - Tar(std::io::Error), - - #[error("host stellar-cli has no commit sha to install in container; rebuild from a git checkout (or install via `cargo install --git ... --rev `)")] - NoHostCliRev, +/// Forwarded host build args used to construct the inner +/// `stellar contract build --backend local` invocation. `manifest_path` is +/// expected to already be in container-relative form (`/source/...`). 
+pub struct InnerBuildArgs<'a> { + pub manifest_path: String, + pub package: Option<&'a str>, + pub profile: &'a str, + pub features: Option<&'a str>, + pub all_features: bool, + pub no_default_features: bool, + pub optimize: bool, + pub meta: &'a [(String, String)], } -/// Pull (if needed) and run the host `cmd` inside a linux/amd64 container, -/// returning the resolved `name@sha256:...` reference for embedding into meta. +/// Pull the image (if needed), then run the in-container +/// `stellar contract build --backend local --meta bldimg=` against +/// the bind-mounted source. Returns the resolved image digest for the host +/// to record. +/// +/// `rsver` is `None` for fresh builds and `Some()` for verify; +/// when set, `RUSTUP_TOOLCHAIN` inside the container is pinned to that +/// toolchain so rustup-managed cargo uses the matching rust version. #[allow(clippy::too_many_arguments)] pub async fn run_in_docker( - cmd: &Command, - cmd_str: &str, image: &str, pre_resolved: Option<&str>, + rsver: Option<&str>, mount_root: &Path, - target_dir: &Path, - wasm_target: &str, - pin_toolchain: Option<&str>, + inner: &InnerBuildArgs<'_>, container_args: &ContainerArgs, print: &Print, ) -> Result { @@ -82,80 +112,61 @@ pub async fn run_in_docker( .await .map_err(Error::RuntimeNotRunning)?; - // Pull and resolve only on the first call; subsequent invocations within - // the same build (e.g. workspace with multiple contracts) reuse the - // already-resolved digest and skip the pull progress output. let resolved = if let Some(r) = pre_resolved { r.to_string() } else { pull_image(&docker, image, print).await?; resolve_image_digest(&docker, image).await? }; - // Print the cargo invocation after the pull progress so the on-screen - // order matches execution: pull → cargo → cargo output. - print.infoln(cmd_str); - - // Bind-mount the host's cargo registry to cache crate downloads across - // runs. Crate sources are platform-agnostic so this is safe. 
- // - // We deliberately do not mount the host's `~/.rustup`: it contains - // toolchain binaries built for the host's OS/arch (e.g. Mach-O on macOS), - // which the linux/amd64 container cannot exec. The image's pre-installed - // rustup state is used instead; the wasm target is installed on each - // container run. + + print.infoln(format_inner_cmd(inner, &resolved)); + run_inner_build(&docker, &resolved, inner, rsver, mount_root).await?; + + Ok(resolved) +} + +async fn run_inner_build( + docker: &Docker, + image: &str, + inner: &InnerBuildArgs<'_>, + rsver: Option<&str>, + mount_root: &Path, +) -> Result<(), Error> { let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; let binds = vec![ - format!("{}:{}", mount_root.display(), WORK_DIR), - format!("{}:{}", target_dir.display(), TARGET_DIR), + format!("{}:{}", mount_root.display(), SOURCE_DIR), format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), ]; - let mut env: Vec = cmd - .get_envs() - .filter_map(|(k, v)| { - v.map(|val| format!("{}={}", k.to_string_lossy(), val.to_string_lossy())) - }) - .collect(); - env.push(format!("CARGO_TARGET_DIR={TARGET_DIR}")); - env.push(format!( - "SOURCE_DATE_EPOCH={}", - source_date_epoch(mount_root) - )); - // Force cargo to emit color (otherwise cargo detects the non-TTY stdout - // and falls back to monochrome). Matches what users see for local builds. - env.push("CARGO_TERM_COLOR=always".to_string()); - - let argv: Vec = std::iter::once(cmd.get_program()) - .chain(cmd.get_args()) - .map(OsStr::to_string_lossy) - .map(std::borrow::Cow::into_owned) - .collect(); - // Always install the wasm target before the build so we don't depend on - // the workspace's `rust-toolchain.toml` having configured it. Args pass - // through `$@` so we don't have to shell-escape. 
- let toolchain_arg = pin_toolchain - .map(|t| format!("--toolchain {t} ")) - .unwrap_or_default(); - let mut container_cmd = vec![ + let mut env = vec![ + format!("SOURCE_DATE_EPOCH={}", source_date_epoch(mount_root)), + "CARGO_TERM_COLOR=always".to_string(), + ]; + if let Some(t) = rsver { + env.push(format!("RUSTUP_TOOLCHAIN={t}")); + } + + // Override the image's entrypoint with `sh -c` so we can preflight-check + // the wasm target before invoking the cli. Works against the official + // `stellar/stellar-cli` image and any compatible custom image. + let entrypoint = vec![ "sh".to_string(), "-c".to_string(), - format!("rustup --quiet target add {toolchain_arg}{wasm_target} && exec \"$@\""), - "sh".to_string(), + preflight_script(), ]; - container_cmd.extend(argv); + let argv = build_inner_argv(inner, image); let config = ContainerCreateBody { - image: Some(resolved.clone()), - cmd: Some(container_cmd), + image: Some(image.to_string()), + entrypoint: Some(entrypoint), + cmd: Some(argv), env: Some(env), - working_dir: Some(WORK_DIR.to_string()), + working_dir: Some(SOURCE_DIR.to_string()), user: current_uid_gid(), attach_stdout: Some(true), attach_stderr: Some(true), host_config: Some(HostConfig { binds: Some(binds), - // auto_remove=false so we can stream logs first, then call - // remove_container ourselves with force=true even on failure paths. auto_remove: Some(false), ..Default::default() }), @@ -167,7 +178,7 @@ pub async fn run_in_docker( .await? .id; - let result = run_and_wait(&docker, &container_id).await; + let result = stream_and_wait(docker, &container_id).await; let _ = docker .remove_container( @@ -179,11 +190,22 @@ pub async fn run_in_docker( ) .await; - result?; - Ok(resolved) + result } -async fn run_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> { +/// Shell script run as the container's entrypoint. 
Preflight-checks that +/// `wasm32v1-none` is installed in the active rust toolchain, then `exec`s +/// `stellar` with the args passed via `cmd`. +fn preflight_script() -> String { + "rustup target list --installed 2>/dev/null | grep -q '^wasm32v1-none$' || { \ + echo 'error: wasm32v1-none target not installed in image; install it with `rustup target add wasm32v1-none` or use a different image' >&2; \ + exit 1; \ + }; \ + exec stellar \"$@\"" + .to_string() +} + +async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> { docker .start_container(container_id, None::) .await?; @@ -201,8 +223,6 @@ async fn run_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> let s = item?.to_string(); let s = s.trim_end_matches('\n'); if !s.is_empty() { - // Emit container output raw (no `ℹ️` prefix) so it looks like - // cargo running locally. eprintln!("{s}"); } } @@ -222,7 +242,66 @@ async fn run_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> Ok(()) } -pub(super) async fn pull_image(docker: &Docker, image: &str, print: &Print) -> Result<(), Error> { +/// Build the argv passed via `cmd` after the `sh -c '... exec stellar "$@"'` +/// entrypoint. The first element is the `$0` placeholder for sh; the rest +/// become `stellar`'s actual args. +/// +/// `bldimg` is forwarded as `--meta bldimg=` so the in-container +/// `--backend local` build records the image identity in the wasm meta — +/// without needing a new cli flag. 
+fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { + let mut argv: Vec = vec![ + "sh".to_string(), // $0 placeholder + "contract".to_string(), + "build".to_string(), + "--backend".to_string(), + "local".to_string(), + "--manifest-path".to_string(), + inner.manifest_path.clone(), + "--profile".to_string(), + inner.profile.to_string(), + "--locked".to_string(), + "--meta".to_string(), + format!("bldimg={image}"), + ]; + if let Some(p) = inner.package { + argv.push("--package".to_string()); + argv.push(p.to_string()); + } + if let Some(f) = inner.features { + argv.push("--features".to_string()); + argv.push(f.to_string()); + } + if inner.all_features { + argv.push("--all-features".to_string()); + } + if inner.no_default_features { + argv.push("--no-default-features".to_string()); + } + if inner.optimize { + argv.push("--optimize".to_string()); + } + for (k, v) in inner.meta { + argv.push("--meta".to_string()); + argv.push(format!("{k}={v}")); + } + argv +} + +fn format_inner_cmd(inner: &InnerBuildArgs<'_>, image: &str) -> String { + // Skip the `$0` placeholder when displaying. + build_inner_argv(inner, image) + .into_iter() + .skip(1) + .collect::>() + .join(" ") +} + +pub(super) async fn pull_image( + docker: &Docker, + image: &str, + print: &Print, +) -> Result<(), Error> { let mut stream = docker.create_image( Some(CreateImageOptions { from_image: Some(image.to_string()), @@ -259,7 +338,10 @@ pub(super) async fn pull_image(docker: &Docker, image: &str, print: &Print) -> R // Returns a fully-qualified `/@sha256:` reference so // that `verify` on a different machine can resolve it without depending on // local registry config. 
-pub(super) async fn resolve_image_digest(docker: &Docker, image: &str) -> Result { +pub(super) async fn resolve_image_digest( + docker: &Docker, + image: &str, +) -> Result { let canonical = fully_qualify(strip_tag(image)); let digest = if let Some(d) = sha256_digest(image) { d.to_string() @@ -282,16 +364,13 @@ pub(super) async fn resolve_image_digest(docker: &Docker, image: &str) -> Result Ok(format!("{canonical}@{digest}")) } -/// Returns the `sha256:...` portion of a `@sha256:...` reference, if present. fn sha256_digest(image: &str) -> Option<&str> { let (_, after) = image.rsplit_once('@')?; after.starts_with("sha256:").then_some(after) } -/// Strip any `@sha256:...` and `:tag` suffix, leaving only the repository name. fn strip_tag(image: &str) -> &str { let no_digest = image.split_once('@').map_or(image, |(name, _)| name); - // Tags appear after the last `/`; a `:` in the host portion (host:port) is not a tag. match no_digest.rfind('/') { Some(slash) => match no_digest[slash + 1..].rfind(':') { Some(colon) => &no_digest[..slash + 1 + colon], @@ -304,7 +383,6 @@ fn strip_tag(image: &str) -> &str { } } -/// Add the implicit `docker.io` registry (and `library/` namespace for short names). fn fully_qualify(name: &str) -> String { let has_registry = name .split_once('/') @@ -320,22 +398,21 @@ fn fully_qualify(name: &str) -> String { #[allow(clippy::unnecessary_wraps)] #[cfg(unix)] -pub(super) fn current_uid_gid() -> Option { - // SAFETY: getuid/getgid are infallible POSIX calls. +fn current_uid_gid() -> Option { Some(format!("{}:{}", unsafe { libc::getuid() }, unsafe { libc::getgid() })) } #[cfg(not(unix))] -pub(super) fn current_uid_gid() -> Option { +fn current_uid_gid() -> Option { None } /// Best-effort SOURCE_DATE_EPOCH from the workspace's HEAD commit time; /// falls back to `"0"` when not in a git repo. 
-pub(super) fn source_date_epoch(mount_root: &Path) -> String { - Command::new("git") +fn source_date_epoch(mount_root: &Path) -> String { + std::process::Command::new("git") .arg("-C") .arg(mount_root) .args(["log", "-1", "--format=%ct"]) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs b/cmd/soroban-cli/src/commands/contract/build_docker_all.rs deleted file mode 100644 index 35d34cecf2..0000000000 --- a/cmd/soroban-cli/src/commands/contract/build_docker_all.rs +++ /dev/null @@ -1,404 +0,0 @@ -//! `--backend docker-all` build backend. -//! -//! Layers a stellar-cli install on top of the user's chosen rust base image, -//! then runs the entire `stellar contract build --backend local` pipeline -//! inside the container — including the post-build steps (meta injection, -//! spec filtering, optimization). The host-side stellar-cli only orchestrates. -//! -//! Reproducibility: `bldimg` records the *base* rust image digest (same as -//! `--backend docker`), `cliver` records the stellar-cli version installed -//! into the layered image, and `rsver` is recorded by soroban-sdk. Together -//! these three reconstruct the layered image on `verify`. 
- -use std::collections::HashMap; -use std::path::Path; - -use bollard::{ - models::ContainerCreateBody, - query_parameters::{ - BuildImageOptionsBuilder, CreateContainerOptions, LogsOptions, RemoveContainerOptions, - StartContainerOptions, WaitContainerOptions, - }, - service::HostConfig, - Docker, -}; -use bytes::Bytes; -use futures_util::StreamExt; -use http_body_util::{Either, Full}; - -use super::build_docker::{ - current_uid_gid, pull_image, resolve_image_digest, source_date_epoch, Error, -}; -use crate::{commands::container::shared::Args as ContainerArgs, print::Print}; - -const DOCKERFILE: &str = include_str!("build_docker_all/Dockerfile"); -const STELLAR_CLI_REPO: &str = "https://github.com/stellar/stellar-cli"; - -/// Where the workspace and target are mounted inside the container, and where -/// the cargo registry cache lives. The first two are bind mounts shared with -/// the host; the registry cache is a bind mount of the host's cargo registry. -const WORK_DIR: &str = "/workspace"; -const TARGET_DIR: &str = "/target"; -const REGISTRY_DIR: &str = "/usr/local/cargo/registry"; -const PLATFORM: &str = "linux/amd64"; - -/// Forwarded host build args used to construct the inner -/// `stellar contract build --backend local` invocation. `manifest_path` is -/// expected to already be in container-relative form (`/workspace/...`). -pub struct InnerBuildArgs<'a> { - pub manifest_path: String, - pub package: Option<&'a str>, - pub profile: &'a str, - pub features: Option<&'a str>, - pub all_features: bool, - pub no_default_features: bool, - pub optimize: bool, - pub meta: &'a [(String, String)], - pub rustup_toolchain: Option<&'a str>, -} - -/// Pull the base image, build a layered stellar-cli image on top of it, then -/// run `stellar contract build --backend local --bldimg --bldbkd docker-all` -/// inside that layered image. Returns the resolved base image reference for -/// embedding into `bldimg`. 
-#[allow(clippy::too_many_arguments)] -pub async fn run_in_docker_all( - base_image: &str, - cli_rev: &str, - mount_root: &Path, - target_dir: &Path, - wasm_target: &str, - inner: &InnerBuildArgs<'_>, - container_args: &ContainerArgs, - print: &Print, -) -> Result { - let docker: Docker = container_args - .connect_to_docker(print) - .await - .map_err(Error::RuntimeNotRunning)?; - - pull_image(&docker, base_image, print).await?; - let base_resolved = resolve_image_digest(&docker, base_image).await?; - - let layered_tag = format!("stellar-cli-build:{}", short_hash(&base_resolved, cli_rev)); - print.infoln(format!( - "Building stellar-cli build image {layered_tag} (base {base_resolved}, stellar-cli {cli_rev})" - )); - build_layered_image(&docker, &base_resolved, cli_rev, wasm_target, &layered_tag).await?; - - print.infoln(format_inner_cmd(inner, &base_resolved)); - - run_inner_build( - &docker, - &layered_tag, - &base_resolved, - inner, - mount_root, - target_dir, - ) - .await?; - - Ok(base_resolved) -} - -/// Build the layered image (FROM base + rustup target + cargo install stellar-cli). 
-async fn build_layered_image( - docker: &Docker, - base_image: &str, - cli_rev: &str, - wasm_target: &str, - tag: &str, -) -> Result<(), Error> { - let context = build_tar_context()?; - - let mut buildargs: HashMap = HashMap::new(); - buildargs.insert("BASE_IMAGE".to_string(), base_image.to_string()); - buildargs.insert("WASM_TARGET".to_string(), wasm_target.to_string()); - buildargs.insert( - "STELLAR_CLI_REPO".to_string(), - STELLAR_CLI_REPO.to_string(), - ); - buildargs.insert("STELLAR_CLI_REV".to_string(), cli_rev.to_string()); - - let options = BuildImageOptionsBuilder::default() - .dockerfile("Dockerfile") - .t(tag) - .platform(PLATFORM) - .buildargs(&buildargs) - .rm(true) - .build(); - - let body = Either::Left(Full::new(context)); - let mut stream = docker.build_image(options, None, Some(body)); - while let Some(item) = stream.next().await { - let info = item?; - // Stream the build output verbatim. Docker emits each step header, - // each cargo progress line, and each compile message as its own - // chunk with its own line ending — re-formatting (trim, indent, - // print.blankln) makes the cargo install output unreadable. Match - // what `run_inner_build` does and trust the chunks. - if let Some(s) = info.stream { - eprint!("{s}"); - } - if let Some(detail) = info.error_detail { - return Err(Error::ImageBuild( - detail.message.unwrap_or_else(|| "unknown".to_string()), - )); - } - } - Ok(()) -} - -/// Construct an in-memory tar containing only the embedded Dockerfile. 
-fn build_tar_context() -> Result { - let dockerfile = DOCKERFILE.as_bytes(); - let mut buf = Vec::new(); - { - let mut builder = tar::Builder::new(&mut buf); - let mut header = tar::Header::new_gnu(); - header.set_path("Dockerfile").map_err(Error::Tar)?; - header.set_size(dockerfile.len() as u64); - header.set_mode(0o644); - header.set_cksum(); - builder.append(&header, dockerfile).map_err(Error::Tar)?; - builder.finish().map_err(Error::Tar)?; - } - Ok(Bytes::from(buf)) -} - -/// Run the in-container `stellar contract build --backend local --bldimg ... --bldbkd docker-all`. -async fn run_inner_build( - docker: &Docker, - layered_tag: &str, - base_resolved: &str, - inner: &InnerBuildArgs<'_>, - mount_root: &Path, - target_dir: &Path, -) -> Result<(), Error> { - let cargo_home = home::cargo_home().map_err(Error::CargoHome)?; - let binds = vec![ - format!("{}:{}", mount_root.display(), WORK_DIR), - format!("{}:{}", target_dir.display(), TARGET_DIR), - format!("{}:{}", cargo_home.join("registry").display(), REGISTRY_DIR), - ]; - - let env = vec![ - format!("CARGO_TARGET_DIR={TARGET_DIR}"), - format!("SOURCE_DATE_EPOCH={}", source_date_epoch(mount_root)), - "CARGO_TERM_COLOR=always".to_string(), - ]; - - let argv = build_inner_argv(inner, base_resolved); - - let config = ContainerCreateBody { - image: Some(layered_tag.to_string()), - cmd: Some(argv), - env: Some(env), - working_dir: Some(WORK_DIR.to_string()), - user: current_uid_gid(), - attach_stdout: Some(true), - attach_stderr: Some(true), - host_config: Some(HostConfig { - binds: Some(binds), - auto_remove: Some(false), - ..Default::default() - }), - ..Default::default() - }; - - let container_id = docker - .create_container(None::, config) - .await? 
- .id; - - let result = stream_and_wait(docker, &container_id).await; - - let _ = docker - .remove_container( - &container_id, - Some(RemoveContainerOptions { - force: true, - ..Default::default() - }), - ) - .await; - - result -} - -async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> { - docker - .start_container(container_id, None::) - .await?; - - let mut log_stream = docker.logs( - container_id, - Some(LogsOptions { - follow: true, - stdout: true, - stderr: true, - ..Default::default() - }), - ); - while let Some(item) = log_stream.next().await { - let s = item?.to_string(); - let s = s.trim_end_matches('\n'); - if !s.is_empty() { - eprintln!("{s}"); - } - } - - let mut wait_stream = docker.wait_container(container_id, None::); - let mut exit_code: i64 = 0; - while let Some(res) = wait_stream.next().await { - match res { - Ok(r) => exit_code = r.status_code, - Err(bollard::errors::Error::DockerContainerWaitError { code, .. }) => exit_code = code, - Err(e) => return Err(Error::Runtime(e)), - } - } - if exit_code != 0 { - return Err(Error::BuildExit(exit_code)); - } - Ok(()) -} - -/// Build the argv for the in-container `stellar contract build --backend local`. -fn build_inner_argv(inner: &InnerBuildArgs<'_>, base_resolved: &str) -> Vec { - let mut argv: Vec = vec![ - "stellar".to_string(), - "contract".to_string(), - "build".to_string(), - "--backend".to_string(), - "local".to_string(), - "--bldimg".to_string(), - base_resolved.to_string(), - "--bldbkd".to_string(), - "docker-all".to_string(), - "--manifest-path".to_string(), - inner.manifest_path.clone(), - "--profile".to_string(), - inner.profile.to_string(), - // Always --locked so the inner build is deterministic. 
- "--locked".to_string(), - ]; - if let Some(p) = inner.package { - argv.push("--package".to_string()); - argv.push(p.to_string()); - } - if let Some(f) = inner.features { - argv.push("--features".to_string()); - argv.push(f.to_string()); - } - if inner.all_features { - argv.push("--all-features".to_string()); - } - if inner.no_default_features { - argv.push("--no-default-features".to_string()); - } - if inner.optimize { - argv.push("--optimize".to_string()); - } - for (k, v) in inner.meta { - argv.push("--meta".to_string()); - argv.push(format!("{k}={v}")); - } - if let Some(t) = inner.rustup_toolchain { - argv.push("--rustup-toolchain".to_string()); - argv.push(t.to_string()); - } - argv -} - -/// Stable short tag suffix from `(base_image, cli_rev)`. -fn short_hash(base_image: &str, cli_rev: &str) -> String { - use sha2::{Digest, Sha256}; - let mut h = Sha256::new(); - h.update(base_image.as_bytes()); - h.update(b"\0"); - h.update(cli_rev.as_bytes()); - let digest = h.finalize(); - hex::encode(&digest[..8]) -} - -/// One-line preview of the inner cargo command, for the `ℹ︎ ...` info log. -fn format_inner_cmd(inner: &InnerBuildArgs<'_>, base_resolved: &str) -> String { - build_inner_argv(inner, base_resolved).join(" ") -} - -/// Reduce a host stellar-cli git revision string to a 40-char commit sha -/// plus a dirty flag. -/// -/// `crate_git_revision` (stellar's fork) emits one of two shapes: -/// - `<40-char-sha>` for a clean working tree -/// - `<40-char-sha>-dirty` for a working tree with uncommitted changes -/// -/// The bare sha is what `cargo install --git --rev` needs. Callers should -/// warn the user when the dirty flag is set: the layered image will install -/// the *clean* commit, so the resulting wasm won't match what a clean host -/// CLI would have produced. 
-pub fn extract_full_sha(git: &str) -> Result<(String, bool), Error> { - if git.is_empty() { - return Err(Error::NoHostCliRev); - } - let (sha, dirty) = match git.strip_suffix("-dirty") { - Some(s) => (s, true), - None => (git, false), - }; - if is_full_sha(sha) { - Ok((sha.to_string(), dirty)) - } else { - Err(Error::NoHostCliRev) - } -} - -fn is_full_sha(s: &str) -> bool { - s.len() == 40 && s.chars().all(|c| c.is_ascii_hexdigit()) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn extract_full_sha_clean() { - let sha = "60f7458e7ecffddf2f2d91dc6d0d2db4fab03ecc"; - assert_eq!(extract_full_sha(sha).unwrap(), (sha.to_string(), false)); - } - - #[test] - fn extract_full_sha_dirty() { - let sha = "edc5397642bb6e53a4eb6c96348493df105ffa69"; - let input = format!("{sha}-dirty"); - assert_eq!( - extract_full_sha(&input).unwrap(), - (sha.to_string(), true) - ); - } - - #[test] - fn extract_full_sha_empty_errors() { - assert!(matches!(extract_full_sha(""), Err(Error::NoHostCliRev))); - } - - #[test] - fn extract_full_sha_short_errors() { - assert!(matches!(extract_full_sha("abc"), Err(Error::NoHostCliRev))); - } - - #[test] - fn extract_full_sha_describe_form_no_longer_supported() { - // `crate_git_revision` (stellar's fork) emits only `` or - // `-dirty` now; the legacy git-describe form should error. 
- let s = "v20.0.0-836-gfe07b3678833e07c43235a6caaeccff81e146856"; - assert!(matches!(extract_full_sha(s), Err(Error::NoHostCliRev))); - } - - #[test] - fn short_hash_is_deterministic_and_short() { - let a = short_hash("docker.io/library/rust@sha256:abc", "deadbeef"); - let b = short_hash("docker.io/library/rust@sha256:abc", "deadbeef"); - assert_eq!(a, b); - assert_eq!(a.len(), 16); - } -} diff --git a/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile b/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile deleted file mode 100644 index e545ca1b45..0000000000 --- a/cmd/soroban-cli/src/commands/contract/build_docker_all/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -# syntax=docker/dockerfile:1 -# -# Build image used by `stellar contract build --backend docker-all`. -# -# Layered on top of a user-chosen rust base image so the rust toolchain stays -# pinnable to whatever the user trusts. The stellar-cli is installed inside -# at the same git revision the host stellar-cli was built from, so the entire -# build pipeline (cargo + meta injection + spec filtering + optimize) runs -# inside the container and the image captures everything except the source. - -ARG BASE_IMAGE -FROM ${BASE_IMAGE} - -# stellar-cli's default `additional-libs` feature bundles `wasm-opt`, -# `keyring`, and `stellar-ledger`. On Linux these pull in C deps: -# - keyring → libdbus-sys → libdbus-1-dev -# - stellar-ledger → hidapi → libudev-dev -# - common → openssl-sys → libssl-dev -# We don't use keyring or ledger inside this container — it only runs -# `stellar contract build` — but the binary still has to compile. Installing -# the dev headers is the simplest path; a future cleanup is to split -# `additional-libs` into separate features so docker-all can install with -# just `--features wasm-opt`. 
-RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - libdbus-1-dev \ - libudev-dev \ - libssl-dev \ - pkg-config && \ - rm -rf /var/lib/apt/lists/* - -ARG WASM_TARGET -RUN rustup target add ${WASM_TARGET} - -ARG STELLAR_CLI_REPO -ARG STELLAR_CLI_REV -RUN cargo install \ - --locked \ - --git ${STELLAR_CLI_REPO} \ - --rev ${STELLAR_CLI_REV} \ - stellar-cli diff --git a/cmd/soroban-cli/src/commands/contract/mod.rs b/cmd/soroban-cli/src/commands/contract/mod.rs index 3cb344de6e..602e107972 100644 --- a/cmd/soroban-cli/src/commands/contract/mod.rs +++ b/cmd/soroban-cli/src/commands/contract/mod.rs @@ -4,7 +4,6 @@ pub mod asset; pub mod bindings; pub mod build; pub mod build_docker; -pub mod build_docker_all; pub mod deploy; pub mod extend; pub mod fetch; diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index bd22088482..7bcb849b90 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -9,20 +9,22 @@ use stellar_xdr::curr::{ScMetaEntry, ScMetaV0}; use super::build; use super::info::shared::{self, fetch, Contract, Fetched}; use crate::commands::container::shared::Args as ContainerArgs; -use crate::commands::{global, version}; +use crate::commands::global; use crate::print::Print; -/// Verify a wasm by rebuilding it with the same backend recorded in its metadata. +/// Verify a wasm by rebuilding it and comparing bytes. /// -/// All cdylib contracts in the workspace are rebuilt; verification succeeds if -/// any rebuilt artifact is byte-identical to the input. The user is responsible -/// for checking out the matching commit before running. +/// All cdylib contracts in the workspace are rebuilt; verification succeeds +/// if any rebuilt artifact is byte-identical to the input. The user is +/// responsible for checking out the matching commit before running. 
/// -/// `bldbkd: docker` and `bldbkd: docker-all` rebuild inside the recorded image. -/// `bldbkd: local` rebuilds with the host rust toolchain pinned to the wasm's -/// `rsver` — this is best-effort: local builds depend on environment factors -/// (system libs, paths, env vars) that aren't captured in meta, so a verify -/// match is informative but not as strong a guarantee as the docker backends. +/// Backend selection from meta: +/// - `bldimg` present: rebuild inside that image (with `RUSTUP_TOOLCHAIN` +/// pinned to the wasm's `rsver` so the rust version matches whatever the +/// original build used). +/// - `bldimg` absent: rebuild locally with the host's rust toolchain pinned +/// via `cargo +`. Best-effort — local builds depend on environment +/// factors (system libs, env vars) that aren't captured in meta. #[derive(Parser, Debug, Clone)] #[group(skip)] pub struct Cmd { @@ -45,8 +47,6 @@ pub enum Error { StellarAssetContract, #[error("required '{0}' meta entry not found in contract; rebuild the wasm with `stellar contract build --backend docker` to make it verifiable")] MissingMeta(&'static str), - #[error("stellar-cli version mismatch: contract was built with '{expected}', running stellar-cli is '{actual}'. 
Install the matching CLI version and re-run.")] - CliVersionMismatch { expected: String, actual: String }, #[error("verification failed: none of the rebuilt artifacts ({}) match original ({expected})", produced.iter().map(|(n, h)| format!("{n}={h}")).collect::>().join(", "))] Mismatch { expected: String, @@ -54,12 +54,9 @@ pub enum Error { }, #[error("reading rebuilt wasm: {0}")] ReadingRebuilt(std::io::Error), - #[error("unknown bldbkd value '{0}'")] - UnknownBackend(String), } impl Cmd { - #[allow(clippy::too_many_lines)] pub async fn run(&self, global_args: &global::Args) -> Result<(), Error> { let print = Print::new(global_args.quiet); @@ -72,7 +69,6 @@ impl Cmd { let spec = Spec::new(&wasm_bytes)?; let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = find_meta(&spec.meta, "bldimg"); - let bldbkd = find_meta(&spec.meta, "bldbkd"); let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; let bldopt_manifest_path = find_meta(&spec.meta, "bldopt_manifest_path") .ok_or(Error::MissingMeta("bldopt_manifest_path"))?; @@ -88,10 +84,6 @@ impl Cmd { if let Some(b) = &bldimg { print.blankln(format!("Docker image: {b}")); } - print.blankln(format!( - "Build backend: {}", - bldbkd.as_deref().unwrap_or("docker") - )); print.blankln(format!("Manifest path: {bldopt_manifest_path}")); print.blankln(format!("Package: {bldopt_package}")); print.blankln(format!("Profile: {bldopt_profile}")); @@ -99,26 +91,9 @@ impl Cmd { print.blankln("Optimize: true"); } - // Pick the rebuild backend. For docker-all the in-container CLI did - // the build, so the host CLI version doesn't have to match. For - // docker and local, the host CLI is part of the build pipeline, so - // a cliver mismatch means the rebuild will diverge. Legacy wasms - // with no bldbkd are treated as docker. 
- let backend = match bldbkd.as_deref().unwrap_or("docker") { - "docker-all" => build::Backend::DockerAll { - image: bldimg.ok_or(Error::MissingMeta("bldimg"))?, - }, - "docker" => { - require_cliver_match(&cliver)?; - build::Backend::Docker { - image: bldimg.ok_or(Error::MissingMeta("bldimg"))?, - } - } - "local" => { - require_cliver_match(&cliver)?; - build::Backend::Local - } - other => return Err(Error::UnknownBackend(other.to_string())), + let backend = match bldimg { + Some(image) => build::Backend::Docker { image }, + None => build::Backend::Local, }; // Resolve the manifest path relative to the cwd's git top-level so @@ -180,18 +155,6 @@ impl Cmd { } } -fn require_cliver_match(expected: &str) -> Result<(), Error> { - let running = version::one_line(); - if expected == running { - Ok(()) - } else { - Err(Error::CliVersionMismatch { - expected: expected.to_string(), - actual: running, - }) - } -} - fn git_top_level() -> Option { let out = std::process::Command::new("git") .args(["rev-parse", "--show-toplevel"]) From 7970c1d972b5fd1d27945b2651dab7f4c4b00c04 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 11:39:58 +0000 Subject: [PATCH 52/64] revert files unrelated to docker backend --- .gitignore | 4 +- .../src/commands/contract/alias/ls.rs | 117 ++++++++++++------ cmd/soroban-cli/src/commands/doctor.rs | 11 +- cmd/soroban-cli/src/commands/network/ls.rs | 55 +++++++- cmd/soroban-cli/src/config/data.rs | 94 +++++++++++--- cmd/soroban-cli/src/config/network.rs | 63 +++++++++- 6 files changed, 285 insertions(+), 59 deletions(-) diff --git a/.gitignore b/.gitignore index 1d8ee2e41a..c91de6f2e2 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,6 @@ local.sh .stellar .zed node_modules/ -.DS_Store \ No newline at end of file +.DS_Store +logs/ +ai-summary/ diff --git a/cmd/soroban-cli/src/commands/contract/alias/ls.rs b/cmd/soroban-cli/src/commands/contract/alias/ls.rs index f9a38fdd2e..733f639da3 100644 
--- a/cmd/soroban-cli/src/commands/contract/alias/ls.rs +++ b/cmd/soroban-cli/src/commands/contract/alias/ls.rs @@ -1,5 +1,6 @@ use clap::Parser; use std::collections::HashMap; +use std::ffi::OsStr; use std::fmt::Debug; use std::path::Path; use std::{fs, process}; @@ -23,12 +24,6 @@ pub enum Error { #[error(transparent)] Network(#[from] network::Error), - #[error(transparent)] - PatternError(#[from] glob::PatternError), - - #[error(transparent)] - GlobError(#[from] glob::GlobError), - #[error(transparent)] IoError(#[from] std::io::Error), } @@ -57,57 +52,57 @@ impl Cmd { Ok(()) } - fn read_from_config_dir(config_dir: &Path) -> Result<(), Error> { - let pattern = config_dir - .join("contract-ids") - .join("*.json") - .to_string_lossy() - .into_owned(); - - let paths = glob::glob(&pattern)?; - let mut found = false; + fn collect_aliases(config_dir: &Path) -> Result>, Error> { + let contract_ids_dir = config_dir.join("contract-ids"); let mut map: HashMap> = HashMap::new(); - for path in paths { - let path = path?; + if !contract_ids_dir.is_dir() { + return Ok(map); + } + + for entry in fs::read_dir(&contract_ids_dir)? 
{ + let path = entry?.path(); + + if path.extension() != Some(OsStr::new("json")) { + continue; + } if let Some(alias) = path.file_stem() { let alias = alias.to_string_lossy().into_owned(); - let content = fs::read_to_string(path)?; + let content = fs::read_to_string(&path)?; let data: alias::Data = serde_json::from_str(&content).unwrap_or_default(); - for network_passphrase in data.ids.keys() { - let network_passphrase = network_passphrase.clone(); - let contract = data - .ids - .get(&network_passphrase) - .map(ToString::to_string) - .unwrap_or_default(); + for (network_passphrase, contract_id) in &data.ids { let entry = AliasEntry { alias: alias.clone(), - contract, + contract: contract_id.clone(), }; - let list = map.entry(network_passphrase.clone()).or_default(); - - list.push(entry.clone()); + map.entry(network_passphrase.clone()) + .or_default() + .push(entry); } } } - for network_passphrase in map.keys() { - if let Some(list) = map.clone().get_mut(network_passphrase) { - println!("ℹ️ Aliases available for network '{network_passphrase}'"); + Ok(map) + } + + fn read_from_config_dir(config_dir: &Path) -> Result<(), Error> { + let mut map = Self::collect_aliases(config_dir)?; + let mut found = false; - list.sort_by(|a, b| a.alias.cmp(&b.alias)); + for (network_passphrase, list) in &mut map { + println!("ℹ️ Aliases available for network '{network_passphrase}'"); - for entry in list { - found = true; - println!("{}: {}", entry.alias, entry.contract); - } + list.sort_by(|a, b| a.alias.cmp(&b.alias)); - println!(); + for entry in list.iter() { + found = true; + println!("{}: {}", entry.alias, entry.contract); } + + println!(); } if !found { @@ -119,3 +114,49 @@ impl Cmd { Ok(()) } } + +#[cfg(test)] +mod tests { + use super::*; + use std::fs; + + fn write_alias(dir: &Path, name: &str, network: &str, contract: &str) { + let contract_ids_dir = dir.join("contract-ids"); + fs::create_dir_all(&contract_ids_dir).unwrap(); + let content = 
format!(r#"{{"ids":{{"{network}":"{contract}"}}}}"#); + fs::write(contract_ids_dir.join(format!("{name}.json")), content).unwrap(); + } + + #[test] + fn glob_metacharacters_in_config_dir_are_treated_as_literal() { + let tmp = tempfile::tempdir().unwrap(); + let base = tmp.path(); + + // Sibling directories that would match the glob `[12]` if unescaped. + write_alias(&base.join("cfg1"), "alpha", "testnet", "CAAAA"); + write_alias(&base.join("cfg2"), "beta", "testnet", "CBBBB"); + + // The literal directory whose name contains bracket metacharacters. + write_alias(&base.join("cfg[12]"), "gamma", "testnet", "CCCCC"); + + let map = Cmd::collect_aliases(&base.join("cfg[12]")).unwrap(); + + let aliases: Vec<&str> = map + .values() + .flat_map(|entries| entries.iter().map(|e| e.alias.as_str())) + .collect(); + + assert!( + aliases.contains(&"gamma"), + "should read alias from the literal directory" + ); + assert!( + !aliases.contains(&"alpha"), + "should not read from sibling cfg1" + ); + assert!( + !aliases.contains(&"beta"), + "should not read from sibling cfg2" + ); + } +} diff --git a/cmd/soroban-cli/src/commands/doctor.rs b/cmd/soroban-cli/src/commands/doctor.rs index 7e4321d90b..994bcabcd4 100644 --- a/cmd/soroban-cli/src/commands/doctor.rs +++ b/cmd/soroban-cli/src/commands/doctor.rs @@ -9,7 +9,7 @@ use crate::{ config::{ self, data, locator::{self, KeyType}, - network::{Network, DEFAULTS as DEFAULT_NETWORKS}, + network::{redact_rpc_url, Network, DEFAULTS as DEFAULT_NETWORKS}, }, print::Print, rpc, @@ -98,7 +98,10 @@ async fn print_network( "Network" }; - print.globeln(format!("{prefix} {name:?} ({})", network.rpc_url)); + print.globeln(format!( + "{prefix} {name:?} ({})", + redact_rpc_url(&network.rpc_url) + )); print.blankln(format!("protocol {}", version_info.protocol_version)); print.blankln(format!("rpc {}", version_info.version)); @@ -120,7 +123,7 @@ async fn inspect_networks(print: &Print, config_locator: &locator::Args) -> Resu if print_network(true, print, 
&name, &network).await.is_err() { print.warnln(format!( "Default network {name:?} ({}) is unreachable", - network.rpc_url + redact_rpc_url(&network.rpc_url) )); } } @@ -130,7 +133,7 @@ async fn inspect_networks(print: &Print, config_locator: &locator::Args) -> Resu if print_network(false, print, name, &network).await.is_err() { print.warnln(format!( "Network {name:?} ({}) is unreachable", - network.rpc_url + redact_rpc_url(&network.rpc_url) )); } } diff --git a/cmd/soroban-cli/src/commands/network/ls.rs b/cmd/soroban-cli/src/commands/network/ls.rs index 23ad2382f5..307ba0e98b 100644 --- a/cmd/soroban-cli/src/commands/network/ls.rs +++ b/cmd/soroban-cli/src/commands/network/ls.rs @@ -47,10 +47,63 @@ impl Cmd { format!( "Name: {name}\nRPC url: {rpc_url}\nRPC headers:{headers}\nNetwork passphrase: {passphrase}", - rpc_url = network.rpc_url, + rpc_url = crate::config::network::redact_rpc_url(&network.rpc_url), passphrase = network.network_passphrase, ) }) .collect()) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::config::network::Network; + use crate::test_utils::{with_cwd_guard, with_env_guard}; + use serial_test::serial; + + #[test] + #[serial] + fn ls_l_redacts_rpc_url_password() { + let tmp = tempfile::tempdir().unwrap(); + + with_env_guard(&["STELLAR_CONFIG_HOME", "XDG_CONFIG_HOME"], || { + with_cwd_guard(|| { + let global_cfg = tmp.path().join("global"); + std::fs::create_dir_all(&global_cfg).unwrap(); + std::env::set_var("STELLAR_CONFIG_HOME", &global_cfg); + + let work = tmp.path().join("work"); + std::fs::create_dir_all(&work).unwrap(); + std::env::set_current_dir(&work).unwrap(); + + let cmd = Cmd { + config_locator: locator::Args { config_dir: None }, + long: true, + }; + + let network = Network { + rpc_url: "https://alice:supersecret@rpc.example.com/soroban".to_string(), + rpc_headers: Vec::new(), + network_passphrase: "Test SDF Network ; September 2015".to_string(), + }; + cmd.config_locator.write_network("corp", &network).unwrap(); + + let 
rendered = cmd.ls_l().unwrap().join("\n\n"); + + assert!( + !rendered.contains("supersecret"), + "password leaked into `network ls -l` output: {rendered}" + ); + assert!( + rendered.contains("alice:redacted"), + "expected `alice:redacted` in `network ls -l` output: {rendered}" + ); + assert!( + rendered.contains("rpc.example.com/soroban"), + "expected host and path preserved: {rendered}" + ); + }); + }); + } +} diff --git a/cmd/soroban-cli/src/config/data.rs b/cmd/soroban-cli/src/config/data.rs index 8a5332182c..a1cbde20b2 100644 --- a/cmd/soroban-cli/src/config/data.rs +++ b/cmd/soroban-cli/src/config/data.rs @@ -4,6 +4,7 @@ use serde::{Deserialize, Serialize}; use std::str::FromStr; use url::Url; +use super::network::redact_rpc_url; use crate::xdr::{self, WriteXdr}; #[derive(thiserror::Error, Debug)] @@ -60,7 +61,7 @@ pub fn bucket_dir() -> Result { pub fn write(action: Action, rpc_url: &Url) -> Result { let data = Data { action, - rpc_url: rpc_url.to_string(), + rpc_url: redact_rpc_url(rpc_url.as_str()), }; let id = ulid::Ulid::new(); let file = actions_dir()?.join(id.to_string()).with_extension("json"); @@ -202,25 +203,90 @@ fn to_xdr(data: &impl WriteXdr) -> Result { #[cfg(test)] mod test { use super::*; + use crate::test_utils::with_env_set; use serial_test::serial; #[test] #[serial] fn test_write_read() { let t = assert_fs::TempDir::new().unwrap(); - std::env::set_var("STELLAR_DATA_HOME", t.path().to_str().unwrap()); - let rpc_uri = Url::from_str("http://localhost:8000").unwrap(); - let sim = SimulateTransactionResponse::default(); - let original_action: Action = sim.into(); - - let id = write(original_action.clone(), &rpc_uri.clone()).unwrap(); - let (action, new_rpc_uri) = read(&id).unwrap(); - assert_eq!(rpc_uri, new_rpc_uri); - match (action, original_action) { - (Action::Simulate { response: a }, Action::Simulate { response: b }) => { - assert_eq!(a.min_resource_fee, b.min_resource_fee); + with_env_set("STELLAR_DATA_HOME", t.path(), || { + let rpc_uri = 
Url::from_str("http://localhost:8000").unwrap(); + let sim = SimulateTransactionResponse::default(); + let original_action: Action = sim.into(); + + let id = write(original_action.clone(), &rpc_uri.clone()).unwrap(); + let (action, new_rpc_uri) = read(&id).unwrap(); + assert_eq!(rpc_uri, new_rpc_uri); + match (action, original_action) { + (Action::Simulate { response: a }, Action::Simulate { response: b }) => { + assert_eq!(a.min_resource_fee, b.min_resource_fee); + } + _ => panic!("Action mismatch"), } - _ => panic!("Action mismatch"), - } + }); + } + + #[test] + #[serial] + fn actionlog_write_redacts_rpc_url_password_on_disk() { + let t = assert_fs::TempDir::new().unwrap(); + with_env_set("STELLAR_DATA_HOME", t.path(), || { + let rpc_uri = + Url::from_str("https://alice:supersecret@rpc.example.com/soroban/rpc").unwrap(); + let action: Action = SimulateTransactionResponse::default().into(); + + let id = write(action, &rpc_uri).unwrap(); + let file = actions_dir() + .unwrap() + .join(id.to_string()) + .with_extension("json"); + let contents = std::fs::read_to_string(&file).unwrap(); + + assert!( + !contents.contains("supersecret"), + "password leaked into action-log JSON: {contents}" + ); + assert!( + contents.contains("alice"), + "username should be preserved: {contents}" + ); + assert!( + contents.contains("redacted"), + "expected literal `redacted` placeholder: {contents}" + ); + assert!( + contents.contains("rpc.example.com"), + "expected host to be preserved: {contents}" + ); + }); + } + + #[test] + #[serial] + fn actionlog_list_actions_renders_redacted_rpc_url() { + let t = assert_fs::TempDir::new().unwrap(); + with_env_set("STELLAR_DATA_HOME", t.path(), || { + let rpc_uri = + Url::from_str("https://alice:supersecret@rpc.example.com/soroban/rpc").unwrap(); + let action: Action = SimulateTransactionResponse::default().into(); + + write(action, &rpc_uri).unwrap(); + let rendered = list_actions() + .unwrap() + .into_iter() + .map(|entry| entry.to_string()) + 
.collect::>() + .join("\n"); + + assert!( + !rendered.contains("supersecret"), + "password leaked into ls -l render: {rendered}" + ); + assert!( + rendered.contains("alice:redacted"), + "expected `alice:redacted` in ls -l render: {rendered}" + ); + }); } } diff --git a/cmd/soroban-cli/src/config/network.rs b/cmd/soroban-cli/src/config/network.rs index d546adefe6..350b9ca46a 100644 --- a/cmd/soroban-cli/src/config/network.rs +++ b/cmd/soroban-cli/src/config/network.rs @@ -165,13 +165,23 @@ impl std::fmt::Debug for Network { .map(|(k, _)| (k.as_str(), "")) .collect(); f.debug_struct("Network") - .field("rpc_url", &self.rpc_url) + .field("rpc_url", &redact_rpc_url(&self.rpc_url)) .field("rpc_headers", &concealed) .field("network_passphrase", &self.network_passphrase) .finish() } } +pub fn redact_rpc_url(rpc_url: &str) -> String { + let Ok(mut url) = Url::parse(rpc_url) else { + return rpc_url.to_string(); + }; + if url.password().is_some() { + let _ = url.set_password(Some("redacted")); + } + url.to_string() +} + fn parse_http_header(header: &str) -> Result<(String, String), Error> { let header_components = header.splitn(2, ':'); @@ -664,4 +674,55 @@ mod tests { r#"Network { rpc_url: "http://localhost:8000/rpc", rpc_headers: [("Authorization", ""), ("X-Api-Key", "")], network_passphrase: "Test Network" }"# ); } + + #[test] + fn test_debug_conceals_rpc_url_password() { + let network = Network { + rpc_url: "https://alice:supersecret@rpc.example.com/soroban".to_string(), + network_passphrase: "Test Network".to_string(), + rpc_headers: Vec::new(), + }; + let rendered = format!("{network:?}"); + assert!( + !rendered.contains("supersecret"), + "password leaked into Debug output: {rendered}" + ); + assert!( + rendered.contains("alice:redacted"), + "expected `alice:redacted` in Debug output: {rendered}" + ); + } + + #[test] + fn redact_rpc_url_leaves_url_without_password_unchanged() { + let plain = "https://rpc.example.com/soroban"; + assert_eq!(redact_rpc_url(plain), plain); 
+ + let user_only = "https://alice@rpc.example.com/soroban"; + assert_eq!(redact_rpc_url(user_only), user_only); + } + + #[test] + fn redact_rpc_url_replaces_password_with_placeholder() { + let with_password = "https://alice:supersecret@rpc.example.com/soroban"; + let redacted = redact_rpc_url(with_password); + assert!( + !redacted.contains("supersecret"), + "password leaked: {redacted}" + ); + assert!( + redacted.contains("alice:redacted"), + "expected `alice:redacted`: {redacted}" + ); + assert!( + redacted.contains("rpc.example.com/soroban"), + "expected host and path preserved: {redacted}" + ); + } + + #[test] + fn redact_rpc_url_returns_input_when_unparseable() { + let bad = "not a url"; + assert_eq!(redact_rpc_url(bad), bad); + } } From 237557e5fdbda695c9b3cadf641e0f5ec612599b Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 11:49:34 +0000 Subject: [PATCH 53/64] record bldbkd; forward via --meta from docker host --- .../src/commands/contract/build.rs | 6 ++++ .../src/commands/contract/build_docker.rs | 13 +++++---- .../src/commands/contract/deploy/wasm.rs | 28 +++++++++++-------- 3 files changed, 31 insertions(+), 16 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 9e9bace59f..66e2a0e7b2 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -457,6 +457,7 @@ impl Cmd { self.inject_meta( &target_file_path, &ExtraMeta { + bldbkd: Some("local".to_string()), bldimg: None, source_repo: source_repo.clone(), source_rev: source_rev.clone(), @@ -726,6 +727,7 @@ impl Cmd { // was used, and source_repo/source_rev when the workspace was a // clean git checkout. 
let kvs = [ + ("bldbkd", extra.bldbkd.as_deref()), ("bldimg", extra.bldimg.as_deref()), ("source_repo", extra.source_repo.as_deref()), ("source_rev", extra.source_rev.as_deref()), @@ -857,6 +859,10 @@ impl Cmd { /// `cliver` is always embedded (separately). `rsver` is embedded by soroban-sdk. #[derive(Default, Debug, Clone)] struct ExtraMeta { + /// `bldbkd`: build backend identifier — `local` or `docker`. Always + /// recorded so consumers can tell whether `bldimg` is expected to be + /// present. + bldbkd: Option, /// `bldimg`: fully-qualified container image used to build (e.g. /// `docker.io/stellar/stellar-cli@sha256:...`). Set when `--backend /// docker`; injected by the in-container cli via `--meta bldimg=...`, diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 7aba70da82..6dfac9659c 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -246,22 +246,25 @@ async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Erro /// entrypoint. The first element is the `$0` placeholder for sh; the rest /// become `stellar`'s actual args. /// -/// `bldimg` is forwarded as `--meta bldimg=` so the in-container -/// `--backend local` build records the image identity in the wasm meta — -/// without needing a new cli flag. +/// We deliberately do not pass `--backend local` here: the in-container cli +/// may be a release that predates this PR and doesn't know about `--backend`. +/// Its default behavior (build locally) is what we want anyway. `bldbkd` and +/// `bldimg` are forwarded as `--meta` entries (an existing flag) so the +/// in-container build records them in the wasm meta — without depending on +/// any new flags from this PR. 
fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { let mut argv: Vec = vec![ "sh".to_string(), // $0 placeholder "contract".to_string(), "build".to_string(), - "--backend".to_string(), - "local".to_string(), "--manifest-path".to_string(), inner.manifest_path.clone(), "--profile".to_string(), inner.profile.to_string(), "--locked".to_string(), "--meta".to_string(), + "bldbkd=docker".to_string(), + "--meta".to_string(), format!("bldimg={image}"), ]; if let Some(p) = inner.package { diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index 46606737b4..0a01bf3191 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -492,9 +492,10 @@ fn build_create_contract_tx( } /// On mainnet, warn if the wasm is missing the meta entries that indicate -/// a reproducible build (`bldimg`, `rsver`, `cliver`, `source_repo`, -/// `source_rev`). Skipped on other networks. Best-effort: parse failures are -/// silently ignored. +/// a reproducible build. `bldimg` is required only when `bldbkd` is not +/// `local` (local builds have no docker image to record). All other +/// reproducibility entries are required for both backends. Skipped on +/// other networks. Best-effort: parse failures are silently ignored. 
fn warn_if_mainnet_wasm_not_reproducible( wasm_bytes: &[u8], network_passphrase: &str, @@ -506,9 +507,15 @@ fn warn_if_mainnet_wasm_not_reproducible( let Ok(spec) = soroban_spec_tools::contract::Spec::new(wasm_bytes) else { return; }; - let required = [ + let find = |k: &str| -> Option { + spec.meta.iter().find_map(|e| { + let crate::xdr::ScMetaEntry::ScMetaV0(crate::xdr::ScMetaV0 { key, val }) = e; + (key.to_string() == k).then(|| val.to_string()) + }) + }; + let mut required: Vec<&str> = vec![ + "bldbkd", "cliver", - "bldimg", "rsver", "source_repo", "source_rev", @@ -516,14 +523,13 @@ fn warn_if_mainnet_wasm_not_reproducible( "bldopt_package", "bldopt_profile", ]; + // `bldimg` is only meaningful for non-local backends. + if find("bldbkd").as_deref() != Some("local") { + required.push("bldimg"); + } let missing: Vec<&str> = required .iter() - .filter(|k| { - !spec.meta.iter().any(|e| { - let crate::xdr::ScMetaEntry::ScMetaV0(crate::xdr::ScMetaV0 { key, .. }) = e; - key.to_string() == **k - }) - }) + .filter(|k| find(k).is_none()) .copied() .collect(); if missing.is_empty() { From e9b6ca0486fadd3218e63f6868e4f96e0cf82a2d Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:12:33 +0000 Subject: [PATCH 54/64] pin default docker image to digest --- .../src/commands/contract/build.rs | 12 +++++++++- .../src/commands/contract/build_docker.rs | 23 ++++++++++++++----- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 66e2a0e7b2..5c3285b543 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -158,7 +158,17 @@ impl Backend { } } -const DEFAULT_DOCKER_IMAGE: &str = "docker.io/stellar/stellar-cli:latest"; +// Pinned by digest rather than `:latest` so that: +// - the digest is recorded in `bldimg` immediately (no post-pull resolution +// that 
can fail on Apple Silicon docker, where `RepoDigests` is often left +// empty after a cross-platform pull), +// - builds with the default backend are reproducible day-one without the +// user having to specify `--backend docker=...@sha256:...` themselves. +// +// To bump: `docker pull --platform linux/amd64 stellar/stellar-cli:`, +// then read the `Digest:` line. +const DEFAULT_DOCKER_IMAGE: &str = + "docker.io/stellar/stellar-cli@sha256:cb2fc3116a6ace37a77ca6bb88afb4bee57fc746cd556a4373f2c3ee95d4e917"; pub fn parse_backend(s: &str) -> Result { match s { diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 6dfac9659c..93655df65d 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -62,9 +62,12 @@ pub enum Error { source: bollard::errors::Error, }, - #[error("docker image {image} has no repository digest; pin via --backend docker=/@sha256:...")] + #[error("docker image {image} has no repository digest. Either pin via --backend docker=/@sha256:..., or remove any locally-built image at this tag (`docker rmi {image}`) and let the default re-pull")] NoDigest { image: String }, + #[error("pulling docker image {image}: daemon reported error: {message}")] + PullDaemonError { image: String, message: String }, + #[error("build failed inside docker container (exit {0})")] BuildExit(i64), @@ -300,6 +303,8 @@ fn format_inner_cmd(inner: &InnerBuildArgs<'_>, image: &str) -> String { .join(" ") } +/// Pull `image` (by tag or by digest) on `linux/amd64`. Daemon-reported +/// errors in the pull event stream are surfaced as `PullDaemonError`. pub(super) async fn pull_image( docker: &Docker, image: &str, @@ -319,6 +324,12 @@ pub(super) async fn pull_image( image: image.to_string(), source: e, })? 
{ + if let Some(detail) = item.error_detail { + return Err(Error::PullDaemonError { + image: image.to_string(), + message: detail.message.unwrap_or_else(|| "unknown".to_string()), + }); + } if let Some(status) = item.status { if status.contains("Pulling from") || status.contains("Digest") @@ -336,11 +347,11 @@ pub(super) async fn pull_image( Ok(()) } -// We pull with --platform=linux/amd64 so the recorded digest is platform-specific; -// reproducibility on `verify` depends on always pulling with that same platform. -// Returns a fully-qualified `/@sha256:` reference so -// that `verify` on a different machine can resolve it without depending on -// local registry config. +/// Returns a fully-qualified `/@sha256:` reference +/// for embedding in `bldimg`. If `image` already contains an `@sha256:...` +/// reference, it's used directly. Otherwise we fall back to inspecting the +/// local image's `RepoDigests` after pull. (For the default image we ship +/// a digest-pinned reference, so this fallback is rare.) pub(super) async fn resolve_image_digest( docker: &Docker, image: &str, From d3be20c7cf7ff62b4a786c840268938a4bbf4aa2 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:14:30 +0000 Subject: [PATCH 55/64] skip pull if image already local --- cmd/soroban-cli/src/commands/contract/build_docker.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 93655df65d..f7c6712fa7 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -118,7 +118,14 @@ pub async fn run_in_docker( let resolved = if let Some(r) = pre_resolved { r.to_string() } else { - pull_image(&docker, image, print).await?; + // Skip the pull when the image is already local. 
For digest-pinned + // references the digest is immutable, so a present image is the + // image. This also sidesteps a bollard quirk where pulling an + // already-present digest-pinned image surfaces the daemon's + // "cannot overwrite digest" event as a stream error. + if docker.inspect_image(image).await.is_err() { + pull_image(&docker, image, print).await?; + } resolve_image_digest(&docker, image).await? }; From 890de2991da4d0612218f470a3af0b6558c3a614 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:19:55 +0000 Subject: [PATCH 56/64] drop bldbkd meta; bldimg presence signals docker --- .../src/commands/contract/build.rs | 6 ---- .../src/commands/contract/build_docker.rs | 11 ++++---- .../src/commands/contract/deploy/wasm.rs | 28 ++++++++----------- 3 files changed, 17 insertions(+), 28 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 5c3285b543..146095d5fa 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -467,7 +467,6 @@ impl Cmd { self.inject_meta( &target_file_path, &ExtraMeta { - bldbkd: Some("local".to_string()), bldimg: None, source_repo: source_repo.clone(), source_rev: source_rev.clone(), @@ -737,7 +736,6 @@ impl Cmd { // was used, and source_repo/source_rev when the workspace was a // clean git checkout. let kvs = [ - ("bldbkd", extra.bldbkd.as_deref()), ("bldimg", extra.bldimg.as_deref()), ("source_repo", extra.source_repo.as_deref()), ("source_rev", extra.source_rev.as_deref()), @@ -869,10 +867,6 @@ impl Cmd { /// `cliver` is always embedded (separately). `rsver` is embedded by soroban-sdk. #[derive(Default, Debug, Clone)] struct ExtraMeta { - /// `bldbkd`: build backend identifier — `local` or `docker`. Always - /// recorded so consumers can tell whether `bldimg` is expected to be - /// present. 
- bldbkd: Option, /// `bldimg`: fully-qualified container image used to build (e.g. /// `docker.io/stellar/stellar-cli@sha256:...`). Set when `--backend /// docker`; injected by the in-container cli via `--meta bldimg=...`, diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index f7c6712fa7..5ef2ab2a26 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -258,10 +258,11 @@ async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Erro /// /// We deliberately do not pass `--backend local` here: the in-container cli /// may be a release that predates this PR and doesn't know about `--backend`. -/// Its default behavior (build locally) is what we want anyway. `bldbkd` and -/// `bldimg` are forwarded as `--meta` entries (an existing flag) so the -/// in-container build records them in the wasm meta — without depending on -/// any new flags from this PR. +/// Its default behavior (build locally) is what we want anyway. `bldimg` is +/// forwarded as a `--meta` entry (an existing flag) so the in-container +/// build records it in the wasm meta — without depending on any new flags +/// from this PR. The presence of `bldimg` itself signals a docker build; +/// no separate `bldbkd` field is needed. 
fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { let mut argv: Vec = vec![ "sh".to_string(), // $0 placeholder @@ -273,8 +274,6 @@ fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { inner.profile.to_string(), "--locked".to_string(), "--meta".to_string(), - "bldbkd=docker".to_string(), - "--meta".to_string(), format!("bldimg={image}"), ]; if let Some(p) = inner.package { diff --git a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs index 0a01bf3191..ea1164a7d5 100644 --- a/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs +++ b/cmd/soroban-cli/src/commands/contract/deploy/wasm.rs @@ -492,9 +492,10 @@ fn build_create_contract_tx( } /// On mainnet, warn if the wasm is missing the meta entries that indicate -/// a reproducible build. `bldimg` is required only when `bldbkd` is not -/// `local` (local builds have no docker image to record). All other -/// reproducibility entries are required for both backends. Skipped on +/// a reproducible build (`bldimg`, `cliver`, `rsver`, `source_repo`, +/// `source_rev`, `bldopt_*`). `bldimg` is the docker-build marker — its +/// presence indicates the wasm was built inside a recorded image; its +/// absence means a local build with no image to verify against. Skipped on /// other networks. Best-effort: parse failures are silently ignored. 
fn warn_if_mainnet_wasm_not_reproducible( wasm_bytes: &[u8], @@ -507,14 +508,8 @@ fn warn_if_mainnet_wasm_not_reproducible( let Ok(spec) = soroban_spec_tools::contract::Spec::new(wasm_bytes) else { return; }; - let find = |k: &str| -> Option { - spec.meta.iter().find_map(|e| { - let crate::xdr::ScMetaEntry::ScMetaV0(crate::xdr::ScMetaV0 { key, val }) = e; - (key.to_string() == k).then(|| val.to_string()) - }) - }; - let mut required: Vec<&str> = vec![ - "bldbkd", + let required = [ + "bldimg", "cliver", "rsver", "source_repo", @@ -523,13 +518,14 @@ fn warn_if_mainnet_wasm_not_reproducible( "bldopt_package", "bldopt_profile", ]; - // `bldimg` is only meaningful for non-local backends. - if find("bldbkd").as_deref() != Some("local") { - required.push("bldimg"); - } let missing: Vec<&str> = required .iter() - .filter(|k| find(k).is_none()) + .filter(|k| { + !spec.meta.iter().any(|e| { + let crate::xdr::ScMetaEntry::ScMetaV0(crate::xdr::ScMetaV0 { key, .. }) = e; + key.to_string() == **k + }) + }) .copied() .collect(); if missing.is_empty() { From 47e49007dea4e264dc78c69081a1105286ac003d Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:25:25 +0000 Subject: [PATCH 57/64] tolerate missing bldopt_* meta in verify --- .../src/commands/contract/verify.rs | 51 +++++++++++++------ 1 file changed, 36 insertions(+), 15 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/verify.rs b/cmd/soroban-cli/src/commands/contract/verify.rs index 7bcb849b90..aecd7dd4e0 100644 --- a/cmd/soroban-cli/src/commands/contract/verify.rs +++ b/cmd/soroban-cli/src/commands/contract/verify.rs @@ -57,6 +57,7 @@ pub enum Error { } impl Cmd { + #[allow(clippy::too_many_lines)] pub async fn run(&self, global_args: &global::Args) -> Result<(), Error> { let print = Print::new(global_args.quiet); @@ -70,23 +71,43 @@ impl Cmd { let cliver = find_meta(&spec.meta, "cliver").ok_or(Error::MissingMeta("cliver"))?; let bldimg = 
find_meta(&spec.meta, "bldimg"); let rsver = find_meta(&spec.meta, "rsver").ok_or(Error::MissingMeta("rsver"))?; - let bldopt_manifest_path = find_meta(&spec.meta, "bldopt_manifest_path") - .ok_or(Error::MissingMeta("bldopt_manifest_path"))?; - let bldopt_package = - find_meta(&spec.meta, "bldopt_package").ok_or(Error::MissingMeta("bldopt_package"))?; - let bldopt_profile = - find_meta(&spec.meta, "bldopt_profile").ok_or(Error::MissingMeta("bldopt_profile"))?; + let bldopt_manifest_path = find_meta(&spec.meta, "bldopt_manifest_path"); + let bldopt_package = find_meta(&spec.meta, "bldopt_package"); + let bldopt_profile = find_meta(&spec.meta, "bldopt_profile"); let bldopt_optimize = find_meta(&spec.meta, "bldopt_optimize").is_some(); + // Bldopts are best-effort: warn about missing ones and fall back to + // the build command's defaults so verify still tries to rebuild. + let missing_bldopts: Vec<&str> = [ + ("bldopt_manifest_path", bldopt_manifest_path.is_some()), + ("bldopt_package", bldopt_package.is_some()), + ("bldopt_profile", bldopt_profile.is_some()), + ] + .into_iter() + .filter_map(|(k, present)| (!present).then_some(k)) + .collect(); + if !missing_bldopts.is_empty() { + print.warnln(format!( + "wasm meta is missing build option entries: {missing_bldopts:?}. \ + The build may not be reproducible — defaults will be used for missing options." 
+ )); + } + print.blankln(format!("Original wasm hash: {original_hash}")); print.blankln(format!("stellar-cli version: {cliver}")); print.blankln(format!("rust version: {rsver}")); if let Some(b) = &bldimg { print.blankln(format!("Docker image: {b}")); } - print.blankln(format!("Manifest path: {bldopt_manifest_path}")); - print.blankln(format!("Package: {bldopt_package}")); - print.blankln(format!("Profile: {bldopt_profile}")); + if let Some(p) = &bldopt_manifest_path { + print.blankln(format!("Manifest path: {p}")); + } + if let Some(p) = &bldopt_package { + print.blankln(format!("Package: {p}")); + } + if let Some(p) = &bldopt_profile { + print.blankln(format!("Profile: {p}")); + } if bldopt_optimize { print.blankln("Optimize: true"); } @@ -98,8 +119,8 @@ impl Cmd { // Resolve the manifest path relative to the cwd's git top-level so // verify works from anywhere inside the checkout. - let manifest_path = { - let p = PathBuf::from(&bldopt_manifest_path); + let manifest_path = bldopt_manifest_path.map(|s| { + let p = PathBuf::from(&s); if p.is_absolute() { p } else if let Some(root) = git_top_level() { @@ -107,12 +128,12 @@ impl Cmd { } else { p } - }; + }); let build_cmd = build::Cmd { - manifest_path: Some(manifest_path), - package: Some(bldopt_package), - profile: bldopt_profile, + manifest_path, + package: bldopt_package, + profile: bldopt_profile.unwrap_or_else(|| build::Cmd::default().profile), backend, container_args: self.container_args.clone(), rustup_toolchain: Some(rsver), From 8ae04b96651102dfc4eda95a63eb81789ed6d5fa Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:36:50 +0000 Subject: [PATCH 58/64] use image's stellar entrypoint, drop wasm target preflight --- .../src/commands/contract/build_docker.rs | 40 +++++-------------- 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs 
b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 5ef2ab2a26..d933112a05 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -156,19 +156,16 @@ async fn run_inner_build( env.push(format!("RUSTUP_TOOLCHAIN={t}")); } - // Override the image's entrypoint with `sh -c` so we can preflight-check - // the wasm target before invoking the cli. Works against the official - // `stellar/stellar-cli` image and any compatible custom image. - let entrypoint = vec![ - "sh".to_string(), - "-c".to_string(), - preflight_script(), - ]; + // Use the image's default entrypoint (`stellar` for the official image, + // and any compatible custom image must do the same). The args in `cmd` + // become the cli's arguments. We rely on the image to have the right + // wasm target installed for its default toolchain; if `RUSTUP_TOOLCHAIN` + // selects a different one, the cli/cargo handle target installation + // themselves. let argv = build_inner_argv(inner, image); let config = ContainerCreateBody { image: Some(image.to_string()), - entrypoint: Some(entrypoint), cmd: Some(argv), env: Some(env), working_dir: Some(SOURCE_DIR.to_string()), @@ -203,18 +200,6 @@ async fn run_inner_build( result } -/// Shell script run as the container's entrypoint. Preflight-checks that -/// `wasm32v1-none` is installed in the active rust toolchain, then `exec`s -/// `stellar` with the args passed via `cmd`. 
-fn preflight_script() -> String { - "rustup target list --installed 2>/dev/null | grep -q '^wasm32v1-none$' || { \ - echo 'error: wasm32v1-none target not installed in image; install it with `rustup target add wasm32v1-none` or use a different image' >&2; \ - exit 1; \ - }; \ - exec stellar \"$@\"" - .to_string() -} - async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Error> { docker .start_container(container_id, None::) @@ -252,9 +237,8 @@ async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Erro Ok(()) } -/// Build the argv passed via `cmd` after the `sh -c '... exec stellar "$@"'` -/// entrypoint. The first element is the `$0` placeholder for sh; the rest -/// become `stellar`'s actual args. +/// Build the argv passed via `cmd`. The image's entrypoint is `stellar`, +/// so these become the cli's arguments directly. /// /// We deliberately do not pass `--backend local` here: the in-container cli /// may be a release that predates this PR and doesn't know about `--backend`. @@ -265,7 +249,6 @@ async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Erro /// no separate `bldbkd` field is needed. fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { let mut argv: Vec = vec![ - "sh".to_string(), // $0 placeholder "contract".to_string(), "build".to_string(), "--manifest-path".to_string(), @@ -301,12 +284,7 @@ fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { } fn format_inner_cmd(inner: &InnerBuildArgs<'_>, image: &str) -> String { - // Skip the `$0` placeholder when displaying. - build_inner_argv(inner, image) - .into_iter() - .skip(1) - .collect::>() - .join(" ") + build_inner_argv(inner, image).join(" ") } /// Pull `image` (by tag or by digest) on `linux/amd64`. 
Daemon-reported From 5adb3d4f05ac9dd621e2e6052371d4ab76ad1f2d Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 12:51:14 +0000 Subject: [PATCH 59/64] override entrypoint to stellar to skip dbus init --- .../src/commands/contract/build_docker.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index d933112a05..4372bb87ad 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -156,16 +156,19 @@ async fn run_inner_build( env.push(format!("RUSTUP_TOOLCHAIN={t}")); } - // Use the image's default entrypoint (`stellar` for the official image, - // and any compatible custom image must do the same). The args in `cmd` - // become the cli's arguments. We rely on the image to have the right - // wasm target installed for its default toolchain; if `RUSTUP_TOOLCHAIN` - // selects a different one, the cli/cargo handle target installation - // themselves. + // Override the image's entrypoint to invoke `stellar` directly. The + // official `stellar/stellar-cli` image's entrypoint is a wrapper script + // that launches dbus + gnome-keyring before exec-ing `stellar`; that + // setup is irrelevant for `contract build` and dbus refuses to start + // when the container runs as a host UID with no `/etc/passwd` entry. + // Going straight to the binary keeps the host UID mapping intact (so + // build outputs aren't root-owned on the host) and skips the broken + // dbus init. 
let argv = build_inner_argv(inner, image); let config = ContainerCreateBody { image: Some(image.to_string()), + entrypoint: Some(vec!["stellar".to_string()]), cmd: Some(argv), env: Some(env), working_dir: Some(SOURCE_DIR.to_string()), From 9fe17bcf575659a5dc900c5d945bcd347cc6dc9c Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Sat, 2 May 2026 00:07:30 +1000 Subject: [PATCH 60/64] document reproducibility via build metadata --- SEP-contract-verification.md | 213 +++++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100644 SEP-contract-verification.md diff --git a/SEP-contract-verification.md b/SEP-contract-verification.md new file mode 100644 index 0000000000..7fd60ac801 --- /dev/null +++ b/SEP-contract-verification.md @@ -0,0 +1,213 @@ +## Preamble + +``` +SEP: 0058 +Title: Soroban Contract Build Reproducibility and Verification +Author: +Status: Draft +Created: 2026-05-01 +Updated: 2026-05-01 +Version: 0.1.0 +Discussion: TBD +``` + +## Simple Summary + +Standardize the metadata embedded in Soroban contract wasm artifacts so that any third party can reproducibly rebuild a contract from source and verify the on-chain artifact byte-for-byte against the claimed source. + +This SEP describes a *rebuild-based* verification path. It is complementary to [SEP-55](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0055.md), which describes an *attestation-based* path (relying on signed evidence from a trusted CI environment instead of an independent rebuild). The two approaches address the same trust question with different operational and trust trade-offs; a single wasm can carry meta supporting either, both, or neither. + +## Dependencies + +- [SEP-46](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0046.md) — Contract Meta. Defines the `contractmetav0` Wasm custom section that this SEP populates with reproducibility entries. 
+- [SEP-55](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0055.md) — Soroban Smart Contracts Build Verification (informative). Defines an attestation-based verification mechanism. SEP-55 also defines a `source_repo` meta key. + +## Motivation + +Today, when a contract is deployed to mainnet, the on-chain artifact is opaque bytes. A user wishing to evaluate the contract has no programmatic way to confirm that those bytes were built from a particular source tree. Anyone *can* rebuild a contract and compare hashes, but the build environment — host OS, container image, rust toolchain version, cargo features, profile, manifest path — is not recorded anywhere on-chain or in the wasm itself. Without a standard set of inputs to the rebuild, two well-intentioned verifiers can produce different bytes from the same source and reach different conclusions. + +This SEP closes that gap by: + +1. Defining a stable set of meta entries that build tooling embeds in every reproducible build — enough information that a verifier can stand up a matching build environment. +2. Defining a deterministic verification algorithm — rebuild from the recorded environment, sha256, compare. + +[SEP-55](https://github.com/stellar/stellar-protocol/blob/master/ecosystem/sep-0055.md) addresses the same trust question via a different mechanism: the build CI signs an attestation, and verifiers check the attestation rather than rebuilding. Rebuild-based verification (this SEP) requires no trust in a particular CI but demands a deterministic build environment; attestation-based verification (SEP-55) accepts trust in the attesting CI but demands no rebuild. The two are not mutually exclusive — a builder can publish meta supporting both, and a verifier can pick whichever path best fits their threat model. 
+ +## Abstract + +A reproducible Soroban contract build embeds a set of meta entries in the wasm's `contractmetav0` custom section — the toolchain version (`rsver`), the build cli's identity (`cliver`), the container image used (`bldimg`), the source repository and commit (`source_repo`, `source_rev`), and the per-package build options (`bldopt_*`). A verifier reads those entries, reconstructs the build environment, rebuilds, and compares the resulting wasm's sha256 to the original. This SEP defines the entries and the algorithm; it does not prescribe specific tooling. + +## Specification + +### 1. Build meta entries + +A reproducible build embeds the following entries in the wasm's `contractmetav0` custom section. Each entry is a `ScMetaEntry::ScMetaV0` with a UTF-8 `key` and UTF-8 `val`. Values that fail their format regex below are not considered conformant. + +| key | description | format regex | +|---|---|---| +| `cliver` | Build cli version + git rev. | `^\d+\.\d+\.\d+(-[A-Za-z0-9.+-]+)?#([0-9a-f]{40}(-dirty)?)?$` | +| `rsver` | Resolved rustc version used for the build. | `^\d+\.\d+\.\d+(-[A-Za-z0-9.+-]+)?$` | +| `bldimg` | Fully-qualified container image used for the build, pinned by digest. Required for builds claiming `docker`-class reproducibility; absent for host-local builds. | `^[^@\s]+@sha256:[0-9a-f]{64}$` | +| `source_repo` | HTTPS URL of the source repository's origin. Recorded only when the working tree was clean at build time. | `^https?://\S+$` | +| `source_rev` | Full 40-char SHA-1 of the source commit (`HEAD`). Recorded only when the working tree was clean at build time. | `^[0-9a-f]{40}$` | +| `bldopt_manifest_path` | Path to the package's `Cargo.toml` relative to the repository root. | `^([^/\s]+/)*Cargo\.toml$` | +| `bldopt_package` | Cargo package name being built. | `^[A-Za-z][A-Za-z0-9_-]*$` | +| `bldopt_profile` | Cargo profile (e.g. `release`). 
| `^[A-Za-z][A-Za-z0-9_-]*$` | +| `bldopt_optimize` | Present and equal to `true` iff post-build wasm optimization (`wasm-opt`) was applied. | `^true$` | + +Tooling may inject additional, application-specific entries; verifiers ignore unrecognized keys. + +### 2. Build classes + +A wasm's reproducibility class is determined by the meta entries present: + +- **Class A — container-pinned**: all of `cliver`, `rsver`, `bldimg`, `source_repo`, `source_rev`, `bldopt_manifest_path`, `bldopt_package`, `bldopt_profile` are present and conformant. The build is reproducible to the bytes of `bldimg`'s pulled image content; verification produces identical bytes on any host with a working docker daemon and access to the registry. +- **Class B — host best-effort**: all of `cliver`, `rsver`, `source_repo`, `source_rev`, `bldopt_manifest_path`, `bldopt_package`, `bldopt_profile` are present and conformant; `bldimg` is absent. The build was performed on the host with a non-containerized toolchain. Verification is best-effort and may produce different bytes on different hosts due to environment differences not captured in meta. +- **Class C — non-reproducible**: any required entry above is absent. Verification cannot be claimed. + +Tooling deploying to mainnet should warn when a wasm is Class B or C. + +### 3. Verification algorithm + +Given an on-chain or offline wasm `W`, a verifier produces a verification result as follows: + +1. Compute `sha256(W)` → `original_hash`. +2. Parse `W`'s `contractmetav0` section. Extract the entries listed in §1. +3. Determine the build class per §2. If Class C, verification fails: the wasm carries insufficient meta to be reproduced. +4. Source acquisition is the verifier's responsibility. The verifier ensures a checkout of `source_repo` at commit `source_rev` is available before invoking the rebuild; this SEP does not mandate a clone strategy. +5. 
Reconstruct the build environment: + - For Class A: pull `bldimg` (digest-pinned, so deterministic) and run the rebuild inside that container with the source checkout bind-mounted in. The rust toolchain inside the container MUST be the version recorded in `rsver` (e.g. via the `RUSTUP_TOOLCHAIN` environment variable when rustup is the in-image toolchain manager). + - For Class B: use the host rust toolchain pinned to `rsver` (e.g. via `cargo +<rsver>` when rustup is the host toolchain manager). +6. In the reconstructed environment, perform a cargo build of the recorded package. The build MUST: + - target `wasm32v1-none`, + - use `--locked` (so `Cargo.lock` is honored verbatim), + - use the package's `Cargo.toml` at `bldopt_manifest_path`, + - target package `bldopt_package`, + - use cargo profile `bldopt_profile`, + - apply `wasm-opt` post-build optimization iff `bldopt_optimize` is present and equal to `true`, + - and otherwise use cargo defaults (no extra features, default dependency resolution, etc.). +7. Locate the rebuilt wasm artifact for `bldopt_package` and compute its sha256 → `rebuilt_hash`. +8. Verification succeeds iff `rebuilt_hash == original_hash`. + +In a workspace with multiple cdylib packages, a verifier MAY rebuild all packages and search for any rebuilt artifact whose hash matches `original_hash`; this accommodates cases where the recorded `bldopt_package` cannot be honored verbatim. + +## Limitations + +- This SEP makes the *build* reproducible. It does not make the *source* trustworthy. Verification proves the deployed bytes match a particular source tree; whether that source is correct, audited, or non-malicious is an orthogonal concern. +- Class B (host best-effort) verification is environment-dependent. Two verifiers may legitimately disagree. +- Verifiers depend on the integrity of the docker image registry hosting `bldimg` and on the integrity of the source host (`source_repo`). Compromise of either invalidates the verification. 
+- The current `cliver` regex permits three legacy install-path renderings (clean sha, dirty sha, empty rev). A future revision of this SEP will narrow that regex once cli build tooling normalizes the rendering. + +## Design Rationale + +**Why digest-pin `bldimg`?** A registry tag is mutable; a content digest is not. A verifier pulling a digest is guaranteed to receive the same bytes the original builder used. Recording a tag would punt the reproducibility question to the registry's mutable state. + +**Why allow Class B at all?** Mandating containers would lock out builders without a working daemon (CI environments without privileged docker, restricted corporate networks, hobbyists on locked-down machines). A best-effort tier with explicit "may not match" semantics is more useful than no record at all, provided consumers understand the tier difference. + +**Why record `rsver`?** Cargo's wasm output is sensitive to rustc version. Without recording the toolchain, two verifiers on different default toolchains would legitimately produce different bytes from the same source. Recording `rsver` lets each verifier pin to the exact version the original build used (typically via `cargo +<rsver>` on hosts, or `RUSTUP_TOOLCHAIN` inside containers). + +**Why does this SEP not define a verification result format?** Verification is a continuous activity and verifiers vary in audience, storage, and tooling preferences. Mandating a schema would over-constrain implementations whose only obligation, from this SEP's point of view, is to follow §3 faithfully. Verifiers are free to publish however suits them; consumers that aggregate across verifiers can adapt to each verifier's format. + +**Why have both this SEP and SEP-55?** They answer overlapping but distinct questions. SEP-55 (attestation-based) answers "did a particular trusted CI compile this wasm from this source?" 
— useful when the verifier is willing to trust the CI provider's signing infrastructure and wants to skip the cost of rebuilding. This SEP (rebuild-based) answers "does this source, compiled with the recorded environment, produce these exact bytes?" — useful when the verifier wants no third-party trust assumption beyond the source host and the container image registry. A wasm that carries meta supporting both gives consumers maximum flexibility; a verifier picks the path matching their threat model. + +**Source-repo format alignment with SEP-55.** SEP-55 defines `source_repo` as `github:<org>/<repo>`. This SEP defines it as an HTTPS URL — the form already produced by existing build tooling. Both SEPs are draft and a future revision should converge on a single format (or define independent keys) to avoid ambiguity. Until then, tooling consuming `source_repo` should be tolerant of either form and able to derive a clone URL from each. + +**Why `bldopt_*` per-field rather than a single struct?** Meta entries are flat key-value strings. Encoding a struct (e.g. JSON) in a single value is opaque to consumers that just want one field; flat keys are inspectable with grep. + +## Security Concerns + +- **Source-host trust.** `source_repo` is a URL the verifier fetches from; a compromised host (or an attacker between verifier and host) can serve a different commit at the recorded `source_rev`. SHA-1 collisions in git are not considered practical at the time of writing but verifiers SHOULD prefer hosts that publish signed tags or commit signatures where available. +- **Container-image trust.** A digest pin is integrity-protective only as long as the digest references a valid manifest at the registry. Registry compromise (or image deletion) breaks verification. +- **Verifier compromise.** A verifier can publish false-positive attestations. Consumers SHOULD weigh attestations by verifier reputation and aggregate from multiple independent verifiers. 
+- **Verifier non-determinism.** A verifier MUST itself be reproducible (pinned cli version, pinned base OS). A drifting verifier produces noise that consumers cannot distinguish from genuine build divergence. +- **Meta tampering.** A malicious builder could publish a wasm with deceptive `source_repo`/`source_rev` claims that don't reproduce. Verification's value is precisely catching this case — a non-matching rebuild is a positive signal that the meta is wrong. +- **What this does not protect against.** This SEP says nothing about the soundness of the source itself. Verified builds are necessary but not sufficient for trust. + +## Changelog + +* `v0.1.0` - Initial draft. + +## Appendix A: Example Implementations + +The following implementations demonstrate the spec in practice. They are illustrative, not normative — any tool that produces conformant meta and any verifier that follows §3 satisfies this SEP. + +### A.1. `stellar` CLI (build and verify) + +The Stellar Development Foundation's `stellar` command-line tool implements both meta-embedding (at build time) and the §3 verification algorithm (via a `verify` subcommand). + +**Producing a Class A wasm:** + +``` +$ stellar contract build --backend docker +ℹ Pulling from stellar/stellar-cli + Digest: sha256:cb2fc3... +ℹ contract build --manifest-path /source/contracts/foo/Cargo.toml --profile release --locked --meta bldimg=docker.io/stellar/stellar-cli@sha256:cb2fc3... + Compiling foo v… +✅ Build Complete + +$ stellar contract info meta --wasm target/wasm32v1-none/release/foo.wasm +cliver=26.0.0#abc1234567890abcdef1234567890abcdef12345 +rsver=1.83.0 +bldimg=docker.io/stellar/stellar-cli@sha256:cb2fc3... 
+source_repo=https://github.com/user/my-contract +source_rev=abc1234567890abcdef1234567890abcdef12345 +bldopt_manifest_path=contracts/foo/Cargo.toml +bldopt_package=foo +bldopt_profile=release +``` + +**Verifying a deployed contract:** + +``` +$ stellar contract build verify --contract-id CXXX… --network mainnet +ℹ Loading contract from network... +ℹ Loading meta from contract... + Original wasm hash: 9f86d081… + stellar-cli version: 26.0.0#abc1234… + rust version: 1.83.0 + Docker image: docker.io/stellar/stellar-cli@sha256:cb2fc3... + Manifest path: contracts/foo/Cargo.toml + Package: foo + Profile: release +ℹ contract build --manifest-path /source/contracts/foo/Cargo.toml --profile release --locked --meta bldimg=... + Compiling foo v… +✅ Build Complete +✅ Verified: rebuilt foo wasm matches 9f86d081… +``` + +In the docker case, the `verify` subcommand pulls `bldimg` and runs the build inside it, setting `RUSTUP_TOOLCHAIN=<rsver>` so the in-container rust matches the recorded version. In the local case, it invokes `cargo +<rsver>` against the host toolchain. + +### A.2. CI-driven verification at scale (`contract-verifications`) + +The `stellar-experimental/contract-verifications` repository ([link](https://github.com/stellar-experimental/contract-verifications)) is one example of running §3 in CI. It runs a daily GitHub Actions job that walks an upstream wasm corpus, performs §3 against each entry using `stellar contract build verify`, and publishes per-wasm JSON records under version control. 
+ +A minimal sketch of the same pattern: + +```yaml +name: verify +on: + schedule: [{ cron: '0 6 * * *' }] +jobs: + verify: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - run: curl -sSf https://soroban.stellar.org/install | sh + - run: | + for wasm in wasms/*.wasm; do + hash=$(sha256sum "$wasm" | cut -d' ' -f1) + test -f "results/$hash.json" && continue # idempotency + stellar contract build verify --wasm "$wasm" \ + | tee "results/$hash.json" + done + - run: git add results/ && git commit -m "verify run" && git push +``` + +A verifier following this pattern in production should: + +- pin their own verifier-tool version so their results are themselves reproducible, and disclose that version alongside each result; +- record the original wasm sha256, the rebuilt sha256, and the verification outcome at a minimum; +- avoid mutating published results — re-running a verification produces a new record rather than overwriting an existing one; +- skip wasms already verified by the same verifier at the same tool version (idempotency). + +The choice of storage, scheduling, and record format is left to the verifier; conformance to §3 is the only thing that makes results comparable across verifiers. 
From cd443d226c6cde2799b59c18e39f0db31412e996 Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 14:15:15 +0000 Subject: [PATCH 61/64] forward source/bldopt meta from host on docker builds --- .../src/commands/contract/build.rs | 58 +++++++++++++++++-- .../src/commands/contract/build_docker.rs | 11 ++-- 2 files changed, 60 insertions(+), 9 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build.rs b/cmd/soroban-cli/src/commands/contract/build.rs index 146095d5fa..ddfe5fcf5c 100644 --- a/cmd/soroban-cli/src/commands/contract/build.rs +++ b/cmd/soroban-cli/src/commands/contract/build.rs @@ -384,6 +384,9 @@ impl Cmd { target_dir.as_std_path(), workspace_root, mount_root, + git_root.as_deref(), + source_repo.as_deref(), + source_rev.as_deref(), &wasm_target, ) .await; @@ -525,11 +528,12 @@ impl Cmd { Ok(built_contracts) } - /// Orchestrate a `--backend docker` build: pull the requested stellar-cli - /// image and run `stellar contract build --backend local` inside it + /// Orchestrate a `--backend docker` build: pull the requested + /// stellar-cli image and run `stellar contract build` inside it /// against the bind-mounted source. The in-container cli does cargo + - /// meta injection + spec filtering + optional wasm-opt itself; the host - /// only orchestrates and copies outputs to `--out-dir` if requested. + /// meta injection + spec filtering + optional wasm-opt itself; the + /// host only orchestrates and copies outputs to `--out-dir` if + /// requested. 
#[allow(clippy::too_many_arguments)] async fn run_docker( &self, @@ -539,6 +543,9 @@ impl Cmd { target_dir: &Path, workspace_root: &Path, mount_root: &Path, + git_root: Option<&Path>, + source_repo: Option<&str>, + source_rev: Option<&str>, wasm_target: &str, ) -> Result, Error> { // The user's --manifest-path (if any) is a host path; translate to @@ -555,6 +562,47 @@ impl Cmd { .to_string_lossy() .into_owned(); + // TODO(transitional): forward host-detected `source_*` and + // `bldopt_*` reproducibility meta as `--meta` entries to the + // in-container cli. Released `stellar/stellar-cli` images today + // don't auto-inject these on build, so without this pass-through + // the resulting wasm would be missing them. + // + // Once a `stellar/stellar-cli` image carrying this PR's auto- + // injection logic is published and adopted as the default, the + // in-container cli will set these itself (and its values, being + // first in the meta section, take precedence over the user-meta + // entries we add here per `find_meta`'s first-match semantics). + // At that point, remove this block and let the in-container cli + // be the source of truth. + let mut meta = self.build_args.meta.clone(); + if let Some(s) = source_repo { + meta.push(("source_repo".to_string(), s.to_string())); + } + if let Some(s) = source_rev { + meta.push(("source_rev".to_string(), s.to_string())); + } + meta.push(("bldopt_profile".to_string(), self.profile.clone())); + if self.build_args.optimize { + meta.push(("bldopt_optimize".to_string(), "true".to_string())); + } + // Per-package fields are only safe to forward when exactly one + // package will be built — passing a single value to a multi-package + // workspace build would attach the same `bldopt_package` / + // `bldopt_manifest_path` to every wasm. For multi-package builds we + // skip them and let the in-container cli supply them per-package + // (newer images will; older won't). 
+ if packages.len() == 1 { + let p = &packages[0]; + meta.push(("bldopt_package".to_string(), p.name.clone())); + if let Some(rel) = git_root.and_then(|gr| { + pathdiff::diff_paths(&p.manifest_path, gr) + .map(|p| p.to_string_lossy().into_owned()) + }) { + meta.push(("bldopt_manifest_path".to_string(), rel)); + } + } + let inner = build_docker::InnerBuildArgs { manifest_path: in_container_manifest, package: self.package.as_deref(), @@ -563,7 +611,7 @@ impl Cmd { all_features: self.all_features, no_default_features: self.no_default_features, optimize: self.build_args.optimize, - meta: &self.build_args.meta, + meta, }; build_docker::run_in_docker( diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index 4372bb87ad..aed17b8e67 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -79,8 +79,11 @@ pub enum Error { } /// Forwarded host build args used to construct the inner -/// `stellar contract build --backend local` invocation. `manifest_path` is -/// expected to already be in container-relative form (`/source/...`). +/// `stellar contract build` invocation. `manifest_path` is expected to +/// already be in container-relative form (`/source/...`). `meta` holds both +/// the user's `--meta` entries and any host-detected entries (e.g. +/// `source_repo`, `source_rev`, `bldopt_*`) that are forwarded as +/// transitional pass-throughs while the published cli image catches up. 
pub struct InnerBuildArgs<'a> { pub manifest_path: String, pub package: Option<&'a str>, @@ -89,7 +92,7 @@ pub struct InnerBuildArgs<'a> { pub all_features: bool, pub no_default_features: bool, pub optimize: bool, - pub meta: &'a [(String, String)], + pub meta: Vec<(String, String)>, } /// Pull the image (if needed), then run the in-container @@ -279,7 +282,7 @@ fn build_inner_argv(inner: &InnerBuildArgs<'_>, image: &str) -> Vec { if inner.optimize { argv.push("--optimize".to_string()); } - for (k, v) in inner.meta { + for (k, v) in &inner.meta { argv.push("--meta".to_string()); argv.push(format!("{k}={v}")); } From 0006b28e5dc673725035cdfb3cb27d903ba0754d Mon Sep 17 00:00:00 2001 From: Leigh <351529+leighmcculloch@users.noreply.github.com> Date: Fri, 1 May 2026 15:32:14 +0000 Subject: [PATCH 62/64] entrypoint installs wasm target before stellar --- .../src/commands/contract/build_docker.rs | 49 ++++++++++++++----- 1 file changed, 37 insertions(+), 12 deletions(-) diff --git a/cmd/soroban-cli/src/commands/contract/build_docker.rs b/cmd/soroban-cli/src/commands/contract/build_docker.rs index aed17b8e67..c0683565c4 100644 --- a/cmd/soroban-cli/src/commands/contract/build_docker.rs +++ b/cmd/soroban-cli/src/commands/contract/build_docker.rs @@ -159,19 +159,36 @@ async fn run_inner_build( env.push(format!("RUSTUP_TOOLCHAIN={t}")); } - // Override the image's entrypoint to invoke `stellar` directly. The - // official `stellar/stellar-cli` image's entrypoint is a wrapper script - // that launches dbus + gnome-keyring before exec-ing `stellar`; that - // setup is irrelevant for `contract build` and dbus refuses to start - // when the container runs as a host UID with no `/etc/passwd` entry. - // Going straight to the binary keeps the host UID mapping intact (so - // build outputs aren't root-owned on the host) and skips the broken - // dbus init. 
+ // Override the image's entrypoint with a small shim that ensures the + // wasm target is installed for the active rust toolchain, then exec's + // `stellar`. Two reasons: + // + // - When `RUSTUP_TOOLCHAIN=` selects a toolchain other than the + // image's default (typical at verify time), the image's pre-installed + // `wasm32v1-none` target is associated with the *other* toolchain, + // not the selected one — `cargo build --target=wasm32v1-none` would + // fail. `rustup target add` is idempotent (and quick, when the target + // is already present) so always running it is safe. + // - The official `stellar/stellar-cli` image's stock entrypoint is a + // wrapper script that launches dbus + gnome-keyring before exec-ing + // `stellar`; that setup is irrelevant for `contract build` and dbus + // refuses to start when the container runs as a host UID with no + // `/etc/passwd` entry. Skipping it keeps the host UID mapping intact. + // + // TODO: remove this entrypoint override once + // https://github.com/stellar/stellar-cli/issues/2545 is implemented and + // the published image's entrypoint installs the wasm target itself + // (and doesn't drag dbus/gnome-keyring into the contract-build path). + let entrypoint = vec![ + "sh".to_string(), + "-c".to_string(), + "rustup target add wasm32v1-none --quiet && exec stellar \"$@\"".to_string(), + ]; let argv = build_inner_argv(inner, image); let config = ContainerCreateBody { image: Some(image.to_string()), - entrypoint: Some(vec!["stellar".to_string()]), + entrypoint: Some(entrypoint), cmd: Some(argv), env: Some(env), working_dir: Some(SOURCE_DIR.to_string()), @@ -243,8 +260,10 @@ async fn stream_and_wait(docker: &Docker, container_id: &str) -> Result<(), Erro Ok(()) } -/// Build the argv passed via `cmd`. The image's entrypoint is `stellar`, -/// so these become the cli's arguments directly. +/// Build the argv passed via `cmd`. The image's entrypoint is overridden +/// to `sh -c '