use std::cell::RefCell;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::process::{Command, Stdio};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
/// Caller-supplied parameters for baking a container image into a VM snapshot.
pub struct BakeRequest {
    /// Image reference; may use `oci-layout:` / `oci-archive:` path prefixes.
    pub image: String,
    /// Optional explicit snapshot name.
    pub name: Option<String>,
    pub runtime: String,
    /// Guest TCP port the baked service listens on.
    pub guest_port: u16,
    pub memory_mib: u32,
    pub vcpus: u32,
    pub pull_policy: String,
    /// Directory where finished snapshots are stored.
    pub snapshots_dir: PathBuf,
    /// Overrides the image's default command when set.
    pub cmd_override: Option<String>,
    pub extra_args: Vec<String>,
}
/// A warmup step pipelined with snapshot preparation; `callback` runs once
/// the vsock endpoints described by `PipelinedWarmupContext` are available.
pub struct PipelinedWarmup<'a> {
    pub warm_dir: PathBuf,
    pub warm_tag: String,
    /// Invoked exactly once; `Send` so it may run on another thread.
    pub callback: Box<dyn FnOnce(&PipelinedWarmupContext) -> Result<(), String> + Send + 'a>,
}
/// Socket paths handed to a pipelined warmup callback for talking to the guest.
pub struct PipelinedWarmupContext {
    /// Path of the vsock multiplexer socket.
    pub vsock_mux_path: PathBuf,
    /// Path of the vsock exec socket.
    pub vsock_exec_path: PathBuf,
}
/// Abstraction over the ways an image can be obtained (Docker daemon, remote
/// registry, on-disk OCI layout/archive). All implementations target
/// linux/arm64 only.
trait ImageSource {
    /// Architecture of a locally available copy of `image`, if known.
    fn local_arch(&self, image: &str) -> Option<String>;
    /// Ensure the linux/arm64 variant of `image` is available locally.
    fn pull_arm64(&self, image: &str, force_refresh: bool) -> Result<(), String>;
    /// `docker image inspect`-shaped metadata for `image`.
    fn inspect(&self, image: &str) -> Result<serde_json::Value, String>;
    /// Materialize the image contents under `work_dir`; returns the directory.
    fn save_arm64(&self, image: &str, work_dir: &Path) -> Result<PathBuf, String>;
}
/// `ImageSource` backed by the local Docker daemon via the `docker` CLI.
struct DockerImageSource;
impl ImageSource for DockerImageSource {
    /// Architecture of a locally present image, or `None` when Docker does
    /// not have it (or reports no architecture).
    fn local_arch(&self, image: &str) -> Option<String> {
        let mut inspect = Command::new("docker");
        inspect
            .arg("image")
            .arg("inspect")
            .arg("--format")
            .arg("{{.Architecture}}")
            .arg(image)
            .stderr(Stdio::null());
        let arch = command_output(inspect, "docker image inspect architecture")
            .ok()?
            .trim()
            .to_owned();
        // Docker prints "<no value>" when the template field is absent.
        if arch.is_empty() || arch == "<no value>" {
            None
        } else {
            Some(arch)
        }
    }
    /// Pull the linux/arm64 variant through the Docker daemon.
    fn pull_arm64(&self, image: &str, _force_refresh: bool) -> Result<(), String> {
        let mut pull = Command::new("docker");
        pull.arg("pull")
            .arg("--platform=linux/arm64")
            .arg(image)
            .stdout(Stdio::null());
        run_status(pull, "docker pull")
    }
    /// Full `docker image inspect` record for `image`.
    fn inspect(&self, image: &str) -> Result<serde_json::Value, String> {
        let mut inspect_cmd = Command::new("docker");
        inspect_cmd.arg("image").arg("inspect").arg(image);
        let raw = command_output(inspect_cmd, "docker image inspect")?;
        let parsed: serde_json::Value =
            serde_json::from_str(&raw).map_err(|e| format!("docker image inspect JSON: {e}"))?;
        // `docker image inspect` emits an array; we want the single record.
        match parsed.as_array().and_then(|records| records.first()) {
            Some(record) => Ok(record.clone()),
            None => Err(format!("docker image inspect returned no records for {image}")),
        }
    }
    /// `docker save` the image to a tarball and unpack it under `work_dir`.
    fn save_arm64(&self, image: &str, work_dir: &Path) -> Result<PathBuf, String> {
        let tarball = work_dir.join("image.tar");
        let mut save = Command::new("docker");
        save.arg("save")
            .arg("--platform=linux/arm64")
            .arg(image)
            .arg("-o")
            .arg(&tarball)
            .stderr(Stdio::null());
        run_status(save, "docker save")?;
        let extracted = work_dir.join("_save");
        std::fs::create_dir_all(&extracted)
            .map_err(|e| format!("create save dir {}: {e}", extracted.display()))?;
        let mut untar = Command::new("tar");
        untar.arg("-xf").arg(&tarball).arg("-C").arg(&extracted);
        run_status(untar, "tar extract docker save")?;
        // The tarball is large and no longer needed once unpacked.
        let _ = std::fs::remove_file(&tarball);
        Ok(extracted)
    }
}
/// A fully split registry image reference: host, repository path, and tag or
/// digest.
#[derive(Clone)]
struct RegistryImageRef {
    registry: String,
    repository: String,
    /// Tag name or `sha256:`-prefixed digest.
    reference: String,
}
/// Username/password pair for registry Basic / token authentication.
#[derive(Clone)]
struct RegistryCreds {
    user: String,
    pass: String,
}
impl RegistryCreds {
    /// Render these credentials as an HTTP `Basic` Authorization header value
    /// (`Basic base64(user:pass)`).
    fn basic_header_value(&self) -> String {
        let joined = format!("{}:{}", self.user, self.pass);
        format!("Basic {}", b64_encode(joined.as_bytes()))
    }
}
/// Look up registry credentials in the Docker CLI config
/// (`$DOCKER_CONFIG/config.json`, defaulting to `~/.docker/config.json`).
///
/// Returns `None` when the config is missing/unreadable or holds no usable
/// inline `auth` entry for `registry`. Docker Hub is special-cased because
/// the CLI historically stores its credentials under several host aliases.
fn docker_config_auth(registry: &str) -> Option<RegistryCreds> {
    let config_path = std::env::var_os("DOCKER_CONFIG")
        .map(|d| PathBuf::from(d).join("config.json"))
        .unwrap_or_else(|| home_join(".docker/config.json"));
    let text = std::fs::read_to_string(&config_path).ok()?;
    let json: serde_json::Value = serde_json::from_str(&text).ok()?;
    let auths = json.get("auths")?.as_object()?;
    let docker_hub_aliases = ["docker.io", "index.docker.io", "registry-1.docker.io"];
    // Fix: the original had a mojibake `contains(®istry)` here (`&registry`
    // corrupted during an encoding round-trip), which does not compile.
    let is_docker_hub = docker_hub_aliases.contains(&registry);
    // Config keys may be stored bare or as URLs with/without version suffixes.
    let mut keys: Vec<String> = vec![
        registry.to_string(),
        format!("https://{registry}"),
        format!("https://{registry}/"),
        format!("https://{registry}/v1/"),
        format!("https://{registry}/v2/"),
    ];
    if is_docker_hub {
        for alias in &docker_hub_aliases {
            keys.push(format!("https://{alias}/v1/"));
            keys.push(format!("https://{alias}/v2/"));
            keys.push(format!("https://{alias}"));
            keys.push((*alias).to_string());
        }
    }
    for key in &keys {
        let Some(entry) = auths.get(key) else {
            continue;
        };
        // Entries managed by a credential helper carry no inline "auth" blob;
        // skip those (and any malformed entry) instead of aborting the whole
        // lookup, so a later alias key can still match.
        let Some(auth_b64) = entry.get("auth").and_then(|v| v.as_str()) else {
            continue;
        };
        let Some(decoded) = b64_decode(auth_b64.trim()) else {
            continue;
        };
        let Ok(s) = String::from_utf8(decoded) else {
            continue;
        };
        let Some((user, pass)) = s.split_once(':') else {
            continue;
        };
        return Some(RegistryCreds {
            user: user.to_owned(),
            pass: pass.to_owned(),
        });
    }
    None
}
/// Decode standard base64 (RFC 4648 alphabet), tolerating embedded
/// whitespace and stopping at the first `=` padding byte. Returns `None` on
/// any other non-alphabet character.
fn b64_decode(s: &str) -> Option<Vec<u8>> {
    // Map one alphabet byte to its 6-bit value.
    fn sextet(b: u8) -> Option<u32> {
        match b {
            b'A'..=b'Z' => Some(u32::from(b - b'A')),
            b'a'..=b'z' => Some(u32::from(b - b'a') + 26),
            b'0'..=b'9' => Some(u32::from(b - b'0') + 52),
            b'+' => Some(62),
            b'/' => Some(63),
            _ => None,
        }
    }
    let mut out = Vec::new();
    let mut acc: u32 = 0;
    let mut nbits: u32 = 0;
    for &b in s.as_bytes() {
        if b == b'=' {
            // Padding marks the end of the payload.
            break;
        }
        if matches!(b, b'\n' | b'\r' | b' ' | b'\t') {
            continue;
        }
        acc = (acc << 6) | sextet(b)?;
        nbits += 6;
        if nbits >= 8 {
            // A full byte is available in the accumulator; emit it.
            nbits -= 8;
            out.push((acc >> nbits) as u8);
            acc &= (1u32 << nbits) - 1;
        }
    }
    Some(out)
}
/// Encode `bytes` as standard base64 (RFC 4648 alphabet) with `=` padding.
fn b64_encode(bytes: &[u8]) -> String {
    const TABLE: &[u8; 64] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
    let mut out = String::with_capacity(bytes.len().div_ceil(3) * 4);
    for chunk in bytes.chunks(3) {
        // Pack up to three input bytes into a 24-bit group (missing bytes
        // contribute zero bits and are reflected as `=` padding below).
        let n = (u32::from(chunk[0]) << 16)
            | (u32::from(chunk.get(1).copied().unwrap_or(0)) << 8)
            | u32::from(chunk.get(2).copied().unwrap_or(0));
        out.push(TABLE[(n >> 18) as usize & 0x3f] as char);
        out.push(TABLE[(n >> 12) as usize & 0x3f] as char);
        out.push(if chunk.len() > 1 {
            TABLE[(n >> 6) as usize & 0x3f] as char
        } else {
            '='
        });
        out.push(if chunk.len() > 2 {
            TABLE[n as usize & 0x3f] as char
        } else {
            '='
        });
    }
    out
}
/// `ImageSource` that downloads straight from an OCI registry, memoizing the
/// fetched layout for the lifetime of the value.
struct RegistryImageSource {
    image: RegistryImageRef,
    /// Lazily populated by `cached_layout`; the temp dir is removed in `Drop`.
    cached_layout: RefCell<Option<RegistryLayoutCache>>,
}
/// A downloaded OCI layout plus the temp directory that owns it.
struct RegistryLayoutCache {
    /// Temp dir removed when the owning `RegistryImageSource` is dropped.
    work_dir: PathBuf,
    /// The `_oci` layout directory inside `work_dir`.
    layout: PathBuf,
}
impl RegistryImageSource {
    /// Return the on-disk OCI layout for this image, downloading it on first
    /// use and reusing the memoized copy afterwards.
    fn cached_layout(&self, force_refresh: bool) -> Result<PathBuf, String> {
        // Fast path: already fetched during this value's lifetime. The borrow
        // ends before the borrow_mut below, so RefCell rules are respected.
        if let Some(cache) = self.cached_layout.borrow().as_ref() {
            return Ok(cache.layout.clone());
        }
        let work_dir = temp_work_dir("supermachine-registry-layout")?;
        let layout = work_dir.join("_oci");
        let result = fetch_registry_to_oci_layout(&self.image, &layout, force_refresh);
        // Don't leave a half-written temp dir behind on failure.
        if let Err(err) = result {
            let _ = std::fs::remove_dir_all(work_dir);
            return Err(err);
        }
        *self.cached_layout.borrow_mut() = Some(RegistryLayoutCache {
            work_dir,
            layout: layout.clone(),
        });
        Ok(layout)
    }
}
impl Drop for RegistryImageSource {
    /// Best-effort removal of the cached layout's temp directory.
    fn drop(&mut self) {
        if let Some(cache) = self.cached_layout.get_mut().take() {
            let _ = std::fs::remove_dir_all(cache.work_dir);
        }
    }
}
impl ImageSource for RegistryImageSource {
    /// Nothing is ever "local" for a pure registry source.
    fn local_arch(&self, _image: &str) -> Option<String> {
        None
    }
    /// Pulling means populating the memoized OCI layout.
    fn pull_arm64(&self, _image: &str, force_refresh: bool) -> Result<(), String> {
        self.cached_layout(force_refresh).map(|_| ())
    }
    fn inspect(&self, image: &str) -> Result<serde_json::Value, String> {
        let layout = self.cached_layout(false)?;
        inspect_oci_layout(image, &layout)
    }
    fn save_arm64(&self, _image: &str, work_dir: &Path) -> Result<PathBuf, String> {
        // Reuse the memoized layout when present; otherwise fetch directly
        // into the caller's work dir (not memoized — the caller owns cleanup).
        if let Some(cache) = self.cached_layout.borrow().as_ref() {
            return Ok(cache.layout.clone());
        }
        let layout = work_dir.join("_oci");
        fetch_registry_to_oci_layout(&self.image, &layout, false)?;
        Ok(layout)
    }
}
/// Minimal HTTP response captured from a `curl` invocation.
struct HttpResponse {
    /// Final HTTP status code (after redirects).
    status: u16,
    /// Raw response header text as written by `curl -D`.
    headers: String,
    /// Body bytes; empty when the body was streamed to a file instead.
    body: Vec<u8>,
}
/// Resolved linux/arm64 manifest + config for a registry image, with cache
/// bookkeeping used for trace output.
struct RegistryManifest {
    /// Hex sha256 of `manifest_bytes` (no `sha256:` prefix).
    manifest_digest: String,
    manifest_bytes: Vec<u8>,
    /// Parsed form of `manifest_bytes`.
    manifest: serde_json::Value,
    /// Config digest as it appears in the manifest (`sha256:`-prefixed).
    config_digest: String,
    config_bytes: Vec<u8>,
    blob_cache_hits: usize,
    blob_downloads: usize,
    /// True when served entirely from the on-disk ref cache.
    ref_cache_hit: bool,
    ref_cache_age_ms: Option<u128>,
    ref_cache_ttl_ms: u128,
}
/// `ImageSource` reading an OCI image-layout directory already on disk.
struct OciLayoutImageSource {
    path: PathBuf,
}
impl ImageSource for OciLayoutImageSource {
    /// Local layouts are assumed to carry arm64 content, so report that.
    fn local_arch(&self, _image: &str) -> Option<String> {
        Some(String::from("arm64"))
    }
    /// Nothing to pull: the layout already sits on local disk.
    fn pull_arm64(&self, _image: &str, _force_refresh: bool) -> Result<(), String> {
        Ok(())
    }
    /// Inspect the layout directory directly.
    fn inspect(&self, image: &str) -> Result<serde_json::Value, String> {
        inspect_oci_layout(image, &self.path)
    }
    /// The layout directory itself is the "saved" form; hand it back as-is.
    fn save_arm64(&self, _image: &str, _work_dir: &Path) -> Result<PathBuf, String> {
        Ok(self.path.clone())
    }
}
/// `ImageSource` reading an OCI archive tarball (a `tar` of an image layout).
struct OciArchiveImageSource {
    path: PathBuf,
}
impl ImageSource for OciArchiveImageSource {
    /// Archives are assumed to carry arm64 content.
    fn local_arch(&self, _image: &str) -> Option<String> {
        Some("arm64".to_owned())
    }
    /// Nothing to pull: the archive is already on disk.
    fn pull_arm64(&self, _image: &str, _force_refresh: bool) -> Result<(), String> {
        Ok(())
    }
    fn inspect(&self, image: &str) -> Result<serde_json::Value, String> {
        // Extract to a throwaway dir just for inspection, then clean up on
        // every exit path (hence the closure).
        let work_dir = temp_work_dir("supermachine-oci-archive-inspect")?;
        let result = (|| {
            let layout = extract_oci_archive(&self.path, &work_dir)?;
            inspect_oci_layout(image, &layout)
        })();
        let _ = std::fs::remove_dir_all(work_dir);
        result
    }
    fn save_arm64(&self, _image: &str, work_dir: &Path) -> Result<PathBuf, String> {
        extract_oci_archive(&self.path, work_dir)
    }
}
/// Locate the `supermachine-worker` binary: codesign-aware discovery first,
/// then well-known locations under `root`, falling back to the release path
/// (which may not exist — callers surface the error on spawn).
pub(crate) fn supermachine_worker_bin(root: &Path) -> PathBuf {
    if let Some(p) = crate::codesign::locate_worker_bin() {
        // Signing is best-effort; failures are surfaced when the binary runs.
        #[cfg(target_os = "macos")]
        {
            let _ = crate::codesign::ensure_worker_signed(&p);
        }
        return p;
    }
    for candidate in [
        "target/release/supermachine-worker",
        "bin/supermachine-worker",
    ] {
        let p = root.join(candidate);
        if p.is_file() {
            return p;
        }
    }
    root.join("target/release/supermachine-worker")
}
/// Locate the guest kernel image: in-repo paths first, then installed asset
/// discovery, falling back to the in-repo path even if absent.
pub(crate) fn supermachine_kernel(root: &Path) -> PathBuf {
    for candidate in [
        "crates/supermachine-kernel/kernel",
        "share/supermachine/kernel",
    ] {
        let p = root.join(candidate);
        if p.is_file() {
            return p;
        }
    }
    if let Some(p) = crate::assets::AssetPaths::discover().kernel {
        return p;
    }
    root.join("crates/supermachine-kernel/kernel")
}
/// Choose how to obtain `image`: an on-disk OCI layout, an OCI archive
/// tarball, the local Docker daemon (opt-in via `SUPERMACHINE_IMAGE_SOURCE`),
/// or — by default — a direct registry download.
fn select_image_source(image: &str) -> Result<Box<dyn ImageSource>, String> {
    if let Some(raw) = image.strip_prefix("oci-layout:") {
        let path = PathBuf::from(raw);
        // A layout is only usable if its index.json is present.
        if !path.join("index.json").is_file() {
            return Err(format!("OCI layout missing index.json: {}", path.display()));
        }
        return Ok(Box::new(OciLayoutImageSource { path }));
    }
    if let Some(raw) = image.strip_prefix("oci-archive:") {
        let path = PathBuf::from(raw);
        if !path.is_file() {
            return Err(format!("OCI archive not found: {}", path.display()));
        }
        return Ok(Box::new(OciArchiveImageSource { path }));
    }
    // Opt-in escape hatch to route everything through the Docker daemon.
    let wants_docker = matches!(
        std::env::var("SUPERMACHINE_IMAGE_SOURCE").as_deref(),
        Ok("docker")
    );
    if wants_docker {
        return Ok(Box::new(DockerImageSource));
    }
    Ok(Box::new(RegistryImageSource {
        image: parse_registry_image_ref(image)?,
        cached_layout: RefCell::new(None),
    }))
}
/// Parse an image reference (`[registry/]repo[:tag|@digest]`) into its parts.
///
/// Mirrors Docker's reference grammar: the first path component is treated as
/// a registry host only when it contains a dot or colon or is `localhost`.
/// Bare names (`nginx`) map to Docker Hub's `library/` namespace. Docker Hub
/// UI aliases (`docker.io`, `index.docker.io`) are normalized to
/// `registry-1.docker.io`, the host that actually serves the v2 API — the
/// original passed the alias through, producing requests against a host that
/// is not a registry endpoint.
fn parse_registry_image_ref(image: &str) -> Result<RegistryImageRef, String> {
    // Split off the reference: a digest (`@sha256:...`) wins over a tag; a
    // colon only counts as a tag separator when it appears after the last
    // slash (so `host:5000/repo` is not mistaken for a tag).
    let (name, reference) = if let Some((name, digest)) = image.rsplit_once('@') {
        (name, digest.to_owned())
    } else {
        let slash = image.rfind('/');
        let colon = image.rfind(':');
        if let Some(colon) = colon.filter(|colon| slash.map(|slash| *colon > slash).unwrap_or(true))
        {
            (&image[..colon], image[colon + 1..].to_owned())
        } else {
            (image, "latest".to_owned())
        }
    };
    let first = name.split('/').next().unwrap_or_default();
    let has_registry = first.contains('.') || first.contains(':') || first == "localhost";
    let (registry, repository) = if has_registry {
        let Some((registry, repository)) = name.split_once('/') else {
            return Err(format!("invalid registry image reference: {image}"));
        };
        // Normalize Docker Hub host aliases to the real registry endpoint.
        let registry = if registry == "docker.io" || registry == "index.docker.io" {
            "registry-1.docker.io".to_owned()
        } else {
            registry.to_owned()
        };
        // Docker Hub repositories without a namespace live under `library/`.
        let repository = if registry == "registry-1.docker.io" && !repository.contains('/') {
            format!("library/{repository}")
        } else {
            repository.to_owned()
        };
        (registry, repository)
    } else {
        // No registry host at all: this is a Docker Hub reference.
        let repository = if name.contains('/') {
            name.to_owned()
        } else {
            format!("library/{name}")
        };
        ("registry-1.docker.io".to_owned(), repository)
    };
    if repository.is_empty() || reference.is_empty() {
        return Err(format!("invalid image reference: {image}"));
    }
    Ok(RegistryImageRef {
        registry,
        repository,
        reference,
    })
}
/// `Accept` header listing the manifest media types we can consume, in
/// preference order: OCI index, Docker manifest list, OCI manifest, Docker
/// manifest.
fn registry_accept_header() -> &'static str {
    concat!(
        "application/vnd.oci.image.index.v1+json, ",
        "application/vnd.docker.distribution.manifest.list.v2+json, ",
        "application/vnd.oci.image.manifest.v1+json, ",
        "application/vnd.docker.distribution.manifest.v2+json"
    )
}
/// How to authenticate an outgoing registry/curl request.
#[derive(Default)]
enum RegistryAuth<'a> {
    /// Anonymous request.
    #[default]
    None,
    /// `Authorization: Bearer <token>`.
    Bearer(&'a str),
    /// `Authorization: Basic <base64(user:pass)>`.
    Basic(&'a RegistryCreds),
}
/// Perform an HTTP GET by shelling out to `curl`.
///
/// Follows redirects (`-L`), captures the final status code via `-w`, and
/// writes response headers to a temp file. When `output_path` is given the
/// body is streamed there and `HttpResponse::body` is left empty; otherwise
/// the body is read back into memory.
fn curl_request(
    url: &str,
    accept: Option<&str>,
    auth: RegistryAuth<'_>,
    output_path: Option<&Path>,
) -> Result<HttpResponse, String> {
    let work_dir = temp_work_dir("supermachine-curl")?;
    let headers_path = work_dir.join("headers");
    let body_path = output_path
        .map(PathBuf::from)
        .unwrap_or_else(|| work_dir.join("body"));
    // Closure so the temp dir is removed on every exit path below.
    let result = (|| {
        let mut cmd = Command::new("curl");
        cmd.arg("-sS")
            .arg("-L")
            .arg("-w")
            .arg("%{http_code}")
            .arg("-D")
            .arg(&headers_path)
            .arg("-o")
            .arg(&body_path);
        if let Some(accept) = accept {
            cmd.arg("-H").arg(format!("Accept: {accept}"));
        }
        match auth {
            RegistryAuth::None => {}
            RegistryAuth::Bearer(token) => {
                cmd.arg("-H").arg(format!("Authorization: Bearer {token}"));
            }
            RegistryAuth::Basic(creds) => {
                cmd.arg("-H")
                    .arg(format!("Authorization: {}", creds.basic_header_value()));
            }
        }
        cmd.arg(url);
        let out = command_output(cmd, "curl")?;
        // stdout carries only the `-w` status code (body goes to body_path).
        let status = out
            .trim()
            .parse::<u16>()
            .map_err(|e| format!("parse curl status for {url}: {e}; output={out:?}"))?;
        let headers = std::fs::read_to_string(&headers_path)
            .map_err(|e| format!("read curl headers: {e}"))?;
        let body = if output_path.is_some() {
            Vec::new()
        } else {
            std::fs::read(&body_path).map_err(|e| format!("read curl body: {e}"))?
        };
        Ok(HttpResponse {
            status,
            headers,
            body,
        })
    })();
    let _ = std::fs::remove_dir_all(work_dir);
    result
}
/// Parse a `WWW-Authenticate: Bearer realm="...",service="...",scope="..."`
/// challenge into `(realm, service, scope)`; `realm` is required.
///
/// Splitting is quote-aware: a quoted parameter value may itself contain
/// commas (e.g. `scope="repository:foo:pull,push"`), which a naive
/// `split(',')` — as the original used — breaks on, failing the whole parse.
fn parse_bearer_challenge(headers: &str) -> Option<(String, String, String)> {
    let line = headers
        .lines()
        .find(|line| line.to_ascii_lowercase().starts_with("www-authenticate:"))?;
    let value = line.split_once(':')?.1.trim();
    let params = value.strip_prefix("Bearer ")?;
    // Split on commas that are outside double quotes.
    let mut parts: Vec<&str> = Vec::new();
    let mut start = 0;
    let mut in_quotes = false;
    for (i, c) in params.char_indices() {
        match c {
            '"' => in_quotes = !in_quotes,
            ',' if !in_quotes => {
                parts.push(&params[start..i]);
                start = i + 1;
            }
            _ => {}
        }
    }
    parts.push(&params[start..]);
    let mut realm = None;
    let mut service = None;
    let mut scope = None;
    for part in parts {
        let part = part.trim();
        if part.is_empty() {
            continue;
        }
        // Every non-empty parameter must be `key=value`.
        let (k, v) = part.split_once('=')?;
        let v = v.trim().trim_matches('"').to_owned();
        match k {
            "realm" => realm = Some(v),
            "service" => service = Some(v),
            "scope" => scope = Some(v),
            _ => {}
        }
    }
    Some((
        realm?,
        service.unwrap_or_default(),
        scope.unwrap_or_default(),
    ))
}
/// Percent-encode `s` for use in a URL query string, keeping only the
/// RFC 3986 unreserved characters (alphanumerics and `-._~`) literal.
fn url_encode_query(s: &str) -> String {
    let mut encoded = String::with_capacity(s.len());
    for byte in s.bytes() {
        match byte {
            b'0'..=b'9' | b'a'..=b'z' | b'A'..=b'Z' | b'-' | b'.' | b'_' | b'~' => {
                encoded.push(byte as char);
            }
            _ => {
                use std::fmt::Write;
                // Writing to a String cannot fail.
                let _ = write!(encoded, "%{byte:02X}");
            }
        }
    }
    encoded
}
/// Exchange a bearer challenge (realm/service/scope) for a registry token,
/// optionally authenticating the token request itself with Basic creds.
fn registry_token(
    realm: &str,
    service: &str,
    scope: &str,
    creds: Option<&RegistryCreds>,
) -> Result<String, String> {
    let mut url = format!("{realm}?service={}", url_encode_query(service));
    if !scope.is_empty() {
        url.push_str("&scope=");
        url.push_str(&url_encode_query(scope));
    }
    let auth = match creds {
        Some(c) => RegistryAuth::Basic(c),
        None => RegistryAuth::None,
    };
    let resp = curl_request(&url, None, auth, None)?;
    if resp.status >= 400 {
        return Err(format!(
            "registry token request failed with HTTP {} (creds={})",
            resp.status,
            if creds.is_some() { "yes" } else { "no" },
        ));
    }
    let json: serde_json::Value =
        serde_json::from_slice(&resp.body).map_err(|e| format!("registry token JSON: {e}"))?;
    // Some token servers use "token", others "access_token".
    json.get("token")
        .or_else(|| json.get("access_token"))
        .and_then(|v| v.as_str())
        .map(ToOwned::to_owned)
        .ok_or_else(|| "registry token response missing token".to_owned())
}
/// GET `https://<registry>/v2/<repository>/<path>`, transparently handling
/// the registry's 401 bearer-token challenge flow.
fn registry_request(
    image: &RegistryImageRef,
    path: &str,
    accept: Option<&str>,
    output_path: Option<&Path>,
) -> Result<HttpResponse, String> {
    let url = format!(
        "https://{}/v2/{}/{}",
        image.registry, image.repository, path
    );
    let creds = docker_config_auth(&image.registry);
    // Try anonymously first; only negotiate a token on a 401 challenge.
    let first = curl_request(&url, accept, RegistryAuth::None, output_path)?;
    if first.status != 401 {
        return Ok(first);
    }
    let (realm, service, scope) = parse_bearer_challenge(&first.headers)
        .ok_or_else(|| format!("registry auth challenge missing/unsupported for {url}"))?;
    let token = registry_token(&realm, &service, &scope, creds.as_ref())?;
    curl_request(&url, accept, RegistryAuth::Bearer(&token), output_path)
}
/// Resolve the linux/arm64 manifest and config for `image`, consulting the
/// short-lived ref cache first (unless `force_refresh`) and storing every
/// fetched blob in the content-addressed blob cache.
fn read_registry_manifest(
    image: &RegistryImageRef,
    force_refresh: bool,
) -> Result<RegistryManifest, String> {
    let ref_cache_ttl_ms = registry_ref_cache_ttl_ms();
    if !force_refresh {
        if let Some(cached) = read_registry_ref_cache(image, ref_cache_ttl_ms)? {
            return Ok(cached);
        }
    }
    let mut blob_cache_hits = 0usize;
    let mut blob_downloads = 0usize;
    let resp = registry_request(
        image,
        &format!("manifests/{}", image.reference),
        Some(registry_accept_header()),
        None,
    )?;
    if resp.status >= 400 {
        return Err(format!(
            "registry manifest request failed for {}/{}:{} with HTTP {}",
            image.registry, image.repository, image.reference, resp.status
        ));
    }
    let root: serde_json::Value =
        serde_json::from_slice(&resp.body).map_err(|e| format!("registry manifest JSON: {e}"))?;
    // The reference may name either a multi-arch index or a plain manifest.
    let manifest_bytes = if root.get("manifests").is_some() {
        // Index: descend into the linux/arm64 entry by digest.
        let desc = find_registry_manifest_descriptor(&root)?;
        let digest = desc
            .get("digest")
            .and_then(|v| v.as_str())
            .ok_or_else(|| "registry arm64 descriptor missing digest".to_owned())?;
        let (bytes, cache_hit) = read_or_fetch_registry_blob_bytes(
            image,
            digest,
            &format!("manifests/{digest}"),
            Some(registry_accept_header()),
        )?;
        if cache_hit {
            blob_cache_hits += 1;
        } else {
            blob_downloads += 1;
        }
        bytes
    } else {
        // Plain manifest: the response body itself is the manifest.
        let manifest_digest = sha256_bytes(&resp.body)?;
        store_registry_blob_bytes(&manifest_digest, &resp.body)?;
        blob_downloads += 1;
        resp.body
    };
    let manifest: serde_json::Value = serde_json::from_slice(&manifest_bytes)
        .map_err(|e| format!("registry arm64 manifest JSON: {e}"))?;
    let config_digest = manifest
        .get("config")
        .and_then(|v| v.get("digest"))
        .and_then(|v| v.as_str())
        .ok_or_else(|| "registry manifest missing config digest".to_owned())?
        .to_owned();
    let manifest_digest = sha256_bytes(&manifest_bytes)?;
    let (config_bytes, cache_hit) = read_or_fetch_registry_blob_bytes(
        image,
        &config_digest,
        &format!("blobs/{config_digest}"),
        None,
    )?;
    if cache_hit {
        blob_cache_hits += 1;
    } else {
        blob_downloads += 1;
    }
    // Validate that the config parses before recording the ref cache entry.
    serde_json::from_slice::<serde_json::Value>(&config_bytes)
        .map_err(|e| format!("registry config JSON: {e}"))?;
    write_registry_ref_cache(image, &manifest_digest, &config_digest)?;
    Ok(RegistryManifest {
        manifest_digest,
        manifest_bytes,
        manifest,
        config_digest,
        config_bytes,
        blob_cache_hits,
        blob_downloads,
        ref_cache_hit: false,
        ref_cache_age_ms: None,
        ref_cache_ttl_ms,
    })
}
/// Pick the linux/arm64 entry from a registry manifest index (manifest list).
fn find_registry_manifest_descriptor(
    index: &serde_json::Value,
) -> Result<serde_json::Value, String> {
    let manifests = index
        .get("manifests")
        .and_then(|v| v.as_array())
        .ok_or_else(|| "registry index missing manifests".to_owned())?;
    manifests
        .iter()
        .find(|desc| {
            // A missing platform.os is treated as "linux".
            descriptor_platform_arch(desc) == Some("arm64")
                && desc
                    .get("platform")
                    .and_then(|p| p.get("os"))
                    .and_then(|v| v.as_str())
                    .unwrap_or("linux")
                    == "linux"
        })
        .cloned()
        .ok_or_else(|| "registry image has no linux/arm64 manifest".to_owned())
}
/// Hex sha256 of `bytes`, computed by writing them to a temp file and going
/// through the same `sha256_file` path used elsewhere (avoids a hash crate).
fn sha256_bytes(bytes: &[u8]) -> Result<String, String> {
    let work_dir = temp_work_dir("supermachine-json-sha")?;
    let path = work_dir.join("blob");
    // Closure so the temp dir is removed on both success and failure.
    let result = (|| {
        std::fs::write(&path, bytes).map_err(|e| format!("write digest blob: {e}"))?;
        sha256_file(&path)
    })();
    let _ = std::fs::remove_dir_all(work_dir);
    result
}
/// Milliseconds since the Unix epoch; errs only if the clock predates 1970.
fn epoch_ms() -> Result<u128, String> {
    match SystemTime::now().duration_since(UNIX_EPOCH) {
        Ok(elapsed) => Ok(elapsed.as_millis()),
        Err(e) => Err(format!("system time before Unix epoch: {e}")),
    }
}
/// TTL for the registry ref cache, overridable via the
/// `SUPERMACHINE_REGISTRY_REF_CACHE_TTL_MS` environment variable.
/// Unset or unparseable values fall back to the 60s default.
fn registry_ref_cache_ttl_ms() -> u128 {
    const DEFAULT_TTL_MS: u128 = 60_000;
    match std::env::var("SUPERMACHINE_REGISTRY_REF_CACHE_TTL_MS") {
        Ok(raw) => raw.parse().unwrap_or(DEFAULT_TTL_MS),
        Err(_) => DEFAULT_TTL_MS,
    }
}
/// Directory holding cached registry ref -> digest mappings.
fn registry_ref_cache_dir() -> PathBuf {
    layer_cache_dir().join("registry/refs")
}
/// Cache file path for one (registry, repository, reference) tuple; keyed by
/// a sha256 of the tuple so arbitrary reference strings stay filesystem-safe.
fn registry_ref_cache_path(image: &RegistryImageRef) -> Result<PathBuf, String> {
    let key = sha256_text(&format!(
        "{}\n{}\n{}\nlinux/arm64\n",
        image.registry, image.repository, image.reference
    ))?;
    Ok(registry_ref_cache_dir().join(format!("{key}.json")))
}
/// Load a previously cached ref resolution if it is fresh (within `ttl_ms`)
/// and both referenced blobs are still present and self-consistent.
/// Returns `Ok(None)` for any recoverable miss (absent, stale, evicted).
fn read_registry_ref_cache(
    image: &RegistryImageRef,
    ttl_ms: u128,
) -> Result<Option<RegistryManifest>, String> {
    // A TTL of zero disables the ref cache entirely.
    if ttl_ms == 0 {
        return Ok(None);
    }
    let path = registry_ref_cache_path(image)?;
    let text = match std::fs::read_to_string(&path) {
        Ok(text) => text,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(format!("read registry ref cache {}: {e}", path.display())),
    };
    let json: serde_json::Value = serde_json::from_str(&text)
        .map_err(|e| format!("parse registry ref cache {}: {e}", path.display()))?;
    // Unknown versions are treated as a miss, not an error.
    if json.get("version").and_then(|v| v.as_u64()) != Some(1) {
        return Ok(None);
    }
    let created_ms = json
        .get("created_ms")
        .and_then(|v| v.as_u64())
        .ok_or_else(|| format!("registry ref cache {} missing created_ms", path.display()))?
        as u128;
    let age_ms = epoch_ms()?.saturating_sub(created_ms);
    if age_ms > ttl_ms {
        return Ok(None);
    }
    let manifest_digest = json
        .get("manifest_digest")
        .and_then(|v| v.as_str())
        .ok_or_else(|| {
            format!(
                "registry ref cache {} missing manifest_digest",
                path.display()
            )
        })?
        .to_owned();
    let config_digest = json
        .get("config_digest")
        .and_then(|v| v.as_str())
        .ok_or_else(|| {
            format!(
                "registry ref cache {} missing config_digest",
                path.display()
            )
        })?
        .to_owned();
    // The ref entry stores only digests; the bytes live in the blob cache and
    // may have been evicted independently — treat a missing blob as a miss.
    let manifest_path = registry_blob_cache_path(&manifest_digest)?;
    let config_path = registry_blob_cache_path(&config_digest)?;
    let manifest_bytes = match std::fs::read(&manifest_path) {
        Ok(bytes) => bytes,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => {
            return Err(format!(
                "read registry manifest cache {}: {e}",
                manifest_path.display()
            ))
        }
    };
    let config_bytes = match std::fs::read(&config_path) {
        Ok(bytes) => bytes,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => {
            return Err(format!(
                "read registry config cache {}: {e}",
                config_path.display()
            ))
        }
    };
    let manifest: serde_json::Value = serde_json::from_slice(&manifest_bytes)
        .map_err(|e| format!("registry cached manifest JSON: {e}"))?;
    let manifest_config_digest = manifest
        .get("config")
        .and_then(|v| v.get("digest"))
        .and_then(|v| v.as_str())
        .ok_or_else(|| "registry cached manifest missing config digest".to_owned())?;
    // Cross-check: the cached manifest must point at the cached config.
    if manifest_config_digest != config_digest {
        return Ok(None);
    }
    serde_json::from_slice::<serde_json::Value>(&config_bytes)
        .map_err(|e| format!("registry cached config JSON: {e}"))?;
    Ok(Some(RegistryManifest {
        manifest_digest,
        manifest_bytes,
        manifest,
        config_digest,
        config_bytes,
        // Both blobs came from the cache by construction.
        blob_cache_hits: 2,
        blob_downloads: 0,
        ref_cache_hit: true,
        ref_cache_age_ms: Some(age_ms),
        ref_cache_ttl_ms: ttl_ms,
    }))
}
/// Persist the resolved digests for `image` atomically (write temp file, then
/// rename), so concurrent readers never observe a partial entry.
fn write_registry_ref_cache(
    image: &RegistryImageRef,
    manifest_digest: &str,
    config_digest: &str,
) -> Result<(), String> {
    let path = registry_ref_cache_path(image)?;
    let dir = path
        .parent()
        .ok_or_else(|| format!("registry ref cache path has no parent: {}", path.display()))?;
    std::fs::create_dir_all(dir).map_err(|e| format!("create registry ref cache: {e}"))?;
    let json = serde_json::json!({
        "version": 1,
        "created_ms": epoch_ms()?,
        "registry": image.registry,
        "repository": image.repository,
        "reference": image.reference,
        "platform": {"os": "linux", "architecture": "arm64"},
        "manifest_digest": manifest_digest,
        "config_digest": config_digest,
    });
    // Unique temp name: pid + process-wide counter avoids collisions.
    let tmp = path.with_extension(format!(
        "json.{}.{}.tmp",
        std::process::id(),
        TEMP_COUNTER.fetch_add(1, Ordering::Relaxed)
    ));
    std::fs::write(
        &tmp,
        serde_json::to_vec_pretty(&json).map_err(|e| format!("encode registry ref cache: {e}"))?,
    )
    .map_err(|e| format!("write registry ref cache {}: {e}", tmp.display()))?;
    std::fs::rename(&tmp, &path).map_err(|e| {
        let _ = std::fs::remove_file(&tmp);
        format!(
            "install registry ref cache {} -> {}: {e}",
            tmp.display(),
            path.display()
        )
    })
}
/// Directory of content-addressed cached registry blobs.
fn registry_blob_cache_dir() -> PathBuf {
    layer_cache_dir().join("registry/blobs/sha256")
}
/// Cache path for a blob digest; rejects digests that would escape the cache
/// directory (empty or containing a path separator).
fn registry_blob_cache_path(digest: &str) -> Result<PathBuf, String> {
    let sha = strip_sha256(digest);
    if sha.is_empty() || sha.contains('/') {
        return Err(format!("unsupported registry blob digest: {digest}"));
    }
    Ok(registry_blob_cache_dir().join(sha))
}
/// Unique temp path inside the blob cache dir, so the final rename stays on
/// one filesystem (atomic) and concurrent processes never collide.
fn registry_blob_tmp_path(sha: &str) -> PathBuf {
    let unique = TEMP_COUNTER.fetch_add(1, Ordering::Relaxed);
    registry_blob_cache_dir().join(format!(".{sha}.{}.{}.tmp", std::process::id(), unique))
}
/// Insert `bytes` into the blob cache under digest `sha` after verifying the
/// content hash. Losing a rename race to another writer is fine: if the final
/// file exists, its content is identical by content-addressing.
fn store_registry_blob_bytes(sha: &str, bytes: &[u8]) -> Result<(), String> {
    let cache = registry_blob_cache_path(sha)?;
    if cache.is_file() {
        return Ok(());
    }
    // Verify before caching so a corrupt download never poisons the cache.
    let actual = sha256_bytes(bytes)?;
    if actual != strip_sha256(sha) {
        return Err(format!(
            "registry blob digest mismatch: expected {}, got {actual}",
            strip_sha256(sha)
        ));
    }
    std::fs::create_dir_all(registry_blob_cache_dir())
        .map_err(|e| format!("create registry blob cache: {e}"))?;
    let tmp = registry_blob_tmp_path(&strip_sha256(sha));
    std::fs::write(&tmp, bytes)
        .map_err(|e| format!("write registry cache {}: {e}", tmp.display()))?;
    match std::fs::rename(&tmp, &cache) {
        Ok(()) => Ok(()),
        // Lost the race: a concurrent writer installed an identical blob.
        Err(e) if cache.is_file() => {
            let _ = std::fs::remove_file(&tmp);
            let _ = e;
            Ok(())
        }
        Err(e) => {
            let _ = std::fs::remove_file(&tmp);
            Err(format!(
                "install registry cache {} -> {}: {e}",
                tmp.display(),
                cache.display()
            ))
        }
    }
}
/// Read a small blob (manifest/config) from the blob cache, or fetch it from
/// the registry and cache it. The returned boolean is `true` on a cache hit.
fn read_or_fetch_registry_blob_bytes(
    image: &RegistryImageRef,
    digest: &str,
    registry_path: &str,
    accept: Option<&str>,
) -> Result<(Vec<u8>, bool), String> {
    let cache = registry_blob_cache_path(digest)?;
    if cache.is_file() {
        let bytes = std::fs::read(&cache)
            .map_err(|e| format!("read registry cache {}: {e}", cache.display()))?;
        return Ok((bytes, true));
    }
    let resp = registry_request(image, registry_path, accept, None)?;
    if resp.status >= 400 {
        return Err(format!("registry blob {digest} HTTP {}", resp.status));
    }
    // store_ verifies the digest before the bytes enter the cache.
    store_registry_blob_bytes(digest, &resp.body)?;
    Ok((resp.body, false))
}
/// Ensure layer blob `digest` exists at `out`, copying from the blob cache or
/// streaming it from the registry (with digest verification) first.
/// Returns `true` when no download was needed.
fn copy_or_fetch_registry_blob_to_layout(
    image: &RegistryImageRef,
    digest: &str,
    out: &Path,
) -> Result<bool, String> {
    if out.is_file() {
        return Ok(true);
    }
    let cache = registry_blob_cache_path(digest)?;
    if cache.is_file() {
        std::fs::copy(&cache, out).map_err(|e| {
            format!(
                "copy registry cache {} -> {}: {e}",
                cache.display(),
                out.display()
            )
        })?;
        return Ok(true);
    }
    std::fs::create_dir_all(registry_blob_cache_dir())
        .map_err(|e| format!("create registry blob cache: {e}"))?;
    let sha = strip_sha256(digest);
    // Stream the (possibly large) layer straight to disk, not into memory.
    let tmp = registry_blob_tmp_path(&sha);
    let resp = registry_request(image, &format!("blobs/{digest}"), None, Some(&tmp))?;
    if resp.status >= 400 {
        let _ = std::fs::remove_file(&tmp);
        return Err(format!("registry layer blob {digest} HTTP {}", resp.status));
    }
    // Verify before the blob enters the cache.
    let actual = sha256_file(&tmp)?;
    if actual != sha {
        let _ = std::fs::remove_file(&tmp);
        return Err(format!(
            "registry blob digest mismatch: expected {sha}, got {actual}"
        ));
    }
    match std::fs::rename(&tmp, &cache) {
        Ok(()) => {}
        // Lost a race: the winner's file is identical by content-addressing.
        Err(e) if cache.is_file() => {
            let _ = std::fs::remove_file(&tmp);
            let _ = e;
        }
        Err(e) => {
            let _ = std::fs::remove_file(&tmp);
            return Err(format!(
                "install registry cache {} -> {}: {e}",
                tmp.display(),
                cache.display()
            ));
        }
    }
    std::fs::copy(&cache, out).map_err(|e| {
        format!(
            "copy registry cache {} -> {}: {e}",
            cache.display(),
            out.display()
        )
    })?;
    Ok(false)
}
/// Build a fresh OCI image layout at `layout` for the linux/arm64 variant of
/// `image`, pulling manifest/config/layers through the on-disk caches and
/// writing a single-entry `index.json`.
fn fetch_registry_to_oci_layout(
    image: &RegistryImageRef,
    layout: &Path,
    force_refresh: bool,
) -> Result<(), String> {
    // Start from a clean directory so stale blobs never leak into the layout.
    let _ = std::fs::remove_dir_all(layout);
    std::fs::create_dir_all(layout.join("blobs/sha256"))
        .map_err(|e| format!("create registry OCI layout {}: {e}", layout.display()))?;
    std::fs::write(
        layout.join("oci-layout"),
        r#"{"imageLayoutVersion":"1.0.0"}"#,
    )
    .map_err(|e| format!("write OCI layout marker: {e}"))?;
    let rm = read_registry_manifest(image, force_refresh)?;
    let mut blob_cache_hits = rm.blob_cache_hits;
    let mut blob_downloads = rm.blob_downloads;
    if trace_enabled() {
        eprintln!(
            "supermachine: registry ref cache_hit={} age_ms={} ttl_ms={} cache={}",
            rm.ref_cache_hit,
            rm.ref_cache_age_ms
                .map(|age| age.to_string())
                .unwrap_or_default(),
            rm.ref_cache_ttl_ms,
            registry_ref_cache_dir().display()
        );
    }
    // manifest_digest is bare hex; config_digest carries a "sha256:" prefix.
    write_oci_blob_bytes(layout, &rm.manifest_digest, &rm.manifest_bytes)?;
    write_oci_blob_bytes(layout, &strip_sha256(&rm.config_digest), &rm.config_bytes)?;
    let layers = rm
        .manifest
        .get("layers")
        .and_then(|v| v.as_array())
        .ok_or_else(|| "registry manifest missing layers".to_owned())?;
    for layer in layers {
        let digest = layer
            .get("digest")
            .and_then(|v| v.as_str())
            .ok_or_else(|| "registry layer missing digest".to_owned())?;
        let sha = strip_sha256(digest);
        let out = layout.join("blobs/sha256").join(&sha);
        // A digest may repeat within one image; skip already-written blobs.
        if out.is_file() {
            blob_cache_hits += 1;
            continue;
        }
        if copy_or_fetch_registry_blob_to_layout(image, digest, &out)? {
            blob_cache_hits += 1;
        } else {
            blob_downloads += 1;
        }
    }
    // Single-entry index pointing at the arm64 manifest we just wrote.
    let index = serde_json::json!({
        "schemaVersion": 2,
        "mediaType": "application/vnd.oci.image.index.v1+json",
        "manifests": [{
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "digest": format!("sha256:{}", rm.manifest_digest),
            "platform": {"architecture": "arm64", "os": "linux"}
        }]
    });
    std::fs::write(
        layout.join("index.json"),
        serde_json::to_vec_pretty(&index).map_err(|e| format!("encode OCI index: {e}"))?,
    )
    .map_err(|e| format!("write OCI index: {e}"))?;
    if trace_enabled() {
        eprintln!(
            "supermachine: registry blobs cache_hits={} downloads={} cache={}",
            blob_cache_hits,
            blob_downloads,
            registry_blob_cache_dir().display()
        );
    }
    Ok(())
}
/// Write `bytes` into the layout's `blobs/sha256/<sha>` slot. Blobs are
/// content-addressed, so an already-present file is left untouched.
fn write_oci_blob_bytes(layout: &Path, sha: &str, bytes: &[u8]) -> Result<(), String> {
    let dest = layout.join("blobs/sha256").join(sha);
    if dest.is_file() {
        return Ok(());
    }
    std::fs::write(&dest, bytes).map_err(|e| format!("write OCI blob {}: {e}", dest.display()))
}
/// Extract an OCI archive tarball into `<work_dir>/_oci` and validate that it
/// really is a layout (index.json at the archive root).
fn extract_oci_archive(archive: &Path, work_dir: &Path) -> Result<PathBuf, String> {
    let layout = work_dir.join("_oci");
    // Clear any previous extraction so stale files cannot mix in.
    let _ = std::fs::remove_dir_all(&layout);
    std::fs::create_dir_all(&layout)
        .map_err(|e| format!("create OCI archive extract dir {}: {e}", layout.display()))?;
    let mut tar = Command::new("tar");
    tar.arg("-xf").arg(archive).arg("-C").arg(&layout);
    run_status(tar, "extract OCI archive")?;
    if !layout.join("index.json").is_file() {
        return Err(format!(
            "OCI archive {} did not contain index.json at archive root",
            archive.display()
        ));
    }
    Ok(layout)
}
/// Path of a digest's blob inside an OCI layout; rejects digests that would
/// escape `blobs/sha256` (empty or containing a path separator).
fn blob_path(layout: &Path, digest: &str) -> Result<PathBuf, String> {
    let sha = strip_sha256(digest);
    if sha.is_empty() || sha.contains('/') {
        return Err(format!("unsupported OCI digest: {digest}"));
    }
    Ok(layout.join("blobs/sha256").join(sha))
}
/// Read and parse a JSON blob from an OCI layout; `label` names the blob
/// kind (manifest/config/...) in error messages.
fn read_oci_json_blob(
    layout: &Path,
    digest: &str,
    label: &str,
) -> Result<serde_json::Value, String> {
    let path = blob_path(layout, digest)?;
    let text = std::fs::read_to_string(&path)
        .map_err(|e| format!("read OCI {label} {}: {e}", path.display()))?;
    serde_json::from_str(&text).map_err(|e| format!("parse OCI {label} {}: {e}", path.display()))
}
/// `platform.architecture` of an OCI descriptor, if present.
fn descriptor_platform_arch(desc: &serde_json::Value) -> Option<&str> {
    desc.get("platform")
        .and_then(|p| p.get("architecture"))
        .and_then(|v| v.as_str())
}
/// Walk an OCI index (recursing through nested indexes up to 4 levels deep)
/// and return the first descriptor whose platform architecture is arm64.
fn find_oci_manifest_descriptor(
    layout: &Path,
    index: &serde_json::Value,
    depth: usize,
) -> Result<serde_json::Value, String> {
    // Guard against pathological/cyclic nesting.
    if depth > 4 {
        return Err("nested OCI image index too deep".to_owned());
    }
    let manifests = index
        .get("manifests")
        .and_then(|v| v.as_array())
        .ok_or_else(|| "OCI index missing manifests".to_owned())?;
    for desc in manifests {
        if descriptor_platform_arch(desc) == Some("arm64") {
            let media_type = desc
                .get("mediaType")
                .and_then(|v| v.as_str())
                .unwrap_or_default();
            // An arm64 entry may itself be another index; follow it.
            if media_type.contains("image.index") || media_type.contains("manifest.list") {
                let digest = desc
                    .get("digest")
                    .and_then(|v| v.as_str())
                    .ok_or_else(|| "nested OCI index descriptor missing digest".to_owned())?;
                let nested = read_oci_json_blob(layout, digest, "nested index")?;
                return find_oci_manifest_descriptor(layout, &nested, depth + 1);
            }
            return Ok(desc.clone());
        }
    }
    Err("OCI layout has no linux/arm64 image manifest".to_owned())
}
/// Produce `docker image inspect`-shaped metadata for the arm64 image in an
/// OCI layout, so downstream code can treat every image source uniformly.
fn inspect_oci_layout(image: &str, layout: &Path) -> Result<serde_json::Value, String> {
    let index_text = std::fs::read_to_string(layout.join("index.json")).map_err(|e| {
        format!(
            "read OCI layout index {}: {e}",
            layout.join("index.json").display()
        )
    })?;
    let index: serde_json::Value =
        serde_json::from_str(&index_text).map_err(|e| format!("parse OCI layout index: {e}"))?;
    let manifest_desc = find_oci_manifest_descriptor(layout, &index, 0)?;
    let manifest_digest = manifest_desc
        .get("digest")
        .and_then(|v| v.as_str())
        .ok_or_else(|| "OCI manifest descriptor missing digest".to_owned())?;
    let manifest = read_oci_json_blob(layout, manifest_digest, "manifest")?;
    let config_digest = manifest
        .get("config")
        .and_then(|v| v.get("digest"))
        .and_then(|v| v.as_str())
        .ok_or_else(|| "OCI manifest missing config digest".to_owned())?;
    let config = read_oci_json_blob(layout, config_digest, "config")?;
    // Runtime settings live under the image config's "config" key.
    let cfg = config
        .get("config")
        .cloned()
        .unwrap_or_else(|| serde_json::json!({}));
    let arch = config
        .get("architecture")
        .and_then(|v| v.as_str())
        .or_else(|| descriptor_platform_arch(&manifest_desc))
        .unwrap_or("arm64");
    // Shape mirrors `docker image inspect` fields consumed downstream.
    Ok(serde_json::json!({
        "Id": format!("sha256:{}", strip_sha256(config_digest)),
        "Architecture": arch,
        "RepoTags": [image],
        "Config": {
            "Env": cfg.get("Env").cloned().unwrap_or(serde_json::Value::Null),
            "Entrypoint": cfg.get("Entrypoint").cloned().unwrap_or(serde_json::Value::Null),
            "Cmd": cfg.get("Cmd").cloned().unwrap_or(serde_json::Value::Null),
            "WorkingDir": cfg.get("WorkingDir").cloned().unwrap_or(serde_json::Value::Null),
            "User": cfg.get("User").cloned().unwrap_or(serde_json::Value::Null),
        }
    }))
}
/// Borrowed view over a `BakeRequest`; derived helpers live on its impl.
struct BakePlan<'a> {
    image: &'a str,
    /// Explicit snapshot name, when the caller supplied one.
    name: Option<&'a str>,
    runtime: &'a str,
    guest_port: u16,
    memory_mib: u32,
    vcpus: u32,
    pull_policy: &'a str,
    snapshots_dir: &'a Path,
    /// Optional guest command override, when provided.
    cmd_override: Option<&'a str>,
    /// Remaining CLI flags, parsed lazily via arg_value/arg_values.
    extra_args: &'a [String],
}
impl<'a> BakePlan<'a> {
    /// Borrow every field out of a `BakeRequest` without cloning.
    fn from_request(request: &'a BakeRequest) -> Self {
        Self {
            image: &request.image,
            name: request.name.as_deref(),
            runtime: &request.runtime,
            guest_port: request.guest_port,
            memory_mib: request.memory_mib,
            vcpus: request.vcpus,
            pull_policy: &request.pull_policy,
            snapshots_dir: &request.snapshots_dir,
            cmd_override: request.cmd_override.as_deref(),
            extra_args: &request.extra_args,
        }
    }
    /// The explicit name when given, otherwise one derived from the image.
    fn snapshot_name(&self) -> String {
        match self.name {
            Some(name) => name.to_owned(),
            None => sanitized_snapshot_name(self.image),
        }
    }
    /// Location of this snapshot's metadata.json under the snapshots dir.
    fn metadata_path(&self) -> PathBuf {
        let mut path = self.snapshots_dir.join(self.snapshot_name());
        path.push("metadata.json");
        path
    }
    /// Build the external `supermachine-push` invocation for this plan.
    /// Fails when the bundled tool is missing under `root`.
    fn command(&self, root: &Path) -> Result<Command, String> {
        let tool = root.join("tools/supermachine-push");
        if !tool.is_file() {
            return Err(format!("missing image bake tool: {}", tool.display()));
        }
        let mut command = Command::new(tool);
        command
            .arg(self.image)
            .arg("--runtime")
            .arg(self.runtime)
            .arg("--port")
            .arg(self.guest_port.to_string())
            .arg("--memory")
            .arg(self.memory_mib.to_string())
            .arg("--pull")
            .arg("never")
            .env("SUPERMACHINE_SNAPSHOTS", self.snapshots_dir);
        if let Some(name) = self.name {
            command.arg("--name").arg(name);
        }
        command.args(self.extra_args);
        Ok(command)
    }
}
/// Everything learned while resolving the image (pull decision + inspect).
struct ImageResolution {
    /// Architecture reported for a locally-present image, if any.
    local_arch: Option<String>,
    /// Architecture from the inspect result.
    architecture: Option<String>,
    /// Content-addressed image id (typically "sha256:<hex>").
    image_id: Option<String>,
    /// Command the guest will run after resolution/overrides.
    effective_cmd: Vec<String>,
    /// Working directory from the image config, if set.
    working_dir: Option<String>,
    /// User from the image config, if set.
    user: Option<String>,
    /// "KEY=VALUE" environment entries from the image config.
    env: Vec<String>,
    /// Number of env entries (surfaced in trace output).
    env_count: usize,
    /// Short label describing the pull decision (emitted in trace output).
    pull_action: String,
    /// Wall time spent inspecting, in milliseconds.
    inspect_ms: u128,
}
/// Result of layer planning: which layer squashfs artifacts already exist
/// in the cache and which still need to be built.
struct LayerPlan {
    /// Root of the layer cache (see `layer_cache_dir`).
    cache_dir: PathBuf,
    /// Per-image index file listing layer shas, when an image id was known.
    index_path: Option<PathBuf>,
    /// Ordered layer digests (bare hex, `sha256:` prefix stripped).
    layer_shas: Vec<String>,
    /// Scratch dir holding a fresh image save, if one was needed.
    save_work_dir: Option<PathBuf>,
    /// The saved OCI layout inside `save_work_dir`, if one was produced.
    save_dir: Option<PathBuf>,
    /// Layers already present in the cache at planning time.
    cached_layers: usize,
    /// Layers still to be built.
    missing_layers: usize,
    /// True when the layer list came from the index file (no image save).
    manifest_cache_hit: bool,
    /// Wall time spent planning, in milliseconds.
    plan_ms: u128,
}
/// Outcome of building missing layer squashfs artifacts.
struct LayerMaterialization {
    /// Wall time spent materializing, in milliseconds.
    materialize_ms: u128,
    /// Layers built by this run.
    built_layers: usize,
    /// Layers found already present (including ones built concurrently).
    reused_layers: usize,
}
/// Outcome of preparing the delta cache for a bake.
struct DeltaMaterialization {
    /// Time spent preparing inputs, in milliseconds.
    prepare_ms: u128,
    /// Time spent materializing the delta, in milliseconds.
    materialize_ms: u128,
    /// True when an existing cached delta was reused.
    cache_hit: bool,
    /// Reason the delta step was skipped, when it was.
    skipped: Option<String>,
    /// Cache key, when computed (truncated in trace output).
    key: Option<String>,
    /// On-disk location of the cached delta, when known.
    cache_path: Option<PathBuf>,
}
/// Summary returned by the native bake / snapshot-reuse paths.
struct NativeBakeResult {
    /// Total wall time of the native path, in milliseconds.
    total_ms: u128,
    /// Structured per-stage timings, printed in trace output.
    timings: serde_json::Value,
    /// True when an existing snapshot was reused instead of re-baking.
    reused: bool,
}
/// Process-wide counter used to make scratch paths unique (see
/// `temp_work_dir` and `materialize_missing_layers`).
static TEMP_COUNTER: AtomicU64 = AtomicU64::new(0);
/// Print a single-line trace summarizing how the image was resolved.
fn emit_image_resolution_trace(resolution: &ImageResolution) {
    let cmd_json = serde_json::to_string(&resolution.effective_cmd)
        .unwrap_or_else(|_| "[]".to_owned());
    // Image ids are hex, so truncating at 16 bytes is a safe char boundary.
    let short_id = resolution
        .image_id
        .as_deref()
        .map(|id| &id[..id.len().min(16)])
        .unwrap_or("");
    eprintln!(
        "supermachine: image resolved inspect_ms={} pull={} local_arch={} arch={} image_id={} env_count={} cmd={}",
        resolution.inspect_ms,
        resolution.pull_action,
        resolution.local_arch.as_deref().unwrap_or(""),
        resolution.architecture.as_deref().unwrap_or(""),
        short_id,
        resolution.env_count,
        cmd_json
    );
}
/// Bake `request.image` into a runnable snapshot.
///
/// High-level flow (each stage may short-circuit with an early `Ok(())`):
///   1. probe for a fast cache hit (native supermachine runtime only),
///   2. resolve the image (pull decision + inspect),
///   3. try to reuse an existing snapshot keyed on the resolved image,
///   4. plan/materialize per-layer squashfs artifacts and the delta cache,
///   5. run the native bake, or fall back to the external
///      `supermachine-push` tool for non-native runtimes.
///
/// `run_t0` anchors the "total=…ms" figures in trace output; `root` is the
/// install/dev tree used to locate bundled tools.
pub fn run_push(request: &BakeRequest, run_t0: Instant, root: &Path) -> Result<(), String> {
    let plan = BakePlan::from_request(request);
    if trace_enabled() {
        eprintln!(
            "supermachine: bake plan image={} name={} runtime={} port={} memory={}MiB snapshots={} pull={}",
            plan.image,
            plan.snapshot_name(),
            plan.runtime,
            plan.guest_port,
            plan.memory_mib,
            plan.snapshots_dir.display(),
            plan.pull_policy
        );
    }
    // The native bake tail is on by default; SUPERMACHINE_NATIVE_BAKE_TAIL=0
    // (or "false") opts out, forcing the external-tool path at the bottom.
    if plan.runtime == "supermachine"
        && std::env::var("SUPERMACHINE_NATIVE_BAKE_TAIL")
            .map(|v| v != "0" && v != "false")
            .unwrap_or(true)
    {
        // Cheapest exit: a cache hit that needs no image resolution at all.
        if let Some(result) = try_fast_cache_hit(&plan, root, run_t0)? {
            if trace_enabled() {
                eprintln!(
                    "supermachine: fast cache-hit after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
                eprintln!("supermachine: bake timings {}", result.timings);
            }
            return Ok(());
        }
    }
    let source = select_image_source(plan.image)?;
    let resolution = resolve_image(&plan, source.as_ref())?;
    if trace_enabled() {
        emit_image_resolution_trace(&resolution);
    }
    // Second native-tail gate (same env check): with the image resolved we
    // may reuse an existing snapshot before doing any layer work.
    if plan.runtime == "supermachine"
        && std::env::var("SUPERMACHINE_NATIVE_BAKE_TAIL")
            .map(|v| v != "0" && v != "false")
            .unwrap_or(true)
    {
        if let Some(result) = native_supermachine_early_reuse_snapshot(&plan, &resolution, root, run_t0)?
        {
            if trace_enabled() {
                eprintln!(
                    "supermachine: native bake reused snapshot after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
                eprintln!("supermachine: bake timings {}", result.timings);
                eprintln!(
                    "supermachine: push finished after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
            }
            return Ok(());
        }
    }
    let layer_plan = plan_layers(&plan, &resolution, source.as_ref())?;
    // Materialize layers, but hold off on `?` until the save work dir has
    // been removed, so a failure does not leak the scratch directory.
    let layer_materialization_result = match layer_plan.as_ref() {
        Some(layer_plan) => {
            materialize_missing_layers(plan.image, layer_plan, source.as_ref()).map(Some)
        }
        None => Ok(None),
    };
    if let Some(work_dir) = layer_plan
        .as_ref()
        .and_then(|layer_plan| layer_plan.save_work_dir.as_deref())
    {
        let _ = std::fs::remove_dir_all(work_dir);
    }
    let layer_materialization = layer_materialization_result?;
    let delta_materialization = materialize_delta_cache(&plan, &resolution, root)?;
    if trace_enabled() {
        if let Some(layer_plan) = layer_plan.as_ref() {
            // Layer shas are hex, so a 16-byte cut is a safe char boundary.
            let first_layer = layer_plan
                .layer_shas
                .first()
                .map(|s| &s[..s.len().min(16)])
                .unwrap_or("");
            eprintln!(
                "supermachine: layer plan plan_ms={} layers={} cached={} missing={} manifest_cache_hit={} cache={} index={} first_layer={}",
                layer_plan.plan_ms,
                layer_plan.layer_shas.len(),
                layer_plan.cached_layers,
                layer_plan.missing_layers,
                layer_plan.manifest_cache_hit,
                layer_plan.cache_dir.display(),
                layer_plan
                    .index_path
                    .as_deref()
                    .map(|p| p.display().to_string())
                    .unwrap_or_default(),
                first_layer
            );
        }
        if let Some(materialization) = layer_materialization.as_ref() {
            eprintln!(
                "supermachine: layer materialize materialize_ms={} built={} reused={}",
                materialization.materialize_ms,
                materialization.built_layers,
                materialization.reused_layers
            );
        }
        if let Some(delta) = delta_materialization.as_ref() {
            eprintln!(
                "supermachine: delta materialize prepare_ms={} materialize_ms={} cache_hit={} skipped={} key={} cache={}",
                delta.prepare_ms,
                delta.materialize_ms,
                delta.cache_hit,
                delta.skipped.as_deref().unwrap_or(""),
                delta
                    .key
                    .as_deref()
                    .map(|key| &key[..key.len().min(16)])
                    .unwrap_or(""),
                delta
                    .cache_path
                    .as_deref()
                    .map(|path| path.display().to_string())
                    .unwrap_or_default()
            );
        }
    }
    // Native bake tail: requires both the layer plan and the delta cache to
    // have been produced above (both are always Some for this runtime).
    if plan.runtime == "supermachine"
        && std::env::var("SUPERMACHINE_NATIVE_BAKE_TAIL")
            .map(|v| v != "0" && v != "false")
            .unwrap_or(true)
    {
        let layer_plan = layer_plan
            .as_ref()
            .ok_or_else(|| "missing layer plan".to_owned())?;
        let delta = delta_materialization
            .as_ref()
            .ok_or_else(|| "missing delta cache".to_owned())?;
        let (native_bake_key, native_bake_inputs) =
            native_supermachine_bake_key(&plan, &resolution, layer_plan, delta, root)?;
        // Last reuse opportunity, now keyed on the full set of bake inputs.
        if let Some(result) = native_supermachine_reuse_snapshot(&plan, &native_bake_key, run_t0)? {
            if trace_enabled() {
                eprintln!(
                    "supermachine: native bake reused snapshot after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
                eprintln!("supermachine: bake timings {}", result.timings);
                eprintln!(
                    "supermachine: push finished after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
            }
            return Ok(());
        }
        let native_t0 = Instant::now();
        let result = run_native_supermachine_bake(
            &plan,
            &resolution,
            layer_plan,
            delta,
            root,
            &native_bake_key,
            &native_bake_inputs,
        )?;
        if trace_enabled() {
            if result.reused {
                eprintln!(
                    "supermachine: native bake reused snapshot after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
            } else {
                eprintln!(
                    "supermachine: native bake finished after {}ms total={}ms",
                    result.total_ms,
                    elapsed_ms(run_t0)
                );
            }
            eprintln!("supermachine: bake timings {}", result.timings);
        }
        if trace_enabled() {
            eprintln!(
                "supermachine: push finished after {}ms total={}ms",
                elapsed_ms(native_t0),
                elapsed_ms(run_t0)
            );
        }
        return Ok(());
    }
    // Fallback: delegate the bake to the external supermachine-push tool.
    let mut cmd = plan.command(root)?;
    let push_t0 = Instant::now();
    let status = cmd
        .status()
        .map_err(|e| format!("supermachine-push: {e}"))?;
    let push_ms = elapsed_ms(push_t0);
    if trace_enabled() {
        eprintln!(
            "supermachine: push finished after {}ms total={}ms",
            push_ms,
            elapsed_ms(run_t0)
        );
    }
    if status.success() {
        emit_bake_timing_metadata(&plan);
        Ok(())
    } else {
        Err(format!(
            "supermachine-push failed with exit code {}",
            status.code().unwrap_or(1)
        ))
    }
}
/// Join `path` onto `$HOME`, falling back to the current directory when the
/// HOME environment variable is unset.
fn home_join(path: &str) -> PathBuf {
    let base = match std::env::var_os("HOME") {
        Some(home) => PathBuf::from(home),
        None => PathBuf::from("."),
    };
    base.join(path)
}
/// Directory holding per-layer squashfs artifacts; overridable via the
/// SUPERMACHINE_LAYER_CACHE environment variable.
fn layer_cache_dir() -> PathBuf {
    match std::env::var_os("SUPERMACHINE_LAYER_CACHE") {
        Some(dir) => PathBuf::from(dir),
        None => home_join(".local/supermachine-layer-cache"),
    }
}
/// Directory holding named volume image files; overridable via the
/// SUPERMACHINE_VOLUMES environment variable.
fn volumes_dir() -> PathBuf {
    match std::env::var_os("SUPERMACHINE_VOLUMES") {
        Some(dir) => PathBuf::from(dir),
        None => home_join(".local/supermachine/volumes"),
    }
}
/// One `--volume HOST:GUEST` mapping: a host backing file plus the absolute
/// mount path inside the guest.
#[derive(Debug, Clone)]
pub(crate) struct VolumeMapping {
    /// Backing file on the host — either the path as given, or
    /// `<name>.img` under the volumes directory for bare volume names.
    pub host_file: PathBuf,
    /// Absolute mount point inside the guest.
    pub guest_path: String,
}
/// Parse every `--volume HOST:GUEST` pair out of the extra CLI arguments.
///
/// HOST is either a path (contains `/` or starts with `.`) used verbatim, or
/// a bare volume name that maps to `<name>.img` under the volumes directory.
/// GUEST must be an absolute path without newlines.
pub(crate) fn parse_volume_args(extra_args: &[String]) -> Result<Vec<VolumeMapping>, String> {
    let mut mappings = Vec::new();
    for raw in arg_values(extra_args, "--volume") {
        let Some((host, guest)) = raw.split_once(':') else {
            return Err(format!("--volume expects HOST:GUEST, got {raw:?}"));
        };
        if host.is_empty() || guest.is_empty() {
            return Err(format!("--volume HOST:GUEST has empty side: {raw:?}"));
        }
        if !guest.starts_with('/') {
            return Err(format!("--volume guest path must be absolute: {guest:?}"));
        }
        if guest.contains('\n') || guest.contains('\r') {
            return Err(format!("--volume guest path contains newline: {guest:?}"));
        }
        let treat_as_path = host.contains('/') || host.starts_with('.');
        let host_file = if treat_as_path {
            PathBuf::from(host)
        } else {
            // Bare names become files under the volumes dir; forbid anything
            // that could change the resulting path's meaning.
            if host.chars().any(|c| matches!(c, '/' | '\\' | ':' | '\0')) {
                return Err(format!("--volume name {host:?} contains forbidden chars"));
            }
            volumes_dir().join(format!("{host}.img"))
        };
        mappings.push(VolumeMapping {
            host_file,
            guest_path: guest.to_owned(),
        });
    }
    Ok(mappings)
}
/// Find an ext4 formatter: `mkfs.ext4`/`mke2fs` resolved via `which`, then
/// a known absolute fallback, then the Android SDK's bundled `mke2fs`
/// (useful on macOS hosts where e2fsprogs may be absent).
///
/// Returns None when nothing usable is found. (The old trailing
/// `if android.is_some() { return android; } None` collapsed to just
/// returning the Option.)
fn locate_mke2fs() -> Option<PathBuf> {
    for candidate in ["mkfs.ext4", "mke2fs", "/usr/sbin/mkfs.ext4"] {
        if let Ok(out) = Command::new("which").arg(candidate).output() {
            if out.status.success() {
                let resolved = String::from_utf8_lossy(&out.stdout).trim().to_owned();
                if !resolved.is_empty() {
                    return Some(PathBuf::from(resolved));
                }
            }
        }
    }
    // Android platform-tools ship a standalone mke2fs binary.
    std::env::var_os("HOME")
        .map(|h| PathBuf::from(h).join("Library/Android/sdk/platform-tools/mke2fs"))
        .filter(|p| p.is_file())
}
/// Make sure the host backing file for a volume exists, is at least
/// `size_bytes` long, and carries an ext4 filesystem.
///
/// An existing file that is already large enough is accepted untouched; a
/// smaller existing file is an error (in-place growth is not attempted).
/// A fresh file is created at the requested size and formatted with mke2fs.
pub(crate) fn ensure_volume_host_file(
    mapping: &VolumeMapping,
    size_bytes: u64,
) -> Result<(), String> {
    let path = &mapping.host_file;
    if let Some(parent) = path.parent() {
        std::fs::create_dir_all(parent)
            .map_err(|e| format!("create volume dir {}: {e}", parent.display()))?;
    }
    let already_existed = path.is_file();
    if already_existed {
        let len = std::fs::metadata(path)
            .map_err(|e| format!("stat {}: {e}", path.display()))?
            .len();
        if len >= size_bytes {
            // Large enough: reuse as-is, contents and filesystem untouched.
            return Ok(());
        }
        return Err(format!(
            "volume {} exists at {len} bytes, smaller than requested {size_bytes}; \
            remove it or pass a smaller size",
            path.display()
        ));
    }
    // Create the backing file and size it via set_len (sparse where the
    // filesystem supports it).
    let f = std::fs::OpenOptions::new()
        .read(true)
        .write(true)
        .create(true)
        .open(path)
        .map_err(|e| format!("create {}: {e}", path.display()))?;
    f.set_len(size_bytes)
        .map_err(|e| format!("truncate {}: {e}", path.display()))?;
    drop(f);
    let mke2fs = locate_mke2fs().ok_or_else(|| {
        format!(
            "no `mke2fs` / `mkfs.ext4` on PATH; install e2fsprogs (\
            `brew install e2fsprogs`) so volume {} can be formatted",
            path.display()
        )
    })?;
    // Format as ext4; -F forces operation on a regular file.
    let mut cmd = Command::new(&mke2fs);
    cmd.arg("-t")
        .arg("ext4")
        .arg("-F")
        .arg("-L")
        .arg("supermachine-vol")
        .arg(path)
        .stdout(Stdio::null())
        .stderr(Stdio::piped());
    let out = cmd
        .output()
        .map_err(|e| format!("spawn {}: {e}", mke2fs.display()))?;
    if !out.status.success() {
        // Remove the half-made file so the next attempt starts clean.
        let _ = std::fs::remove_file(path);
        return Err(format!(
            "{} {}: exit {:?}: {}",
            mke2fs.display(),
            path.display(),
            out.status.code(),
            String::from_utf8_lossy(&out.stderr).trim()
        ));
    }
    Ok(())
}
/// Verbose tracing is enabled whenever SUPERMACHINE_RUN_TRACE is set,
/// regardless of its value.
fn trace_enabled() -> bool {
    std::env::var_os("SUPERMACHINE_RUN_TRACE").is_some()
}
/// Milliseconds elapsed since `t0`.
fn elapsed_ms(t0: Instant) -> u128 {
    t0.elapsed().as_millis()
}
/// Public wrapper: the default snapshot name derived from an image
/// reference (see `sanitized_snapshot_name`).
pub fn snapshot_name_for_image(image: &str) -> String {
    sanitized_snapshot_name(image)
}
/// Make an image reference filesystem-safe: `:`, `/` and `.` become `_`
/// and the result is truncated to at most 60 characters.
fn sanitized_snapshot_name(image: &str) -> String {
    let mut name = String::with_capacity(image.len().min(60));
    for c in image.chars().take(60) {
        name.push(match c {
            ':' | '/' | '.' => '_',
            other => other,
        });
    }
    name
}
/// After a successful external bake, surface the "timings" object recorded
/// in the snapshot's metadata.json (trace mode only). Best-effort: read or
/// parse failures are silently ignored.
fn emit_bake_timing_metadata(plan: &BakePlan<'_>) {
    if !trace_enabled() {
        return;
    }
    let meta_path = plan.metadata_path();
    let parsed = std::fs::read_to_string(&meta_path)
        .ok()
        .and_then(|text| serde_json::from_str::<serde_json::Value>(&text).ok());
    if let Some(json) = parsed {
        if let Some(timings) = json.get("timings") {
            eprintln!("supermachine: bake timings {timings}");
        }
    }
}
/// Run `cmd` and return its stdout on success.
///
/// On failure the error carries `label` plus the first non-empty line of
/// stderr (falling back to stdout, then to a generic message).
fn command_output(mut cmd: Command, label: &str) -> Result<String, String> {
    let output = cmd.output().map_err(|e| format!("{label}: {e}"))?;
    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        let stdout = String::from_utf8_lossy(&output.stdout);
        let detail = stderr
            .lines()
            .chain(stdout.lines())
            .find(|line| !line.trim().is_empty())
            .unwrap_or("command failed");
        return Err(format!("{label}: {detail}"));
    }
    String::from_utf8(output.stdout).map_err(|e| format!("{label}: non-utf8 stdout: {e}"))
}
/// Run `cmd` to completion; a non-zero exit code becomes an Err labelled
/// with `label` (a signal death reports exit code 1).
fn run_status(mut cmd: Command, label: &str) -> Result<(), String> {
    let status = cmd.status().map_err(|e| format!("{label}: {e}"))?;
    if !status.success() {
        return Err(format!(
            "{label} failed with exit code {}",
            status.code().unwrap_or(1)
        ));
    }
    Ok(())
}
/// Run `cmd` and return its exit status without interpreting it.
///
/// Unlike `run_status`, a non-zero exit is NOT an error here — only a
/// failure to spawn is. Callers decide what to do with the status.
fn spawn_status(mut cmd: Command, label: &str) -> Result<std::process::ExitStatus, String> {
    cmd.status().map_err(|e| format!("{label}: {e}"))
}
/// Normalize a JSON value into a list of strings: arrays keep their string
/// elements, a non-empty string becomes a one-element list, everything
/// else (including None and empty strings) is empty.
fn value_string_array(value: Option<&serde_json::Value>) -> Vec<String> {
    match value {
        Some(serde_json::Value::Array(items)) => items
            .iter()
            .filter_map(|item| item.as_str())
            .map(ToOwned::to_owned)
            .collect(),
        Some(serde_json::Value::String(s)) if !s.is_empty() => vec![s.clone()],
        _ => Vec::new(),
    }
}
/// Create a unique scratch directory under the system temp dir, named from
/// `prefix`, the process id, and a monotonically increasing counter.
fn temp_work_dir(prefix: &str) -> Result<PathBuf, String> {
    let counter = TEMP_COUNTER.fetch_add(1, Ordering::Relaxed);
    let dir = std::env::temp_dir().join(format!("{prefix}-{}-{counter}", std::process::id()));
    std::fs::create_dir_all(&dir)
        .map_err(|e| format!("create temp dir {}: {e}", dir.display()))?;
    Ok(dir)
}
/// Drop a leading `sha256:` prefix, if present, returning an owned string.
fn strip_sha256(s: &str) -> String {
    match s.strip_prefix("sha256:") {
        Some(rest) => rest.to_owned(),
        None => s.to_owned(),
    }
}
/// Load a cached layer index file (one layer sha per line).
///
/// Returns Ok(None) when the file is absent, lists nothing, or when any
/// listed layer's squashfs is missing from `cache_dir` (a stale index).
fn read_layer_index(path: &Path, cache_dir: &Path) -> Result<Option<Vec<String>>, String> {
    let text = match std::fs::read_to_string(path) {
        Ok(text) => text,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(format!("read layer index {}: {e}", path.display())),
    };
    let shas: Vec<String> = text
        .lines()
        .filter_map(|line| {
            let line = line.trim();
            if line.is_empty() {
                None
            } else {
                Some(line.to_owned())
            }
        })
        .collect();
    if shas.is_empty() {
        return Ok(None);
    }
    let all_cached = shas
        .iter()
        .all(|sha| cache_dir.join(format!("{sha}.squashfs")).is_file());
    if all_cached {
        Ok(Some(shas))
    } else {
        Ok(None)
    }
}
/// Ordered arm64 layer digests (bare hex) from a saved OCI layout.
///
/// Follows index.json to the first arm64 manifest descriptor, reads that
/// manifest blob, and collects its layers' digests.
fn layer_shas_from_save_dir(image: &str, save_dir: &Path) -> Result<Vec<String>, String> {
    let index_text = std::fs::read_to_string(save_dir.join("index.json"))
        .map_err(|e| format!("read image save index.json: {e}"))?;
    let index: serde_json::Value =
        serde_json::from_str(&index_text).map_err(|e| format!("image save index JSON: {e}"))?;
    let arm64_digest = index
        .get("manifests")
        .and_then(|v| v.as_array())
        .and_then(|descriptors| {
            descriptors.iter().find_map(|descriptor| {
                let arch = descriptor
                    .get("platform")
                    .and_then(|p| p.get("architecture"))
                    .and_then(|v| v.as_str());
                if arch != Some("arm64") {
                    return None;
                }
                descriptor
                    .get("digest")
                    .and_then(|v| v.as_str())
                    .map(strip_sha256)
            })
        })
        .ok_or_else(|| format!("no arm64 manifest in image {image}"))?;
    let manifest_path = save_dir.join("blobs/sha256").join(&arm64_digest);
    let manifest_text = std::fs::read_to_string(&manifest_path)
        .map_err(|e| format!("read image save manifest {}: {e}", manifest_path.display()))?;
    let manifest: serde_json::Value = serde_json::from_str(&manifest_text)
        .map_err(|e| format!("image save manifest JSON: {e}"))?;
    let layers = manifest
        .get("layers")
        .and_then(|v| v.as_array())
        .ok_or_else(|| "image save manifest missing layers".to_owned())?;
    let shas: Vec<String> = layers
        .iter()
        .filter_map(|layer| {
            layer
                .get("digest")
                .and_then(|v| v.as_str())
                .map(strip_sha256)
        })
        .collect();
    if shas.is_empty() {
        return Err(format!("image {image} has no arm64 layers"));
    }
    Ok(shas)
}
/// Decide which image layers need squashfs artifacts built.
///
/// Returns None for non-supermachine runtimes. When the image id is known,
/// a per-image index file caches the ordered layer digests so the image
/// does not have to be re-saved; otherwise the image is saved to a scratch
/// dir, which is retained in the plan for the materialize step to reuse
/// (run_push removes it afterwards).
fn plan_layers(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    source: &dyn ImageSource,
) -> Result<Option<LayerPlan>, String> {
    if plan.runtime != "supermachine" {
        return Ok(None);
    }
    let t0 = Instant::now();
    let cache_dir = layer_cache_dir();
    let index_dir = cache_dir.join("images");
    std::fs::create_dir_all(&index_dir)
        .map_err(|e| format!("create layer index dir {}: {e}", index_dir.display()))?;
    std::fs::create_dir_all(cache_dir.join("deltas"))
        .map_err(|e| format!("create layer delta cache dir: {e}"))?;
    let index_path = resolution
        .image_id
        .as_deref()
        .map(|id| index_dir.join(format!("{id}.arm64.layers")));
    let mut manifest_cache_hit = false;
    let mut save_work_dir = None;
    let mut save_dir = None;
    let layer_shas = if let Some(path) = index_path.as_deref() {
        if let Some(shas) = read_layer_index(path, &cache_dir)? {
            // Index file present and every listed layer is already cached.
            manifest_cache_hit = true;
            shas
        } else {
            // Stale or absent index: save the image, read its manifest,
            // and rewrite the index for next time.
            let work_dir = temp_work_dir("supermachine-layer-plan")?;
            let saved_dir = source.save_arm64(plan.image, &work_dir)?;
            let shas = layer_shas_from_save_dir(plan.image, &saved_dir)?;
            std::fs::write(path, format!("{}\n", shas.join("\n")))
                .map_err(|e| format!("write layer index {}: {e}", path.display()))?;
            save_work_dir = Some(work_dir);
            save_dir = Some(saved_dir);
            shas
        }
    } else {
        // No image id to key an index on: always save and inspect.
        let work_dir = temp_work_dir("supermachine-layer-plan")?;
        let saved_dir = source.save_arm64(plan.image, &work_dir)?;
        let shas = layer_shas_from_save_dir(plan.image, &saved_dir)?;
        save_work_dir = Some(work_dir);
        save_dir = Some(saved_dir);
        shas
    };
    let cached_layers = layer_shas
        .iter()
        .filter(|sha| cache_dir.join(format!("{sha}.squashfs")).is_file())
        .count();
    let missing_layers = layer_shas.len().saturating_sub(cached_layers);
    Ok(Some(LayerPlan {
        cache_dir,
        index_path,
        layer_shas,
        save_work_dir,
        save_dir,
        cached_layers,
        missing_layers,
        manifest_cache_hit,
        plan_ms: elapsed_ms(t0),
    }))
}
/// Recursively apply OCI whiteout markers under `root`.
///
/// For every `.wh.NAME` entry, the shadowed sibling `NAME` is deleted
/// (tried as a directory first, then as a file), then the marker itself is
/// removed. NOTE(review): the opaque marker `.wh..wh..opq` is only deleted
/// here — its "hide all lower-layer entries" semantics are not applied;
/// presumably handled by how layers are stacked elsewhere — confirm.
fn remove_oci_whiteouts(root: &Path) -> Result<(), String> {
    let entries =
        std::fs::read_dir(root).map_err(|e| format!("read layer dir {}: {e}", root.display()))?;
    for entry in entries {
        let entry = entry.map_err(|e| format!("read layer dir entry {}: {e}", root.display()))?;
        let path = entry.path();
        let file_type = entry
            .file_type()
            .map_err(|e| format!("stat layer path {}: {e}", path.display()))?;
        if file_type.is_dir() {
            // Depth-first so nested whiteouts are handled before the parent.
            remove_oci_whiteouts(&path)?;
        }
        let name = entry.file_name();
        let Some(name) = name.to_str() else {
            // Non-UTF-8 names cannot match the ".wh." marker prefix.
            continue;
        };
        if !name.starts_with(".wh.") {
            continue;
        }
        if name != ".wh..wh..opq" {
            // Delete the shadowed target; a missing target is fine.
            let target = path.with_file_name(name.trim_start_matches(".wh."));
            match std::fs::remove_dir_all(&target) {
                Ok(()) => {}
                Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
                Err(dir_err) => {
                    // Not removable as a directory: retry as a plain file,
                    // reporting both errors if that fails too.
                    std::fs::remove_file(&target).map_err(|e2| {
                        format!(
                            "remove whiteout target {}: {dir_err}; {e2}",
                            target.display()
                        )
                    })?;
                }
            }
        }
        std::fs::remove_file(&path)
            .map_err(|e| format!("remove OCI whiteout {}: {e}", path.display()))?;
    }
    Ok(())
}
/// Build squashfs artifacts for every layer the plan marked missing.
///
/// Reuses the image save from planning when available, otherwise re-saves
/// into a scratch dir (always cleaned up at the end). Each missing layer is
/// extracted, stripped of OCI whiteouts, squashed to a temp file and then
/// atomically renamed into the cache — so concurrent bakes can race safely;
/// a layer that appears mid-build is counted as reused, not built.
fn materialize_missing_layers(
    image: &str,
    layer_plan: &LayerPlan,
    source: &dyn ImageSource,
) -> Result<LayerMaterialization, String> {
    let t0 = Instant::now();
    if layer_plan.missing_layers == 0 {
        return Ok(LayerMaterialization {
            materialize_ms: elapsed_ms(t0),
            built_layers: 0,
            reused_layers: layer_plan.cached_layers,
        });
    }
    let mut owned_work_dir = None;
    let save_dir = if let Some(save_dir) = layer_plan.save_dir.as_deref() {
        save_dir.to_path_buf()
    } else {
        let work_dir = temp_work_dir("supermachine-layer-materialize")?;
        let save_dir = source.save_arm64(image, &work_dir)?;
        owned_work_dir = Some(work_dir);
        save_dir
    };
    // Closure so the owned work dir is always removed before returning.
    let result = (|| {
        // Guard against the image changing between plan and materialize.
        let saved_shas = layer_shas_from_save_dir(image, &save_dir)?;
        if saved_shas != layer_plan.layer_shas {
            return Err(format!(
                "image layer set changed while materializing {image}; retry the command"
            ));
        }
        let mut built_layers = 0usize;
        let mut reused_layers = 0usize;
        for sha in &layer_plan.layer_shas {
            let layer_squashfs = layer_plan.cache_dir.join(format!("{sha}.squashfs"));
            if layer_squashfs.is_file() {
                reused_layers += 1;
                continue;
            }
            let blob = save_dir.join("blobs/sha256").join(sha);
            if !blob.is_file() {
                return Err(format!("layer blob {} missing from image save", sha));
            }
            // Unique scratch names so concurrent processes never collide.
            let unique = TEMP_COUNTER.fetch_add(1, Ordering::Relaxed);
            let layer_extract = layer_plan.cache_dir.join(format!(
                "{sha}.extract.{}.{}",
                std::process::id(),
                unique
            ));
            let tmp_squashfs = layer_plan.cache_dir.join(format!(
                ".{sha}.squashfs.{}.{}.tmp",
                std::process::id(),
                unique
            ));
            let _ = std::fs::remove_dir_all(&layer_extract);
            let _ = std::fs::remove_file(&tmp_squashfs);
            std::fs::create_dir_all(&layer_extract)
                .map_err(|e| format!("create layer extract {}: {e}", layer_extract.display()))?;
            let built: Result<bool, String> = (|| {
                let mut tar = Command::new("tar");
                tar.arg("-xf")
                    .arg(&blob)
                    .arg("-C")
                    .arg(&layer_extract)
                    .stdout(Stdio::null())
                    .stderr(Stdio::null());
                // NOTE(review): only the spawn is checked here — tar's exit
                // status is deliberately(?) ignored, so a partial extraction
                // would go unnoticed; confirm this is intentional (e.g. to
                // tolerate unsupported entries in layer tars).
                let _ = spawn_status(tar, "tar extract OCI layer")?;
                remove_oci_whiteouts(&layer_extract)?;
                // Another process may have finished this layer meanwhile.
                if layer_squashfs.is_file() {
                    return Ok(false);
                }
                let mut squash = Command::new("mksquashfs");
                squash
                    .arg(&layer_extract)
                    .arg(&tmp_squashfs)
                    .arg("-comp")
                    .arg("zstd")
                    .arg("-no-xattrs")
                    .arg("-noappend")
                    .arg("-quiet")
                    .stdout(Stdio::null())
                    .stderr(Stdio::null());
                run_status(squash, "mksquashfs layer")?;
                // Lost the race: keep the winner's file, drop ours.
                if layer_squashfs.is_file() {
                    let _ = std::fs::remove_file(&tmp_squashfs);
                    return Ok(false);
                }
                // Atomic publish into the cache.
                std::fs::rename(&tmp_squashfs, &layer_squashfs).map_err(|e| {
                    format!(
                        "install layer squashfs {} -> {}: {e}",
                        tmp_squashfs.display(),
                        layer_squashfs.display()
                    )
                })?;
                Ok(true)
            })();
            // Always drop the extraction dir; drop the temp file on failure.
            let _ = std::fs::remove_dir_all(&layer_extract);
            if built.is_err() {
                let _ = std::fs::remove_file(&tmp_squashfs);
            }
            if built? {
                built_layers += 1;
            } else {
                reused_layers += 1;
            }
        }
        Ok(LayerMaterialization {
            materialize_ms: elapsed_ms(t0),
            built_layers,
            reused_layers,
        })
    })();
    if let Some(work_dir) = owned_work_dir {
        let _ = std::fs::remove_dir_all(work_dir);
    }
    result
}
/// Collect every value that follows an occurrence of `flag` in `args`.
/// A trailing flag with no following value is ignored.
fn arg_values<'a>(args: &'a [String], flag: &str) -> Vec<&'a str> {
    let mut out = Vec::new();
    let mut idx = 0;
    while let Some(arg) = args.get(idx) {
        if arg == flag {
            if let Some(value) = args.get(idx + 1) {
                out.push(value.as_str());
                idx += 2;
                continue;
            }
        }
        idx += 1;
    }
    out
}
/// True when `flag` appears anywhere in `args` as its own token.
fn arg_present(args: &[String], flag: &str) -> bool {
    args.iter().any(|candidate| candidate.as_str() == flag)
}
/// The last value supplied for `flag`, so later occurrences override
/// earlier ones.
fn arg_value<'a>(args: &'a [String], flag: &str) -> Option<&'a str> {
    arg_values(args, flag).pop()
}
/// Hash a file with the system `shasum -a 256` tool; returns the hex digest
/// (the first whitespace-separated token of the output).
fn sha256_file(path: &Path) -> Result<String, String> {
    let mut cmd = Command::new("shasum");
    cmd.arg("-a").arg("256").arg(path);
    let out = command_output(cmd, "shasum file")?;
    match out.split_whitespace().next() {
        Some(digest) => Ok(digest.to_owned()),
        None => Err(format!("shasum produced no digest for {}", path.display())),
    }
}
/// Hash a string by writing it to a scratch file and delegating to
/// `sha256_file`; the scratch dir is removed regardless of outcome.
fn sha256_text(text: &str) -> Result<String, String> {
    let work_dir = temp_work_dir("supermachine-sha")?;
    let input = work_dir.join("input");
    let digest = std::fs::write(&input, text)
        .map_err(|e| format!("write {}: {e}", input.display()))
        .and_then(|_| sha256_file(&input));
    let _ = std::fs::remove_dir_all(&work_dir);
    digest
}
/// Permission bits of `path` (lower 12 bits only) formatted as octal text.
fn file_mode_octal(path: &Path) -> Result<String, String> {
    let meta = std::fs::metadata(path).map_err(|e| format!("stat {}: {e}", path.display()))?;
    let bits = meta.permissions().mode() & 0o7777;
    Ok(format!("{bits:o}"))
}
/// Cheap change-detection fingerprint for `path`: "size:mtime_secs".
fn file_size_mtime(path: &Path) -> Result<String, String> {
    let meta = std::fs::metadata(path).map_err(|e| format!("stat {}: {e}", path.display()))?;
    let modified = meta
        .modified()
        .map_err(|e| format!("mtime {}: {e}", path.display()))?;
    let secs = modified
        .duration_since(UNIX_EPOCH)
        .map_err(|e| format!("mtime before epoch {}: {e}", path.display()))?
        .as_secs();
    Ok(format!("{}:{secs}", meta.len()))
}
/// chmod `path` to exactly `mode`.
fn set_mode(path: &Path, mode: u32) -> Result<(), String> {
    let meta = std::fs::metadata(path).map_err(|e| format!("stat {}: {e}", path.display()))?;
    let mut perms = meta.permissions();
    perms.set_mode(mode);
    std::fs::set_permissions(path, perms)
        .map_err(|e| format!("chmod {:o} {}: {e}", mode, path.display()))
}
/// Write `lines` to `path`, newline-terminating every line.
fn write_lines(path: &Path, lines: &[String]) -> Result<(), String> {
    let body: String = lines.iter().map(|line| format!("{line}\n")).collect();
    std::fs::write(path, body).map_err(|e| format!("write {}: {e}", path.display()))
}
/// Locate (and if needed build) the static `init-oci` guest init binary.
///
/// Search order: a prebuilt, up-to-date binary in the dev or install tree;
/// the bundled assets; finally a `zig cc` cross-build from `init-oci.c`.
/// (The candidate list previously contained `crates/supermachine/oci`
/// twice; the dead duplicate entry is removed.)
fn ensure_init_oci(root: &Path) -> Result<PathBuf, String> {
    // First existing candidate directory wins; dev tree is tried first.
    let dir = [
        root.join("crates/supermachine/oci"),
        root.join("share/supermachine/oci"),
    ]
    .into_iter()
    .find(|p| p.is_dir())
    .unwrap_or_else(|| root.join("crates/supermachine/oci"));
    let src = dir.join("init-oci.c");
    let bin = dir.join("init-oci");
    if bin.is_file() {
        let executable = std::fs::metadata(&bin)
            .ok()
            .map(|m| m.permissions().mode() & 0o111 != 0)
            .unwrap_or(false);
        // The binary is stale when the C source is newer. A missing source
        // (release layout) means the binary is authoritative.
        let src_meta = std::fs::metadata(&src).ok();
        let up_to_date = match src_meta {
            Some(s) => std::fs::metadata(&bin)
                .ok()
                .and_then(|b| Some((b.modified().ok()?, s.modified().ok()?)))
                .map(|(b, s)| b >= s)
                .unwrap_or(false),
            None => true,
        };
        if executable && up_to_date {
            return Ok(bin);
        }
    }
    if !src.is_file() {
        // Nothing to build from: fall back to the bundled asset if present.
        let assets = crate::assets::AssetPaths::discover();
        if let Some(p) = assets.init_oci_bin {
            return Ok(p);
        }
    }
    if src.is_file() {
        // Cross-compile a static aarch64 musl binary with zig's bundled cc.
        let mut cmd = Command::new("zig");
        cmd.current_dir(&dir)
            .arg("cc")
            .arg("--target=aarch64-linux-musl")
            .arg("-static")
            .arg("-O2")
            .arg("-o")
            .arg("init-oci")
            .arg("init-oci.c")
            .stdout(Stdio::null())
            .stderr(Stdio::null());
        run_status(cmd, "build init-oci")?;
        if bin.is_file() {
            return Ok(bin);
        }
    }
    Err(format!(
        "missing init-oci: not found in dev tree ({}), bundled assets, or via zig build",
        bin.display()
    ))
}
/// Locate (and if needed build) the aarch64 `supermachine-agent` guest
/// binary.
///
/// Preference order: installed release copy, up-to-date dev build, bundled
/// asset (only when no dev tree exists at all), and finally
/// `cargo build --release` inside the guest-agent crate.
fn ensure_supermachine_agent(root: &Path) -> Result<PathBuf, String> {
    let installed = root.join("share/supermachine/supermachine-agent");
    if installed.is_file() {
        return Ok(installed);
    }
    let crate_dir = root.join("crates/supermachine-guest-agent");
    let dev_bin = crate_dir
        .join("target/aarch64-unknown-linux-musl/release/supermachine-agent");
    if dev_bin.is_file() {
        let src = crate_dir.join("src/main.rs");
        // Compares Option<SystemTime>: None orders below Some, so an
        // unreadable binary mtime fails the >= and forces a rebuild, while
        // a missing source file counts as up to date.
        let up_to_date = match (
            std::fs::metadata(&dev_bin).ok(),
            std::fs::metadata(&src).ok(),
        ) {
            (Some(b), Some(s)) => b.modified().ok() >= s.modified().ok(),
            (Some(_), None) => true,
            _ => false,
        };
        if up_to_date {
            return Ok(dev_bin);
        }
    }
    if !crate_dir.is_dir() {
        // No dev tree: try the bundled asset before giving up.
        let assets = crate::assets::AssetPaths::discover();
        if let Some(p) = assets.supermachine_agent {
            return Ok(p);
        }
        return Err(format!(
            "supermachine-agent: not found at {} (release tarball), \
            {} (dev tree), or in the bundled assets from supermachine-kernel",
            installed.display(),
            dev_bin.display()
        ));
    }
    // Build from source. The crate dir is prepended to PATH — presumably so
    // a tool shim checked into the crate is picked up by cargo; confirm.
    let path = std::env::var("PATH").unwrap_or_default();
    let new_path = format!("{}:{}", crate_dir.display(), path);
    let mut cmd = Command::new("cargo");
    cmd.current_dir(&crate_dir)
        .arg("build")
        .arg("--release")
        .env("PATH", new_path)
        .stdout(Stdio::null())
        .stderr(Stdio::piped());
    run_status(cmd, "build supermachine-agent")?;
    if !dev_bin.is_file() {
        return Err(format!(
            "supermachine-agent build did not produce {}",
            dev_bin.display()
        ));
    }
    Ok(dev_bin)
}
/// Assemble the guest initramfs (init binary + mount-point skeleton) and
/// copy it into `out_dir`. Returns the output path and elapsed ms.
///
/// The cpio.gz is cached next to the init binary and rebuilt only when the
/// binary is newer than the cache. The mtime comparison is on
/// Option<SystemTime>; any metadata failure takes the `_ => true` arm and
/// forces a rebuild.
fn build_initramfs(root: &Path, out_dir: &Path) -> Result<(PathBuf, u128), String> {
    let t0 = Instant::now();
    let init = ensure_init_oci(root)?;
    let init_dir = init
        .parent()
        .map(|p| p.to_path_buf())
        .unwrap_or_else(|| root.join("crates/supermachine/oci"));
    let cache = init_dir.join("init-oci.cpio.gz");
    let rebuild = match (std::fs::metadata(&cache), std::fs::metadata(&init)) {
        (Ok(cache_meta), Ok(init_meta)) => cache_meta.modified().ok() < init_meta.modified().ok(),
        _ => true,
    };
    if rebuild {
        // Stage a minimal root (/init plus empty mount points), then pack
        // it via the shell pipeline: find | cpio (newc) | gzip.
        let stage = temp_work_dir("supermachine-initramfs-stage")?;
        let result: Result<(), String> = (|| {
            std::fs::copy(&init, stage.join("init"))
                .map_err(|e| format!("copy init into initramfs stage: {e}"))?;
            set_mode(&stage.join("init"), 0o755)?;
            for dir in ["proc", "sys", "dev", "newroot"] {
                std::fs::create_dir_all(stage.join(dir))
                    .map_err(|e| format!("create initramfs dir {dir}: {e}"))?;
            }
            // $1 = stage dir, $2 = cache output path (passed as sh args so
            // no shell-escaping of the paths is needed).
            let script =
                "cd \"$1\" && find . -mindepth 1 -print | sed 's|^\\./||' | cpio -o -H newc 2>/dev/null | gzip > \"$2\"";
            let mut cmd = Command::new("sh");
            cmd.arg("-c")
                .arg(script)
                .arg("sh")
                .arg(&stage)
                .arg(&cache)
                .stdout(Stdio::null());
            run_status(cmd, "build initramfs cpio")?;
            Ok(())
        })();
        // Clean the stage before propagating any error from the closure.
        let _ = std::fs::remove_dir_all(&stage);
        result?;
    }
    let out = out_dir.join("init.cpio.gz");
    std::fs::copy(&cache, &out).map_err(|e| {
        format!(
            "copy initramfs cache {} -> {}: {e}",
            cache.display(),
            out.display()
        )
    })?;
    Ok((out, elapsed_ms(t0)))
}
/// Serialize the merged guest environment to `out_dir/env.json` and return
/// the written path.
fn write_env_json(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    out_dir: &Path,
) -> Result<PathBuf, String> {
    let path = out_dir.join("env.json");
    let body = serde_json::to_vec_pretty(&env_json_value(plan, resolution))
        .map_err(|e| format!("encode env.json: {e}"))?;
    std::fs::write(&path, body).map_err(|e| format!("write {}: {e}", path.display()))?;
    Ok(path)
}
/// Merge image env entries with `--env` CLI overrides (CLI wins, since it
/// is inserted last) into the env.json shape:
/// `{ "env": { KEY: VALUE, ... }, "secrets": {} }`.
/// Entries without an `=` are silently dropped.
fn env_json_value(plan: &BakePlan<'_>, resolution: &ImageResolution) -> serde_json::Value {
    let mut env = serde_json::Map::new();
    let image_env = resolution.env.iter().map(String::as_str);
    let cli_env = arg_values(plan.extra_args, "--env");
    for entry in image_env.chain(cli_env) {
        if let Some((key, value)) = entry.split_once('=') {
            env.insert(key.to_owned(), serde_json::Value::String(value.to_owned()));
        }
    }
    serde_json::json!({ "env": env, "secrets": {} })
}
/// Read `path` as lossy UTF-8; any I/O failure yields an empty string.
fn read_to_string_lossy(path: &Path) -> String {
    match std::fs::read(path) {
        Ok(bytes) => String::from_utf8_lossy(&bytes).into_owned(),
        Err(_) => String::new(),
    }
}
/// True when the file at `path` contains `needle` (a missing or unreadable
/// file reads as empty and therefore never matches).
fn log_has(path: &Path, needle: &str) -> bool {
    let text = read_to_string_lossy(path);
    text.contains(needle)
}
/// Heuristic scan of a log file for fatal-looking lines: `FATAL:` or
/// `error:` prefixes, any "panic", or a line mentioning both "snapshot"
/// and "ERR".
fn log_has_failure(path: &Path) -> bool {
    let text = read_to_string_lossy(path);
    text.lines().any(|line| {
        if line.starts_with("FATAL:") || line.starts_with("error:") {
            return true;
        }
        if line.contains("panic") {
            return true;
        }
        line.contains("snapshot") && line.contains("ERR")
    })
}
/// The most recent (last) log line containing `needle`, if any.
fn last_line_containing(path: &Path, needle: &str) -> Option<String> {
    let text = read_to_string_lossy(path);
    text.lines()
        .rev()
        .find(|line| line.contains(needle))
        .map(ToOwned::to_owned)
}
/// Pull REASON out of a log line shaped like "... snapshot (REASON): ...".
fn parse_snapshot_reason(line: &str) -> Option<String> {
    let (_, tail) = line.split_once("snapshot (")?;
    let (reason, _) = tail.split_once("):")?;
    Some(reason.to_owned())
}
/// Extract the "capture N us" and "save N us" figures from a timing line.
/// Either side is None when its marker is absent or the number is garbled.
fn parse_capture_save_us(line: &str) -> (Option<u64>, Option<u64>) {
    let grab = |marker: &str| -> Option<u64> {
        let (_, tail) = line.split_once(marker)?;
        let value = tail.split(" us").next().unwrap_or(tail);
        value.trim().parse().ok()
    };
    (grab("capture "), grab("save "))
}
/// Pull the `data N MiB` and `zero N MiB` figures out of a snapshot log line.
/// A line without a `data ` marker yields `(None, None)` even if `zero ` is
/// present (matching the original short-circuit).
fn parse_ram_mib(line: &str) -> (Option<u64>, Option<u64>) {
    let Some((_, after_data)) = line.split_once("data ") else {
        return (None, None);
    };
    let data = after_data
        .split(" MiB")
        .next()
        .and_then(|s| s.trim().parse().ok());
    let zero = line
        .split_once("zero ")
        .and_then(|(_, tail)| tail.split(" MiB").next())
        .and_then(|s| s.trim().parse().ok());
    (data, zero)
}
/// Physical (allocated) size of a file in bytes, or `None` if it cannot be
/// stat'ed.
///
/// Uses `st_blocks` from the file's metadata — POSIX defines it in 512-byte
/// units regardless of the filesystem block size — instead of spawning
/// `du -sk` per call. This avoids a fork/exec on every snapshot measurement,
/// works without `du` in PATH, and reports at 512-byte rather than KiB
/// granularity. Sparse regions are excluded, same as `du`.
fn file_physical_bytes(path: &Path) -> Option<u64> {
    use std::os::unix::fs::MetadataExt;
    std::fs::metadata(path).ok().map(|m| m.blocks() * 512)
}
/// Current UTC time formatted as `YYYY-MM-DDTHH:MM:SSZ` (ISO 8601).
///
/// Computed directly from the system clock instead of shelling out to
/// `date -u`, which avoids a subprocess spawn per call and works without
/// `date` in PATH. Errors only if the clock reads before the Unix epoch.
fn now_utc_iso() -> Result<String, String> {
    let secs = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|e| format!("host time before epoch: {e}"))?
        .as_secs();
    let days = (secs / 86_400) as i64;
    let rem = secs % 86_400;
    let (hour, min, sec) = (rem / 3_600, (rem % 3_600) / 60, rem % 60);
    // Civil-from-days conversion (Howard Hinnant's public-domain algorithm):
    // shift the epoch so years start on March 1, putting leap days last.
    let z = days + 719_468;
    let era = z.div_euclid(146_097);
    let doe = z.rem_euclid(146_097); // day-of-era, [0, 146096]
    let yoe = (doe - doe / 1_460 + doe / 36_524 - doe / 146_096) / 365; // year-of-era
    let doy = doe - (365 * yoe + yoe / 4 - yoe / 100); // day-of-year, March-based
    let mp = (5 * doy + 2) / 153; // month index, 0 = March
    let day = doy - (153 * mp + 2) / 5 + 1;
    let month = if mp < 10 { mp + 3 } else { mp - 9 };
    // Jan/Feb belong to the *next* civil year in the March-based scheme.
    let year = yoe + era * 400 + i64::from(month <= 2);
    Ok(format!(
        "{year:04}-{month:02}-{day:02}T{hour:02}:{min:02}:{sec:02}Z"
    ))
}
/// Milliseconds to let a freshly-detected listener settle before snapshot.
///
/// Precedence: `--supermachine-listener-settle-ms` extra arg, then the
/// `SUPERMACHINE_LISTENER_SETTLE_MS` env var, then a 50 ms default.
/// Unparseable values fall through to the next source.
fn native_listener_settle_ms(plan: &BakePlan<'_>) -> u64 {
    let from_args = arg_value(plan.extra_args, "--supermachine-listener-settle-ms")
        .and_then(|s| s.parse::<u64>().ok());
    let from_env = || {
        std::env::var("SUPERMACHINE_LISTENER_SETTLE_MS")
            .ok()
            .and_then(|s| s.parse::<u64>().ok())
    };
    from_args.or_else(from_env).unwrap_or(50)
}
/// Hard deadline (ms) after which the VM snapshots even without a listener.
///
/// Precedence: `--supermachine-snapshot-after-ms` extra arg, then the
/// `SUPERMACHINE_SNAPSHOT_AFTER_MS` env var, then a 7000 ms default.
/// Unparseable values fall through to the next source.
fn native_snapshot_after_ms(plan: &BakePlan<'_>) -> u64 {
    let from_args = arg_value(plan.extra_args, "--supermachine-snapshot-after-ms")
        .and_then(|s| s.parse::<u64>().ok());
    let from_env = || {
        std::env::var("SUPERMACHINE_SNAPSHOT_AFTER_MS")
            .ok()
            .and_then(|s| s.parse::<u64>().ok())
    };
    from_args.or_else(from_env).unwrap_or(7000)
}
/// Full set of inputs that determine a native supermachine bake's output.
///
/// Combines resolved image identity/config with host-side artifacts (worker
/// binary, kernel, init OCI image, agent) fingerprinted by size+mtime, plus
/// snapshot tunables from extra args / env. This value is serialized into
/// the bake cache key, so adding, removing, or renaming a field invalidates
/// previously cached snapshots.
fn native_supermachine_base_inputs(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    root: &Path,
) -> Result<serde_json::Value, String> {
    let sm22_bin = supermachine_worker_bin(root);
    let kernel = supermachine_kernel(root);
    // `ensure_*` are fallible: they may need to materialize the artifact first.
    let init = ensure_init_oci(root)?;
    let agent = ensure_supermachine_agent(root)?;
    let snapshot_hash_mode =
        std::env::var("SUPERMACHINE_SNAPSHOT_HASH_MODE").unwrap_or_else(|_| "fast".to_owned());
    Ok(serde_json::json!({
        "version": 1,
        "runtime": "supermachine",
        "image": plan.image,
        "image_id": resolution.image_id,
        "architecture": resolution.architecture,
        "guest_port": plan.guest_port,
        "memory_mib": plan.memory_mib,
        "cmd": resolution.effective_cmd,
        "working_dir": resolution.working_dir,
        "user": resolution.user,
        "env": env_json_value(plan, resolution),
        "extra_args": plan.extra_args,
        "egress_policy": arg_value(plan.extra_args, "--egress-policy").unwrap_or(""),
        "listener_settle_ms": native_listener_settle_ms(plan),
        "snapshot_after_ms": native_snapshot_after_ms(plan),
        // size+mtime fingerprints: cheap change-detection for host binaries.
        "runtime_bin": file_size_mtime(&sm22_bin)?,
        "kernel": file_size_mtime(&kernel)?,
        "init_oci": file_size_mtime(&init)?,
        "agent": file_size_mtime(&agent)?,
        "snapshot_hash_mode": snapshot_hash_mode,
    }))
}
/// Keys of the "cheap" bake-input set: the inputs that can be computed
/// without pulling or inspecting the image. `try_fast_cache_hit` compares
/// exactly these keys against the previous bake's recorded inputs.
fn native_supermachine_cheap_input_keys() -> &'static [&'static str] {
    const KEYS: &[&str] = &[
        "version",
        "runtime",
        "image",
        "guest_port",
        "memory_mib",
        "extra_args",
        "egress_policy",
        "listener_settle_ms",
        "snapshot_after_ms",
        "runtime_bin",
        "kernel",
        "init_oci",
        "agent",
        "snapshot_hash_mode",
    ];
    KEYS
}
/// Subset of bake inputs computable WITHOUT resolving the image — no
/// `image_id`/`cmd`/`env` fields — so `try_fast_cache_hit` can run before any
/// image pull/inspect. The field names here must stay in sync with
/// `native_supermachine_cheap_input_keys()`.
fn native_supermachine_cheap_inputs(
    plan: &BakePlan<'_>,
    root: &Path,
) -> Result<serde_json::Value, String> {
    let sm22_bin = supermachine_worker_bin(root);
    let kernel = supermachine_kernel(root);
    // `ensure_*` may materialize the artifact on first use.
    let init = ensure_init_oci(root)?;
    let agent = ensure_supermachine_agent(root)?;
    let snapshot_hash_mode =
        std::env::var("SUPERMACHINE_SNAPSHOT_HASH_MODE").unwrap_or_else(|_| "fast".to_owned());
    Ok(serde_json::json!({
        "version": 1,
        "runtime": "supermachine",
        "image": plan.image,
        "guest_port": plan.guest_port,
        "memory_mib": plan.memory_mib,
        "extra_args": plan.extra_args,
        "egress_policy": arg_value(plan.extra_args, "--egress-policy").unwrap_or(""),
        "listener_settle_ms": native_listener_settle_ms(plan),
        "snapshot_after_ms": native_snapshot_after_ms(plan),
        // size+mtime fingerprints of host-side artifacts.
        "runtime_bin": file_size_mtime(&sm22_bin)?,
        "kernel": file_size_mtime(&kernel)?,
        "init_oci": file_size_mtime(&init)?,
        "agent": file_size_mtime(&agent)?,
        "snapshot_hash_mode": snapshot_hash_mode,
    }))
}
/// Cheapest cache check, run before the image is pulled or inspected.
///
/// Compares only the "cheap" input keys (see
/// `native_supermachine_cheap_input_keys`) against the previous bake's
/// recorded inputs, so a hit skips image resolution entirely. Returns
/// `Ok(Some(..))` on a usable cached snapshot, `Ok(None)` to fall through to
/// a full bake, and `Err` only on unexpected I/O or parse failures.
fn try_fast_cache_hit(
    plan: &BakePlan<'_>,
    root: &Path,
    run_t0: Instant,
) -> Result<Option<NativeBakeResult>, String> {
    // `--pull always` must re-resolve the image, so never short-circuit.
    if plan.pull_policy == "always" {
        return Ok(None);
    }
    if snapshot_reuse_disabled() || !native_supermachine_early_reuse_supported(plan) {
        return Ok(None);
    }
    let meta_path = plan.metadata_path();
    // Missing metadata just means "no previous bake"; other read errors are real.
    let text = match std::fs::read_to_string(&meta_path) {
        Ok(text) => text,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(format!("read metadata {}: {e}", meta_path.display())),
    };
    let meta: serde_json::Value = serde_json::from_str(&text)
        .map_err(|e| format!("parse metadata {}: {e}", meta_path.display()))?;
    if meta.get("supermachine_version").and_then(|v| v.as_str()) != Some("supermachine") {
        return Ok(None);
    }
    let Some(meta_inputs) = meta.get("native_bake_inputs").and_then(|v| v.as_object()) else {
        return Ok(None);
    };
    let cheap = native_supermachine_cheap_inputs(plan, root)?;
    let cheap_obj = cheap
        .as_object()
        .ok_or_else(|| "cheap bake inputs was not an object".to_owned())?;
    // Any drift in a cheap input invalidates the cached snapshot.
    for &key in native_supermachine_cheap_input_keys() {
        if meta_inputs.get(key) != cheap_obj.get(key) {
            return Ok(None);
        }
    }
    // Inputs match, but the artifacts themselves must still be on disk.
    if !metadata_snapshot_files_exist(&meta) {
        return Ok(None);
    }
    let timings = serde_json::json!({
        "total_ms": 0,
        "snapshot_reused": true,
        "fast_snapshot_reused": true,
        "reuse_check_total_ms": elapsed_ms(run_t0),
    });
    Ok(Some(NativeBakeResult {
        total_ms: 0,
        timings,
        reused: true,
    }))
}
/// Derive the cache key for a native supermachine bake: sha256 of the
/// serialized input set (base inputs + layer digests + delta key), returned
/// together with the inputs object itself so callers can persist it.
///
/// NOTE(review): whether insert order below affects the serialized text (and
/// therefore the hash) depends on serde_json's `preserve_order` feature —
/// confirm before reordering these inserts.
fn native_supermachine_bake_key(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    layer_plan: &LayerPlan,
    delta: &DeltaMaterialization,
    root: &Path,
) -> Result<(String, serde_json::Value), String> {
    let delta_key = delta
        .key
        .as_deref()
        .ok_or_else(|| "native supermachine bake needs delta key".to_owned())?;
    let mut inputs = native_supermachine_base_inputs(plan, resolution, root)?;
    let obj = inputs
        .as_object_mut()
        .ok_or_else(|| "native bake inputs was not an object".to_owned())?;
    obj.insert(
        "layers".to_owned(),
        serde_json::json!(layer_plan.layer_shas),
    );
    obj.insert("delta_key".to_owned(), serde_json::json!(delta_key));
    let encoded =
        serde_json::to_string(&inputs).map_err(|e| format!("encode native bake key: {e}"))?;
    // The trailing newline is part of the hashed text; keep it for key stability.
    let key = sha256_text(&format!("{encoded}\n"))?;
    Ok((key, inputs))
}
/// Look up `key` in a JSON object and return its string value, owned.
/// `None` when the key is absent or the value is not a string.
fn metadata_string(value: &serde_json::Value, key: &str) -> Option<String> {
    let field = value.get(key)?;
    field.as_str().map(|s| s.to_owned())
}
/// Look up `key` as a string in the metadata and interpret it as a path.
fn metadata_path_value(meta: &serde_json::Value, key: &str) -> Option<PathBuf> {
    let raw = metadata_string(meta, key)?;
    Some(PathBuf::from(raw))
}
/// True when `key` names a path in `meta` and that path exists on disk.
fn metadata_path_exists(meta: &serde_json::Value, key: &str) -> bool {
    match metadata_path_value(meta, key) {
        Some(path) => path.exists(),
        None => false,
    }
}
/// Snapshot reuse is opt-out: only `SUPERMACHINE_SNAPSHOT_REUSE=0` or
/// `=false` disables it; unset or any other value leaves reuse enabled.
fn snapshot_reuse_disabled() -> bool {
    matches!(
        std::env::var("SUPERMACHINE_SNAPSHOT_REUSE").as_deref(),
        Ok("0") | Ok("false")
    )
}
/// Load the `cmd` array from an existing metadata file for the same image.
///
/// Returns `None` when the metadata is missing or unparseable, describes a
/// different image, or the command list is empty or contains non-strings.
fn previous_metadata_cmd(plan: &BakePlan<'_>) -> Option<Vec<String>> {
    let text = std::fs::read_to_string(plan.metadata_path()).ok()?;
    let meta: serde_json::Value = serde_json::from_str(&text).ok()?;
    if meta.get("image").and_then(|v| v.as_str()) != Some(plan.image) {
        return None;
    }
    // Collect to Option<Vec<_>>: one non-string element rejects the whole list.
    let cmd = meta
        .get("cmd")?
        .as_array()?
        .iter()
        .map(|v| v.as_str().map(|s| s.to_owned()))
        .collect::<Option<Vec<String>>>()?;
    (!cmd.is_empty()).then_some(cmd)
}
/// True when every artifact referenced by a metadata document is still on
/// disk: the base snapshot, delta squashfs, init cpio, and every layer.
/// An absent or empty `layers` array counts as missing.
fn metadata_snapshot_files_exist(meta: &serde_json::Value) -> bool {
    let required = ["snapshot_base", "delta_squashfs", "init_cpio"];
    if !required.iter().all(|&key| metadata_path_exists(meta, key)) {
        return false;
    }
    let Some(layers) = meta.get("layers").and_then(|v| v.as_array()) else {
        return false;
    };
    !layers.is_empty()
        && layers.iter().all(|layer| {
            layer
                .as_str()
                .map(|s| Path::new(s).exists())
                .unwrap_or(false)
        })
}
/// Early snapshot reuse only covers bakes whose extra args cannot change the
/// guest contents: injected files and inbound TLS material force a fresh bake.
fn native_supermachine_early_reuse_supported(plan: &BakePlan<'_>) -> bool {
    let args = plan.extra_args;
    let has_blocker = arg_present(args, "--extra-file")
        || arg_present(args, "--inbound-tls-autogen")
        || arg_value(args, "--inbound-tls-cert").is_some()
        || arg_value(args, "--inbound-tls-key").is_some();
    !has_blocker
}
/// Second-chance reuse check, run after image resolution: compares the FULL
/// base input set (including resolved image id, cmd, and env) against the
/// previous bake's recorded inputs. `Ok(Some(..))` means the cached snapshot
/// is reusable; `Ok(None)` falls through to a full bake.
fn native_supermachine_early_reuse_snapshot(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    root: &Path,
    run_t0: Instant,
) -> Result<Option<NativeBakeResult>, String> {
    if snapshot_reuse_disabled() || !native_supermachine_early_reuse_supported(plan) {
        return Ok(None);
    }
    let meta_path = plan.metadata_path();
    // Missing metadata just means there is no previous bake to reuse.
    let text = match std::fs::read_to_string(&meta_path) {
        Ok(text) => text,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(format!("read metadata {}: {e}", meta_path.display())),
    };
    let meta: serde_json::Value = serde_json::from_str(&text)
        .map_err(|e| format!("parse metadata {}: {e}", meta_path.display()))?;
    if meta.get("supermachine_version").and_then(|v| v.as_str()) != Some("supermachine") {
        return Ok(None);
    }
    let Some(meta_inputs) = meta.get("native_bake_inputs").and_then(|v| v.as_object()) else {
        return Ok(None);
    };
    let current_inputs = native_supermachine_base_inputs(plan, resolution, root)?;
    let current_obj = current_inputs
        .as_object()
        .ok_or_else(|| "native base inputs was not an object".to_owned())?;
    // Every current base input must match what the cached bake recorded.
    // Extra recorded keys (e.g. "layers"/"delta_key") are ignored here.
    for (key, value) in current_obj {
        if meta_inputs.get(key) != Some(value) {
            return Ok(None);
        }
    }
    // Inputs match, but the artifacts themselves must still be on disk.
    if !metadata_snapshot_files_exist(&meta) {
        return Ok(None);
    }
    let timings = serde_json::json!({
        "total_ms": 0,
        "snapshot_reused": true,
        "early_snapshot_reused": true,
        "reuse_check_total_ms": elapsed_ms(run_t0),
    });
    Ok(Some(NativeBakeResult {
        total_ms: 0,
        timings,
        reused: true,
    }))
}
/// Final reuse check, run once the full bake key is known: accepts the
/// cached snapshot only when the previous bake recorded exactly
/// `expected_bake_key` and all referenced artifacts still exist on disk.
fn native_supermachine_reuse_snapshot(
    plan: &BakePlan<'_>,
    expected_bake_key: &str,
    run_t0: Instant,
) -> Result<Option<NativeBakeResult>, String> {
    if snapshot_reuse_disabled() {
        return Ok(None);
    }
    let meta_path = plan.metadata_path();
    // Missing metadata just means there is no previous bake to reuse.
    let text = match std::fs::read_to_string(&meta_path) {
        Ok(text) => text,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(None),
        Err(e) => return Err(format!("read metadata {}: {e}", meta_path.display())),
    };
    let meta: serde_json::Value = serde_json::from_str(&text)
        .map_err(|e| format!("parse metadata {}: {e}", meta_path.display()))?;
    if meta.get("supermachine_version").and_then(|v| v.as_str()) != Some("supermachine") {
        return Ok(None);
    }
    // The recorded key captures all bake inputs; any mismatch means rebake.
    if meta.get("native_bake_key").and_then(|v| v.as_str()) != Some(expected_bake_key) {
        return Ok(None);
    }
    if !metadata_snapshot_files_exist(&meta) {
        return Ok(None);
    }
    let total_ms = 0;
    let timings = serde_json::json!({
        "total_ms": total_ms,
        "snapshot_reused": true,
        "reuse_check_total_ms": elapsed_ms(run_t0),
    });
    Ok(Some(NativeBakeResult {
        total_ms,
        timings,
        reused: true,
    }))
}
/// Bake a fresh native supermachine snapshot and write its metadata.
///
/// Stages: copy the materialized delta squashfs into the snapshot dir, write
/// env.json and the initramfs, boot the worker VM and poll until it writes
/// its own snapshot (or fails / times out), then mine timings out of the
/// bake log and persist metadata.json next to the snapshot.
fn run_native_supermachine_bake(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    layer_plan: &LayerPlan,
    delta: &DeltaMaterialization,
    root: &Path,
    native_bake_key: &str,
    native_bake_inputs: &serde_json::Value,
) -> Result<NativeBakeResult, String> {
    let total_t0 = Instant::now();
    let out_dir = plan.snapshots_dir.join(plan.snapshot_name());
    std::fs::create_dir_all(&out_dir)
        .map_err(|e| format!("create snapshot dir {}: {e}", out_dir.display()))?;
    // The delta must already be materialized as a regular file in the cache.
    let delta_cache = delta
        .cache_path
        .as_ref()
        .filter(|p| p.is_file())
        .ok_or_else(|| "native supermachine bake needs materialized delta cache".to_owned())?;
    let squash_t0 = Instant::now();
    let delta_squashfs = out_dir.join("delta.squashfs");
    std::fs::copy(delta_cache, &delta_squashfs).map_err(|e| {
        format!(
            "copy delta cache {} -> {}: {e}",
            delta_cache.display(),
            delta_squashfs.display()
        )
    })?;
    let squashfs_ms = elapsed_ms(squash_t0);
    let env_json = write_env_json(plan, resolution, &out_dir)?;
    let (init_cpio, initramfs_ms) = build_initramfs(root, &out_dir)?;
    let sm22_bin = supermachine_worker_bin(root);
    let kernel = supermachine_kernel(root);
    let snap_base = out_dir.join("restore.snap");
    let log = out_dir.join("bake.log");
    // Remove any stale snapshot so "file exists" below means "newly written".
    let _ = std::fs::remove_file(&snap_base);
    let log_file = std::fs::File::create(&log)
        .map_err(|e| format!("create bake log {}: {e}", log.display()))?;
    // Same file handle for stdout and stderr: one interleaved bake log.
    let log_err = log_file
        .try_clone()
        .map_err(|e| format!("clone bake log fd: {e}"))?;
    let listener_settle_ms = native_listener_settle_ms(plan);
    let snapshot_after_ms = native_snapshot_after_ms(plan);
    // Passed on the kernel cmdline so the guest clock starts near host time.
    let host_time = std::time::SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .map_err(|e| format!("host time before epoch: {e}"))?
        .as_secs();
    let bake_t0 = Instant::now();
    let mut cmd = Command::new(&sm22_bin);
    cmd.arg("--kernel")
        .arg(&kernel)
        .arg("--initramfs")
        .arg(&init_cpio);
    // Image layers attach as virtio-blk devices in order, delta last.
    let layer_paths: Vec<PathBuf> = layer_plan
        .layer_shas
        .iter()
        .map(|sha| layer_plan.cache_dir.join(format!("{sha}.squashfs")))
        .collect();
    for layer in &layer_paths {
        cmd.arg("--virtio-blk").arg(layer);
    }
    cmd.arg("--virtio-blk")
        .arg(&delta_squashfs);
    let volumes = parse_volume_args(plan.extra_args)?;
    for vol in &volumes {
        ensure_volume_host_file(vol, crate::vmm::resources::VolumeSpec::DEFAULT_SIZE_BYTES)?;
        cmd.arg("--volume").arg(format!(
            "{}:{}",
            vol.host_file.display(),
            vol.guest_path
        ));
    }
    cmd.arg("--memory")
        .arg(plan.memory_mib.to_string())
        .arg("--vcpus")
        .arg(plan.vcpus.to_string())
        .arg("--cmdline")
        .arg(format!(
            "earlycon=pl011,mmio32,0x09000000 console=ttyAMA0 tsi_hijack supermachine.host_time={host_time}"
        ));
    // With volumes attached, the listener heuristic is skipped and the VM
    // snapshots at a fixed point instead.
    if volumes.is_empty() {
        cmd.arg("--snapshot-on-listener")
            .arg("--snapshot-after-ms")
            .arg(snapshot_after_ms.to_string())
            .arg("--quiesce-ms")
            .arg(listener_settle_ms.to_string());
    } else {
        cmd.arg("--snapshot-at")
            .arg("1");
    }
    cmd.arg("--snapshot-out")
        .arg(&snap_base)
        .arg("--env-file")
        .arg(&env_json)
        .stdout(Stdio::from(log_file))
        .stderr(Stdio::from(log_err));
    let mut child = cmd
        .spawn()
        .map_err(|e| format!("spawn supermachine bake: {e}"))?;
    // Poll loop: watch the log for milestone markers and the snapshot file
    // for completion, until the child exits, a failure line appears, or the
    // 240 s deadline passes. Each marker's elapsed time is recorded once.
    let mut listener_ready_ms = None;
    let mut snapshot_trigger_ms = None;
    let mut snapshot_line_ms = None;
    let deadline = Instant::now() + std::time::Duration::from_secs(240);
    while Instant::now() < deadline {
        if listener_ready_ms.is_none() && log_has(&log, "listener readiness") {
            listener_ready_ms = Some(elapsed_ms(bake_t0));
        }
        if snapshot_trigger_ms.is_none() && log_has(&log, "snapshot trigger (") {
            snapshot_trigger_ms = Some(elapsed_ms(bake_t0));
        }
        if snapshot_line_ms.is_none() && log_has(&log, "snapshot (") {
            snapshot_line_ms = Some(elapsed_ms(bake_t0));
        }
        if snap_base.is_file() {
            let _ = child.wait();
            break;
        }
        if child
            .try_wait()
            .map_err(|e| format!("poll supermachine bake: {e}"))?
            .is_some()
        {
            break;
        }
        if log_has_failure(&log) {
            let _ = child.kill();
            let _ = child.wait();
            return Err(format!("supermachine bake failed; see {}", log.display()));
        }
        std::thread::sleep(std::time::Duration::from_millis(50));
    }
    let vmm_bake_ms = elapsed_ms(bake_t0);
    if !snap_base.is_file() {
        let _ = child.kill();
        let _ = child.wait();
        return Err(format!("supermachine snapshot timeout; see {}", log.display()));
    }
    // Catch markers that landed between the last poll and child exit.
    if listener_ready_ms.is_none() && log_has(&log, "listener readiness") {
        listener_ready_ms = Some(elapsed_ms(bake_t0));
    }
    if snapshot_trigger_ms.is_none() && log_has(&log, "snapshot trigger (") {
        snapshot_trigger_ms = Some(elapsed_ms(bake_t0));
    }
    if snapshot_line_ms.is_none() && log_has(&log, "snapshot (") {
        snapshot_line_ms = Some(elapsed_ms(bake_t0));
    }
    // Mine snapshot statistics out of the worker's final "snapshot (...)" line.
    let snapshot_line = last_line_containing(&log, "snapshot (").unwrap_or_default();
    let snapshot_reason = parse_snapshot_reason(&snapshot_line).unwrap_or_default();
    let (snapshot_capture_us, snapshot_save_us) = parse_capture_save_us(&snapshot_line);
    let (snapshot_ram_data_mib, snapshot_ram_zero_mib) = parse_ram_mib(&snapshot_line);
    let snapshot_logical_bytes = std::fs::metadata(&snap_base).ok().map(|m| m.len());
    let snapshot_physical_bytes = file_physical_bytes(&snap_base);
    let listener_to_trigger_ms = listener_ready_ms
        .zip(snapshot_trigger_ms)
        .map(|(a, b)| b.saturating_sub(a));
    let listener_to_snapshot_ms = listener_ready_ms
        .zip(snapshot_line_ms)
        .map(|(a, b)| b.saturating_sub(a));
    let metadata_t0 = Instant::now();
    let runtime_sha = sha256_file(&sm22_bin)?;
    let runtime_sha16 = &runtime_sha[..runtime_sha.len().min(16)];
    let hash_mode =
        std::env::var("SUPERMACHINE_SNAPSHOT_HASH_MODE").unwrap_or_else(|_| "fast".to_owned());
    let snapshot_id = compute_snapshot_id(plan, &snap_base, runtime_sha16, &hash_mode)?;
    let baked_at = now_utc_iso()?;
    let egress_policy = arg_value(plan.extra_args, "--egress-policy").unwrap_or("");
    // memory_mib * 256 * 70/100: balloon targets 70% of guest RAM in pages
    // (256/MiB — presumably 4 KiB pages; confirm against the VMM).
    let balloon_target_pages = plan.memory_mib as u64 * 256 * 70 / 100;
    let metadata_prepare_ms = elapsed_ms(metadata_t0);
    let total_ms = elapsed_ms(total_t0);
    let timings = serde_json::json!({
        "total_ms": total_ms,
        "pull_inspect_ms": resolution.inspect_ms,
        "rootfs_prepare_ms": layer_plan.plan_ms,
        "rootfs_customize_ms": delta.prepare_ms,
        "init_oci_build_ms": 0,
        "squashfs_ms": squashfs_ms,
        "initramfs_ms": initramfs_ms,
        "vmm_bake_ms": vmm_bake_ms,
        "metadata_prepare_ms": metadata_prepare_ms,
        "guest_boot_to_listener_ms": listener_ready_ms,
        "listener_settle_config_ms": listener_settle_ms,
        "listener_to_snapshot_trigger_ms": listener_to_trigger_ms,
        "listener_to_snapshot_ms": listener_to_snapshot_ms,
        "snapshot_capture_us": snapshot_capture_us,
        "snapshot_save_us": snapshot_save_us,
        "snapshot_reason": snapshot_reason,
    });
    let volumes_meta: Vec<serde_json::Value> = parse_volume_args(plan.extra_args)?
        .into_iter()
        .map(|v| {
            serde_json::json!({
                "host_file": v.host_file.to_string_lossy(),
                "guest_path": v.guest_path,
            })
        })
        .collect();
    let restart_policy = arg_value(plan.extra_args, "--restart").unwrap_or("no");
    let health_cmd = arg_value(plan.extra_args, "--health-cmd").unwrap_or("");
    // Health checks default to every 30 s, but only when a command is set.
    let health_interval_secs: u32 = arg_value(plan.extra_args, "--health-interval")
        .and_then(|s| s.parse().ok())
        .unwrap_or(if health_cmd.is_empty() { 0 } else { 30 });
    let metadata = serde_json::json!({
        "name": plan.snapshot_name(),
        "image": plan.image,
        "port": plan.guest_port,
        "memory_mib": plan.memory_mib,
        "cmd": resolution.effective_cmd,
        "snapshot_sha16": snapshot_id,
        "snapshot_id16": snapshot_id,
        "snapshot_hash_mode": hash_mode,
        "runtime_sha16": runtime_sha16,
        "layers": layer_paths,
        "volumes": volumes_meta,
        "restart_policy": restart_policy,
        "health_cmd": health_cmd,
        "health_interval_secs": health_interval_secs,
        "delta_squashfs": delta_squashfs,
        "rootfs_squashfs": serde_json::Value::Null,
        "snapshot_base": snap_base,
        "snapshot_logical_bytes": snapshot_logical_bytes,
        "snapshot_physical_bytes": snapshot_physical_bytes,
        "snapshot_ram_data_mib": snapshot_ram_data_mib,
        "snapshot_ram_zero_mib": snapshot_ram_zero_mib,
        "init_cpio": init_cpio,
        "kernel": kernel,
        "egress_policy": egress_policy,
        "vcpus": plan.vcpus,
        "ttl_seconds": serde_json::Value::Null,
        "egress_bps": serde_json::Value::Null,
        "cpu_nice": serde_json::Value::Null,
        "cpu_affinity": serde_json::Value::Null,
        "cpu_qos": serde_json::Value::Null,
        "balloon_target_pages": balloon_target_pages,
        "auth": {"type": "none"},
        // Recorded so the reuse checks above can validate future bakes.
        "native_bake_key": native_bake_key,
        "native_bake_inputs": native_bake_inputs,
        "timings": timings,
        "baked_at": baked_at,
        "supermachine_version": "supermachine",
    });
    let meta_path = out_dir.join("metadata.json");
    std::fs::write(
        &meta_path,
        serde_json::to_vec_pretty(&metadata).map_err(|e| format!("encode metadata: {e}"))?,
    )
    .map_err(|e| format!("write metadata {}: {e}", meta_path.display()))?;
    Ok(NativeBakeResult {
        total_ms,
        timings,
        reused: false,
    })
}
#[allow(clippy::too_many_arguments)]
fn run_native_supermachine_bake_pipelined(
plan: &BakePlan<'_>,
resolution: &ImageResolution,
layer_plan: &LayerPlan,
delta: &DeltaMaterialization,
root: &Path,
native_bake_key: &str,
native_bake_inputs: &serde_json::Value,
pipelined: PipelinedWarmup<'_>,
) -> Result<NativeBakeResult, String> {
use std::io::{BufRead, BufReader, Write};
use std::os::unix::net::UnixListener;
let total_t0 = Instant::now();
let out_dir = plan.snapshots_dir.join(plan.snapshot_name());
std::fs::create_dir_all(&out_dir)
.map_err(|e| format!("create snapshot dir {}: {e}", out_dir.display()))?;
std::fs::create_dir_all(&pipelined.warm_dir).map_err(|e| {
format!(
"create warm snapshot dir {}: {e}",
pipelined.warm_dir.display()
)
})?;
let delta_cache = delta
.cache_path
.as_ref()
.filter(|p| p.is_file())
.ok_or_else(|| "pipelined bake needs materialized delta cache".to_owned())?;
let squash_t0 = Instant::now();
let delta_squashfs = out_dir.join("delta.squashfs");
std::fs::copy(delta_cache, &delta_squashfs).map_err(|e| {
format!(
"copy delta cache {} -> {}: {e}",
delta_cache.display(),
delta_squashfs.display()
)
})?;
let squashfs_ms = elapsed_ms(squash_t0);
let env_json = write_env_json(plan, resolution, &out_dir)?;
let (init_cpio, initramfs_ms) = build_initramfs(root, &out_dir)?;
let sm22_bin = supermachine_worker_bin(root);
let kernel = supermachine_kernel(root);
let snap_base = out_dir.join("restore.snap");
let snap_warm = pipelined.warm_dir.join("restore.snap");
let log = out_dir.join("bake.log");
let _ = std::fs::remove_file(&snap_base);
let _ = std::fs::remove_file(&snap_warm);
let log_file = std::fs::File::create(&log)
.map_err(|e| format!("create bake log {}: {e}", log.display()))?;
let log_err = log_file
.try_clone()
.map_err(|e| format!("clone bake log fd: {e}"))?;
let listener_settle_ms = native_listener_settle_ms(plan);
let snapshot_after_ms = native_snapshot_after_ms(plan);
let host_time = std::time::SystemTime::now()
.duration_since(UNIX_EPOCH)
.map_err(|e| format!("host time before epoch: {e}"))?
.as_secs();
let suffix = format!(
"{}-{}-{}",
std::process::id(),
host_time,
TEMP_COUNTER.fetch_add(1, Ordering::Relaxed)
);
let socks_dir = std::env::temp_dir().join(format!("supermachine-bake-{suffix}"));
std::fs::create_dir_all(&socks_dir)
.map_err(|e| format!("create bake socks dir {}: {e}", socks_dir.display()))?;
let vsock_mux_path = socks_dir.join("vsock-mux.sock");
let vsock_exec_path = socks_dir.join("vsock-exec.sock");
let ctl_path = socks_dir.join("ctl.sock");
let _ = std::fs::remove_file(&vsock_mux_path);
let _ = std::fs::remove_file(&vsock_exec_path);
let _ = std::fs::remove_file(&ctl_path);
let ctl_listener = UnixListener::bind(&ctl_path)
.map_err(|e| format!("bind bake ctl socket {}: {e}", ctl_path.display()))?;
let bake_t0 = Instant::now();
let mut cmd = Command::new(&sm22_bin);
cmd.arg("--kernel")
.arg(&kernel)
.arg("--initramfs")
.arg(&init_cpio);
let layer_paths: Vec<PathBuf> = layer_plan
.layer_shas
.iter()
.map(|sha| layer_plan.cache_dir.join(format!("{sha}.squashfs")))
.collect();
for layer in &layer_paths {
cmd.arg("--virtio-blk").arg(layer);
}
cmd.arg("--virtio-blk").arg(&delta_squashfs);
let volumes = parse_volume_args(plan.extra_args)?;
for vol in &volumes {
ensure_volume_host_file(vol, crate::vmm::resources::VolumeSpec::DEFAULT_SIZE_BYTES)?;
cmd.arg("--volume").arg(format!(
"{}:{}",
vol.host_file.display(),
vol.guest_path
));
}
cmd.arg("--memory")
.arg(plan.memory_mib.to_string())
.arg("--vcpus")
.arg(plan.vcpus.to_string())
.arg("--cmdline")
.arg(format!(
"earlycon=pl011,mmio32,0x09000000 console=ttyAMA0 tsi_hijack supermachine.host_time={host_time}"
));
if volumes.is_empty() {
cmd.arg("--snapshot-on-listener")
.arg("--snapshot-after-ms")
.arg(snapshot_after_ms.to_string())
.arg("--quiesce-ms")
.arg(listener_settle_ms.to_string());
} else {
cmd.arg("--snapshot-at").arg("1");
}
cmd.arg("--env-file")
.arg(&env_json)
.arg("--vsock-mux")
.arg(&vsock_mux_path)
.arg("--vsock-exec")
.arg(&vsock_exec_path)
.arg("--pool-worker")
.arg(&ctl_path)
.stdout(Stdio::from(log_file))
.stderr(Stdio::from(log_err));
let mut child = cmd
.spawn()
.map_err(|e| format!("spawn pipelined-bake worker: {e}"))?;
ctl_listener
.set_nonblocking(true)
.map_err(|e| format!("set ctl listener nonblocking: {e}"))?;
let deadline = Instant::now() + Duration::from_secs(10);
let mut backoff = Duration::from_millis(1);
let stream = loop {
match ctl_listener.accept() {
Ok((s, _)) => break s,
Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => {
if Instant::now() > deadline {
let _ = child.kill();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: worker ctl connect did not arrive within 10s; \
see {}",
log.display()
));
}
std::thread::sleep(backoff);
backoff = (backoff * 2).min(Duration::from_millis(10));
}
Err(e) => {
let _ = child.kill();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!("pipelined bake: ctl accept: {e}"));
}
}
};
stream
.set_nonblocking(false)
.map_err(|e| format!("set ctl stream blocking: {e}"))?;
let writer = stream
.try_clone()
.map_err(|e| format!("clone ctl stream: {e}"))?;
let mut reader = BufReader::new(stream);
let mut writer = writer;
let mut line = String::new();
reader
.read_line(&mut line)
.map_err(|e| format!("pipelined bake: read READY: {e}"))?;
if line.trim() != "READY" {
let _ = child.kill();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: expected READY, got {:?}; see {}",
line.trim(),
log.display()
));
}
let bake_ready_t0 = Instant::now();
let mut listener_ready_ms: Option<u128> = None;
line.clear();
reader
.read_line(&mut line)
.map_err(|e| format!("pipelined bake: read BAKE_READY: {e}"))?;
if line.trim() != "BAKE_READY" {
let _ = child.kill();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: expected BAKE_READY, got {:?}; see {}",
line.trim(),
log.display()
));
}
let bake_ready_ms = bake_ready_t0.elapsed().as_millis();
if log_has(&log, "listener readiness") {
listener_ready_ms = Some(elapsed_ms(bake_t0));
}
let async_send_t0 = Instant::now();
let snap_base_str = snap_base
.to_str()
.ok_or_else(|| "snap_base path not UTF-8".to_owned())?;
writeln!(writer, "SNAPSHOT_ASYNC {snap_base_str}")
.map_err(|e| format!("write SNAPSHOT_ASYNC: {e}"))?;
writer
.flush()
.map_err(|e| format!("flush SNAPSHOT_ASYNC: {e}"))?;
line.clear();
reader
.read_line(&mut line)
.map_err(|e| format!("read DONE_SNAPSHOT_ASYNC: {e}"))?;
let line_trim = line.trim();
let mut base_capture_us: u64 = 0;
if let Some(rest) = line_trim.strip_prefix("DONE_SNAPSHOT_ASYNC") {
for kv in rest.split_ascii_whitespace() {
if let Some(v) = kv.strip_prefix("capture_us=") {
base_capture_us = v.parse().unwrap_or(0);
}
}
} else if let Some(rest) = line_trim.strip_prefix("ERR_SNAPSHOT ") {
let _ = child.kill();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: base snapshot failed: {}; see {}",
rest.trim(),
log.display()
));
} else {
let _ = child.kill();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: bad SNAPSHOT_ASYNC response {:?}; see {}",
line_trim,
log.display()
));
}
let async_send_ms = async_send_t0.elapsed().as_millis();
let warmup_t0 = Instant::now();
let warmup_ctx = PipelinedWarmupContext {
vsock_mux_path: vsock_mux_path.clone(),
vsock_exec_path: vsock_exec_path.clone(),
};
if let Err(e) = (pipelined.callback)(&warmup_ctx) {
let _ = writeln!(writer, "QUIT");
let _ = writer.flush();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!("pipelined bake: warmup callback failed: {e}"));
}
let warmup_ms = warmup_t0.elapsed().as_millis();
let warm_send_t0 = Instant::now();
let snap_warm_str = snap_warm
.to_str()
.ok_or_else(|| "snap_warm path not UTF-8".to_owned())?;
let snap_base_str_for_warm = snap_base
.to_str()
.ok_or_else(|| "snap_base path not UTF-8".to_owned())?;
writeln!(
writer,
"SNAPSHOT {snap_warm_str} base={snap_base_str_for_warm}"
)
.map_err(|e| format!("write SNAPSHOT: {e}"))?;
writer
.flush()
.map_err(|e| format!("flush SNAPSHOT: {e}"))?;
line.clear();
reader
.read_line(&mut line)
.map_err(|e| format!("read DONE_SNAPSHOT: {e}"))?;
let line_trim = line.trim();
let mut warm_save_us: u64 = 0;
let mut warm_bytes: u64 = 0;
if let Some(rest) = line_trim.strip_prefix("DONE_SNAPSHOT") {
for kv in rest.split_ascii_whitespace() {
if let Some(v) = kv.strip_prefix("save_us=") {
warm_save_us = v.parse().unwrap_or(0);
} else if let Some(v) = kv.strip_prefix("bytes_written=") {
warm_bytes = v.parse().unwrap_or(0);
}
}
} else if let Some(rest) = line_trim.strip_prefix("ERR_SNAPSHOT ") {
let _ = writeln!(writer, "QUIT");
let _ = writer.flush();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: warm snapshot failed: {}; see {}",
rest.trim(),
log.display()
));
} else {
let _ = writeln!(writer, "QUIT");
let _ = writer.flush();
let _ = child.wait();
let _ = std::fs::remove_dir_all(&socks_dir);
return Err(format!(
"pipelined bake: bad SNAPSHOT response {:?}; see {}",
line_trim,
log.display()
));
}
let warm_send_ms = warm_send_t0.elapsed().as_millis();
let quit_t0 = Instant::now();
writeln!(writer, "QUIT").map_err(|e| format!("write QUIT: {e}"))?;
writer.flush().map_err(|e| format!("flush QUIT: {e}"))?;
let _ = child.wait();
let quit_ms = quit_t0.elapsed().as_millis();
let _ = std::fs::remove_dir_all(&socks_dir);
if !snap_base.is_file() {
return Err(format!(
"pipelined bake: base snapshot {} missing after worker exit; see {}",
snap_base.display(),
log.display()
));
}
if !snap_warm.is_file() {
return Err(format!(
"pipelined bake: warm snapshot {} missing after worker exit; see {}",
snap_warm.display(),
log.display()
));
}
let vmm_bake_ms = elapsed_ms(bake_t0);
let snapshot_logical_bytes = std::fs::metadata(&snap_base).ok().map(|m| m.len());
let snapshot_physical_bytes = file_physical_bytes(&snap_base);
let warm_logical_bytes = std::fs::metadata(&snap_warm).ok().map(|m| m.len());
let warm_physical_bytes = file_physical_bytes(&snap_warm);
let metadata_t0 = Instant::now();
let runtime_sha = sha256_file(&sm22_bin)?;
let runtime_sha16 = &runtime_sha[..runtime_sha.len().min(16)];
let hash_mode =
std::env::var("SUPERMACHINE_SNAPSHOT_HASH_MODE").unwrap_or_else(|_| "fast".to_owned());
let snapshot_id = compute_snapshot_id(plan, &snap_base, runtime_sha16, &hash_mode)?;
let warm_snapshot_id = compute_snapshot_id(plan, &snap_warm, runtime_sha16, &hash_mode)?;
let baked_at = now_utc_iso()?;
let egress_policy = arg_value(plan.extra_args, "--egress-policy").unwrap_or("");
let balloon_target_pages = plan.memory_mib as u64 * 256 * 70 / 100;
let metadata_prepare_ms = elapsed_ms(metadata_t0);
let total_ms = elapsed_ms(total_t0);
let timings = serde_json::json!({
"total_ms": total_ms,
"pull_inspect_ms": resolution.inspect_ms,
"rootfs_prepare_ms": layer_plan.plan_ms,
"rootfs_customize_ms": delta.prepare_ms,
"init_oci_build_ms": 0,
"squashfs_ms": squashfs_ms,
"initramfs_ms": initramfs_ms,
"vmm_bake_ms": vmm_bake_ms,
"metadata_prepare_ms": metadata_prepare_ms,
"guest_boot_to_listener_ms": listener_ready_ms,
"listener_settle_config_ms": listener_settle_ms,
"bake_ready_ms": bake_ready_ms,
"snapshot_async_send_ms": async_send_ms,
"warmup_ms": warmup_ms,
"warm_send_ms": warm_send_ms,
"quit_ms": quit_ms,
"snapshot_capture_us": base_capture_us,
"warm_save_us": warm_save_us,
"snapshot_reason": "pipelined",
});
let volumes_meta: Vec<serde_json::Value> = parse_volume_args(plan.extra_args)?
.into_iter()
.map(|v| {
serde_json::json!({
"host_file": v.host_file.to_string_lossy(),
"guest_path": v.guest_path,
})
})
.collect();
let restart_policy = arg_value(plan.extra_args, "--restart").unwrap_or("no");
let health_cmd = arg_value(plan.extra_args, "--health-cmd").unwrap_or("");
let health_interval_secs: u32 = arg_value(plan.extra_args, "--health-interval")
.and_then(|s| s.parse().ok())
.unwrap_or(if health_cmd.is_empty() { 0 } else { 30 });
let base_metadata = serde_json::json!({
"name": plan.snapshot_name(),
"image": plan.image,
"port": plan.guest_port,
"memory_mib": plan.memory_mib,
"cmd": resolution.effective_cmd,
"snapshot_sha16": snapshot_id,
"snapshot_id16": snapshot_id,
"snapshot_hash_mode": hash_mode,
"runtime_sha16": runtime_sha16,
"layers": layer_paths,
"volumes": volumes_meta,
"restart_policy": restart_policy,
"health_cmd": health_cmd,
"health_interval_secs": health_interval_secs,
"delta_squashfs": delta_squashfs,
"rootfs_squashfs": serde_json::Value::Null,
"snapshot_base": snap_base,
"snapshot_logical_bytes": snapshot_logical_bytes,
"snapshot_physical_bytes": snapshot_physical_bytes,
"snapshot_ram_data_mib": serde_json::Value::Null,
"snapshot_ram_zero_mib": serde_json::Value::Null,
"init_cpio": init_cpio,
"kernel": kernel,
"egress_policy": egress_policy,
"vcpus": plan.vcpus,
"ttl_seconds": serde_json::Value::Null,
"egress_bps": serde_json::Value::Null,
"cpu_nice": serde_json::Value::Null,
"cpu_affinity": serde_json::Value::Null,
"cpu_qos": serde_json::Value::Null,
"balloon_target_pages": balloon_target_pages,
"auth": {"type": "none"},
"native_bake_key": native_bake_key,
"native_bake_inputs": native_bake_inputs,
"timings": timings,
"baked_at": baked_at,
"supermachine_version": "supermachine",
"pipelined": true,
});
let base_meta_path = out_dir.join("metadata.json");
std::fs::write(
&base_meta_path,
serde_json::to_vec_pretty(&base_metadata)
.map_err(|e| format!("encode base metadata: {e}"))?,
)
.map_err(|e| format!("write base metadata {}: {e}", base_meta_path.display()))?;
let warm_name = format!("{}__warm__{}", plan.snapshot_name(), pipelined.warm_tag);
let warm_metadata = serde_json::json!({
"name": warm_name,
"image": plan.image,
"port": plan.guest_port,
"memory_mib": plan.memory_mib,
"cmd": resolution.effective_cmd,
"snapshot_sha16": warm_snapshot_id,
"snapshot_id16": warm_snapshot_id,
"snapshot_hash_mode": hash_mode,
"runtime_sha16": runtime_sha16,
"layers": layer_paths,
"volumes": volumes_meta,
"restart_policy": restart_policy,
"health_cmd": health_cmd,
"health_interval_secs": health_interval_secs,
"delta_squashfs": delta_squashfs,
"rootfs_squashfs": serde_json::Value::Null,
"snapshot_base": snap_warm,
"snapshot_logical_bytes": warm_logical_bytes,
"snapshot_physical_bytes": warm_physical_bytes,
"snapshot_ram_data_mib": serde_json::Value::Null,
"snapshot_ram_zero_mib": serde_json::Value::Null,
"init_cpio": init_cpio,
"kernel": kernel,
"egress_policy": egress_policy,
"vcpus": plan.vcpus,
"ttl_seconds": serde_json::Value::Null,
"egress_bps": serde_json::Value::Null,
"cpu_nice": serde_json::Value::Null,
"cpu_affinity": serde_json::Value::Null,
"cpu_qos": serde_json::Value::Null,
"balloon_target_pages": balloon_target_pages,
"auth": {"type": "none"},
"native_bake_key": native_bake_key,
"native_bake_inputs": native_bake_inputs,
"timings": timings,
"baked_at": baked_at,
"supermachine_version": "supermachine",
"pipelined": true,
"warm_tag": pipelined.warm_tag,
"warm_of": plan.snapshot_name(),
"bytes_written": warm_bytes,
});
let warm_meta_path = pipelined.warm_dir.join("metadata.json");
std::fs::write(
&warm_meta_path,
serde_json::to_vec_pretty(&warm_metadata)
.map_err(|e| format!("encode warm metadata: {e}"))?,
)
.map_err(|e| format!("write warm metadata {}: {e}", warm_meta_path.display()))?;
Ok(NativeBakeResult {
total_ms,
timings,
reused: false,
})
}
/// Derives the 16-hex-character snapshot id for `snap_path`.
///
/// `hash_mode` selects the strategy:
/// - `"content"`: sha256 over the snapshot file's bytes (slow, exact).
/// - `"fast"`: sha256 over snapshot name, image, runtime hash, file size
///   and mtime (nanoseconds since epoch) — cheap, but only as stable as
///   those inputs.
///
/// Returns the first 16 hex characters of the digest, or an error for any
/// other `hash_mode` value or when the file cannot be stat'ed/hashed.
fn compute_snapshot_id(
    plan: &BakePlan<'_>,
    snap_path: &Path,
    runtime_sha16: &str,
    hash_mode: &str,
) -> Result<String, String> {
    let digest = match hash_mode {
        "content" => sha256_file(snap_path)?,
        "fast" => {
            let meta = std::fs::metadata(snap_path)
                .map_err(|e| format!("stat snapshot {}: {e}", snap_path.display()))?;
            // mtime as nanoseconds; errors if the clock predates the epoch.
            let mtime_ns = meta
                .modified()
                .map_err(|e| format!("snapshot mtime: {e}"))?
                .duration_since(UNIX_EPOCH)
                .map_err(|e| format!("snapshot mtime before epoch: {e}"))?
                .as_nanos();
            // NUL-separated so field boundaries cannot collide.
            sha256_text(&format!(
                "{}\0{}\0{}\0{}\0{}",
                plan.snapshot_name(),
                plan.image,
                runtime_sha16,
                meta.len(),
                mtime_ns
            ))?
        }
        _ => return Err("SUPERMACHINE_SNAPSHOT_HASH_MODE must be fast or content".to_owned()),
    };
    Ok(digest[..digest.len().min(16)].to_owned())
}
/// Runs a pipelined bake-and-push for `request`, producing the base
/// snapshot plus the warm snapshot described by `pipelined`.
///
/// Only the native "supermachine" runtime is supported; anything else is
/// rejected up front. `run_t0` anchors the total-elapsed trace output.
pub fn run_push_pipelined(
    request: &BakeRequest,
    run_t0: Instant,
    root: &Path,
    pipelined: PipelinedWarmup<'_>,
) -> Result<(), String> {
    let plan = BakePlan::from_request(request);
    if plan.runtime != "supermachine" {
        return Err(format!(
            "pipelined bake only supports the native supermachine runtime, got {:?}",
            plan.runtime
        ));
    }
    let source = select_image_source(plan.image)?;
    let resolution = resolve_image(&plan, source.as_ref())?;
    if trace_enabled() {
        emit_image_resolution_trace(&resolution);
    }
    let layer_plan = plan_layers(&plan, &resolution, source.as_ref())?;
    // Materialize layers, but hold the Result so the save work dir is
    // cleaned up before any error propagates.
    let materialization = match layer_plan.as_ref() {
        None => Ok(None),
        Some(lp) => materialize_missing_layers(plan.image, lp, source.as_ref()).map(Some),
    };
    if let Some(work_dir) = layer_plan
        .as_ref()
        .and_then(|lp| lp.save_work_dir.as_deref())
    {
        let _ = std::fs::remove_dir_all(work_dir);
    }
    let _layer_materialization = materialization?;
    let delta_materialization = materialize_delta_cache(&plan, &resolution, root)?;
    // Pipelined bakes require both a layer plan and a delta cache entry.
    let layer_plan = layer_plan
        .as_ref()
        .ok_or_else(|| "pipelined bake: missing layer plan".to_owned())?;
    let delta = delta_materialization
        .as_ref()
        .ok_or_else(|| "pipelined bake: missing delta cache".to_owned())?;
    let (native_bake_key, native_bake_inputs) =
        native_supermachine_bake_key(&plan, &resolution, layer_plan, delta, root)?;
    let _native_t0 = Instant::now();
    let result = run_native_supermachine_bake_pipelined(
        &plan,
        &resolution,
        layer_plan,
        delta,
        root,
        &native_bake_key,
        &native_bake_inputs,
        pipelined,
    )?;
    if trace_enabled() {
        eprintln!(
            "supermachine: pipelined bake finished after {}ms total={}ms",
            result.total_ms,
            elapsed_ms(run_t0)
        );
        eprintln!("supermachine: bake timings {}", result.timings);
    }
    Ok(())
}
/// Builds (or reuses) the cached "delta" squashfs overlay for a native
/// supermachine bake: a small layer holding the cmd/workdir/user/hostname
/// files, any --extra-file payloads, inbound TLS material, the init-oci
/// binary, the volume manifest, and the supermachine agent.
///
/// A content key is accumulated in `key_material` as each artifact is
/// staged; the key's line ORDER is part of the cache identity, so the
/// staging steps below must not be reordered. Returns `Ok(None)` for
/// non-supermachine runtimes, and otherwise a `DeltaMaterialization`
/// (with `skipped` set when --inbound-tls-autogen forces a per-bake
/// delta and caching is bypassed).
fn materialize_delta_cache(
    plan: &BakePlan<'_>,
    resolution: &ImageResolution,
    root: &Path,
) -> Result<Option<DeltaMaterialization>, String> {
    // Delta overlays only apply to the native supermachine runtime.
    if plan.runtime != "supermachine" {
        return Ok(None);
    }
    let prepare_t0 = Instant::now();
    let mut result = DeltaMaterialization {
        prepare_ms: 0,
        materialize_ms: 0,
        cache_hit: false,
        skipped: None,
        key: None,
        cache_path: None,
    };
    // Autogenerated TLS certs are per-bake, so a shared cache entry would
    // be wrong — skip delta caching entirely.
    if arg_present(plan.extra_args, "--inbound-tls-autogen") {
        result.prepare_ms = elapsed_ms(prepare_t0);
        result.skipped = Some("inbound-tls-autogen".to_owned());
        return Ok(Some(result));
    }
    // Use the raw --cmd override verbatim when given; otherwise encode the
    // resolved argv as JSON for the key.
    let cmd_json = plan.cmd_override.map(ToOwned::to_owned).unwrap_or_else(|| {
        serde_json::to_string(&resolution.effective_cmd).unwrap_or_else(|_| "[]".to_owned())
    });
    let mut key_material = format!("cmd={cmd_json}");
    let stage = temp_work_dir("supermachine-delta-stage")?;
    let materialize_t0 = Instant::now();
    // All staging runs inside an immediately-invoked closure so that the
    // temp stage dir is removed on both success and failure before any
    // error propagates (see the cleanup after the closure call).
    let materialized = (|| {
        write_lines(&stage.join(".supermachine-cmd"), &resolution.effective_cmd)?;
        // Working dir: only persisted when non-empty and not the root.
        if let Some(cwd) = resolution
            .working_dir
            .as_deref()
            .filter(|cwd| !cwd.is_empty() && *cwd != "/")
        {
            std::fs::write(stage.join(".supermachine-workdir"), format!("{cwd}\n"))
                .map_err(|e| format!("write .supermachine-workdir: {e}"))?;
            key_material.push_str(&format!("\ncwd={cwd}"));
        }
        if let Some(user) = resolution.user.as_deref().filter(|user| !user.is_empty()) {
            std::fs::write(stage.join(".supermachine-user"), format!("{user}\n"))
                .map_err(|e| format!("write .supermachine-user: {e}"))?;
            key_material.push_str(&format!("\nuser={user}"));
        }
        // Hostname: validated to RFC-1035-ish length and no whitespace/NUL.
        if let Some(hostname) = arg_value(plan.extra_args, "--hostname") {
            if hostname.is_empty()
                || hostname.len() > 63
                || hostname.contains(|c: char| c.is_whitespace() || c == '\0')
            {
                return Err(format!("--hostname {hostname:?} invalid (1..=63 chars, no whitespace)"));
            }
            std::fs::write(stage.join(".supermachine-hostname"), format!("{hostname}\n"))
                .map_err(|e| format!("write .supermachine-hostname: {e}"))?;
            key_material.push_str(&format!("\nhostname={hostname}"));
        }
        // Extra files: copied host:guest, keyed by guest path, mode, and
        // content hash so host-side edits invalidate the cache.
        for extra in arg_values(plan.extra_args, "--extra-file") {
            let Some((host, guest)) = extra.split_once(':') else {
                return Err(format!("bad --extra-file '{extra}' (want host:guest)"));
            };
            let host_path = PathBuf::from(host);
            let guest_path = guest.trim_start_matches('/');
            let dst = stage.join(guest_path);
            if let Some(parent) = dst.parent() {
                std::fs::create_dir_all(parent)
                    .map_err(|e| format!("create extra parent {}: {e}", parent.display()))?;
            }
            std::fs::copy(&host_path, &dst).map_err(|e| {
                format!(
                    "copy extra {} -> {}: {e}",
                    host_path.display(),
                    dst.display()
                )
            })?;
            let mode = file_mode_octal(&host_path)?;
            let sum = sha256_file(&host_path)?;
            key_material.push_str(&format!("\nextra={guest}:{mode}:{sum}"));
        }
        // Provided TLS material: both cert and key must be given together.
        let tls_cert = arg_value(plan.extra_args, "--inbound-tls-cert");
        let tls_key = arg_value(plan.extra_args, "--inbound-tls-key");
        if tls_cert.is_some() || tls_key.is_some() {
            let cert = tls_cert
                .ok_or_else(|| "need both --inbound-tls-cert and --inbound-tls-key".to_owned())?;
            let key = tls_key
                .ok_or_else(|| "need both --inbound-tls-cert and --inbound-tls-key".to_owned())?;
            let tls_dir = stage.join("etc/supermachine");
            std::fs::create_dir_all(&tls_dir)
                .map_err(|e| format!("create TLS dir {}: {e}", tls_dir.display()))?;
            let cert_path = PathBuf::from(cert);
            let key_path = PathBuf::from(key);
            std::fs::copy(&cert_path, tls_dir.join("cert.pem"))
                .map_err(|e| format!("copy TLS cert {}: {e}", cert_path.display()))?;
            std::fs::copy(&key_path, tls_dir.join("key.pem"))
                .map_err(|e| format!("copy TLS key {}: {e}", key_path.display()))?;
            // Key is private-only; cert is world-readable.
            set_mode(&tls_dir.join("cert.pem"), 0o644)?;
            set_mode(&tls_dir.join("key.pem"), 0o600)?;
            let cert_sum = sha256_file(&cert_path)?;
            let key_sum = sha256_file(&key_path)?;
            key_material.push_str(&format!("\ntls=provided:{cert_sum}:{key_sum}"));
        }
        // Standard mount points the guest expects to exist in the overlay.
        for dir in ["proc", "sys", "dev", "dev/shm", "tmp", "run"] {
            std::fs::create_dir_all(stage.join(dir))
                .map_err(|e| format!("create delta dir {dir}: {e}"))?;
        }
        // init-oci and the agent are keyed by size+mtime (cheap), not content.
        let init = ensure_init_oci(root)?;
        let init_dst = stage.join("init");
        std::fs::copy(&init, &init_dst)
            .map_err(|e| format!("copy init-oci {}: {e}", init.display()))?;
        set_mode(&init_dst, 0o755)?;
        key_material.push_str(&format!("\ninit={}", file_size_mtime(&init)?));
        // Volume manifest: one guest path per line; host paths are bound at
        // run time and intentionally not part of the cache key.
        let volumes = parse_volume_args(plan.extra_args)?;
        if !volumes.is_empty() {
            let body: String = volumes
                .iter()
                .map(|v| format!("{}\n", v.guest_path))
                .collect();
            std::fs::write(stage.join(".supermachine-volumes"), &body)
                .map_err(|e| format!("write .supermachine-volumes: {e}"))?;
            for v in &volumes {
                key_material.push_str(&format!("\nvolume={}", v.guest_path));
            }
        }
        let agent = ensure_supermachine_agent(root)?;
        let agent_dst = stage.join("supermachine-agent");
        std::fs::copy(&agent, &agent_dst)
            .map_err(|e| format!("copy supermachine-agent {}: {e}", agent.display()))?;
        set_mode(&agent_dst, 0o755)?;
        key_material.push_str(&format!("\nagent={}", file_size_mtime(&agent)?));
        // Key complete: the delta lives under deltas/<key>.squashfs.
        let key = sha256_text(&format!("{key_material}\n"))?;
        let delta_cache_dir = layer_cache_dir().join("deltas");
        std::fs::create_dir_all(&delta_cache_dir)
            .map_err(|e| format!("create delta cache dir {}: {e}", delta_cache_dir.display()))?;
        let cache_path = delta_cache_dir.join(format!("{key}.squashfs"));
        // NOTE(review): prepare_ms is taken here, after staging began at
        // materialize_t0, so the two timing windows overlap — confirm intended.
        result.prepare_ms = elapsed_ms(prepare_t0);
        result.key = Some(key);
        result.cache_path = Some(cache_path.clone());
        if cache_path.is_file() {
            result.cache_hit = true;
            return Ok(());
        }
        // Miss: build into a pid+counter-unique temp name, then rename into
        // place so concurrent bakers never observe a partial file.
        let unique = TEMP_COUNTER.fetch_add(1, Ordering::Relaxed);
        let tmp = delta_cache_dir.join(format!(
            ".delta.{}.{}.squashfs.tmp",
            std::process::id(),
            unique
        ));
        let _ = std::fs::remove_file(&tmp);
        let mut squash = Command::new("mksquashfs");
        squash
            .arg(&stage)
            .arg(&tmp)
            .arg("-noappend")
            .arg("-comp")
            .arg("zstd")
            .arg("-Xcompression-level")
            .arg("3")
            .stdout(Stdio::null())
            .stderr(Stdio::null());
        run_status(squash, "mksquashfs delta cache")?;
        // If another process installed the same key meanwhile, keep theirs
        // and discard ours; otherwise atomically install our build.
        if cache_path.is_file() {
            let _ = std::fs::remove_file(&tmp);
            result.cache_hit = true;
        } else {
            std::fs::rename(&tmp, &cache_path).map_err(|e| {
                format!(
                    "install delta cache {} -> {}: {e}",
                    tmp.display(),
                    cache_path.display()
                )
            })?;
            result.cache_hit = false;
        }
        Ok(())
    })();
    result.materialize_ms = elapsed_ms(materialize_t0);
    // Stage dir cleanup happens unconditionally, then any staging error
    // is propagated.
    let _ = std::fs::remove_dir_all(&stage);
    materialized?;
    Ok(Some(result))
}
/// Resolves the image for `plan`: applies the pull policy, inspects the
/// image config, and computes the effective argv, working dir, and user.
///
/// Precedence for the effective command:
/// 1. `--cmd` (JSON argv), optionally prefixed by a non-empty `--entrypoint`;
/// 2. `--entrypoint` prefixed onto the image's CMD;
/// 3. the cmd recorded in previous bake metadata, if any;
/// 4. the image's ENTRYPOINT + CMD.
///
/// Errors when the pull policy is unknown, `--pull never` is set for a
/// missing image, the inspect output lacks `Config`, `--cmd` is not a JSON
/// string array, `--entrypoint` is empty without `--cmd`, or the image has
/// no runnable command at all.
fn resolve_image(plan: &BakePlan<'_>, source: &dyn ImageSource) -> Result<ImageResolution, String> {
    let t0 = Instant::now();
    let local_arch = source.local_arch(plan.image);
    // Decide whether to pull, recording which branch fired for tracing.
    let pull_action = match plan.pull_policy {
        "always" => {
            source.pull_arm64(plan.image, true)?;
            "always".to_owned()
        }
        "missing" => {
            if local_arch.as_deref() == Some("arm64") {
                "skipped-local-arm64".to_owned()
            } else {
                source.pull_arm64(plan.image, false)?;
                "missing-pulled-arm64".to_owned()
            }
        }
        "never" => {
            if local_arch.is_none() {
                return Err(format!(
                    "image {} not present locally and --pull never was set",
                    plan.image
                ));
            }
            "never".to_owned()
        }
        other => return Err(format!("unknown --pull policy: {other}")),
    };
    let image_obj = source.inspect(plan.image)?;
    let cfg = image_obj
        .get("Config")
        .ok_or_else(|| "image inspect missing Config".to_owned())?;
    let image_id = image_obj
        .get("Id")
        .and_then(|v| v.as_str())
        .map(|s| s.strip_prefix("sha256:").unwrap_or(s).to_owned());
    let architecture = image_obj
        .get("Architecture")
        .and_then(|v| v.as_str())
        .map(ToOwned::to_owned);
    // Raw Env entry count — may differ from env.len() if value_string_array
    // filters non-string entries, so both are kept.
    let env_count = cfg
        .get("Env")
        .and_then(|v| v.as_array())
        .map(|v| v.len())
        .unwrap_or(0);
    let env = value_string_array(cfg.get("Env"));
    // CLI overrides take precedence over the image config.
    let workdir_override = arg_value(plan.extra_args, "--workdir").map(ToOwned::to_owned);
    let user_override = arg_value(plan.extra_args, "--user").map(ToOwned::to_owned);
    let working_dir = workdir_override.or_else(|| {
        cfg.get("WorkingDir")
            .and_then(|v| v.as_str())
            .filter(|s| !s.is_empty())
            .map(ToOwned::to_owned)
    });
    let user = user_override.or_else(|| {
        cfg.get("User")
            .and_then(|v| v.as_str())
            .filter(|s| !s.is_empty())
            .map(ToOwned::to_owned)
    });
    let entrypoint_override = arg_value(plan.extra_args, "--entrypoint").map(ToOwned::to_owned);
    let effective_cmd = if let Some(cmd_override) = plan.cmd_override {
        // Explicit --cmd: parse JSON argv; a non-empty --entrypoint prefixes it.
        let cmd_argv = serde_json::from_str::<Vec<String>>(cmd_override)
            .map_err(|e| format!("--cmd must be a JSON string array: {e}"))?;
        match &entrypoint_override {
            Some(ep) if !ep.is_empty() => {
                let mut argv = vec![ep.clone()];
                argv.extend(cmd_argv);
                argv
            }
            _ => cmd_argv,
        }
    } else if let Some(ep) = &entrypoint_override {
        // --entrypoint without --cmd: prefix the image's CMD.
        let mut argv = vec![ep.clone()];
        argv.extend(value_string_array(cfg.get("Cmd")));
        if argv.len() == 1 && ep.is_empty() {
            return Err("--entrypoint cannot be empty without --cmd".to_owned());
        }
        argv
    } else if let Some(prev_cmd) = previous_metadata_cmd(plan) {
        // No overrides: reuse the cmd from a previous bake if present.
        prev_cmd
    } else {
        // Fall back to the image's ENTRYPOINT + CMD.
        let mut argv = value_string_array(cfg.get("Entrypoint"));
        argv.extend(value_string_array(cfg.get("Cmd")));
        if argv.is_empty() {
            return Err(format!(
                "image {} has no CMD/ENTRYPOINT; pass --cmd '<argv json>'",
                plan.image
            ));
        }
        argv
    };
    Ok(ImageResolution {
        local_arch,
        architecture,
        image_id,
        effective_cmd,
        working_dir,
        user,
        env,
        env_count,
        pull_action,
        inspect_ms: elapsed_ms(t0),
    })
}