//! supermachine 0.4.23
//!
//! Run any OCI/Docker image as a hardware-isolated microVM on macOS HVF (Linux KVM and Windows WHP in progress). Single library API, zero flags for the common case, sub-100 ms cold restore from snapshot.
//! Measure: how does pool.build() wall-time scale with `min` (parallel
//! workers spawned from disk)? If it scales sub-linearly we have a real
//! win to capture; if it's flat, the page cache is already doing the job.

use std::time::Instant;
use supermachine::Image;

/// Benchmark driver: bake a snapshot once, then measure `pool.build()`
/// wall time for pool sizes 1/2/4/8, three runs each, reporting
/// min/median/max in milliseconds on stderr.
///
/// # Errors
/// Propagates failures from `$HOME` lookup, snapshot baking, snapshot
/// restore, and pool construction.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let home = std::env::var("HOME")?;
    let snap_name = "_parallel_spawn_bench";
    let snap_dir = format!("{home}/.local/supermachine-snapshots/{snap_name}");

    // Bake once if missing. Use Path::join rather than string concat so the
    // check stays correct regardless of trailing separators.
    if !std::path::Path::new(&snap_dir).join("restore.snap").exists() {
        eprintln!("[bench] baking fresh snapshot...");
        let _ = Image::builder("alpine:latest")
            .with_name(snap_name)
            .with_memory_mib(256)
            .build()?;
        // NOTE(review): fixed sleep assumes the background save finishes
        // within 2 s — no visible completion signal to wait on here.
        std::thread::sleep(std::time::Duration::from_secs(2)); // let bg save complete
    }

    // For each `min`, build the pool 3 times and take median wall time.
    // Critically we use `Image::from_snapshot` (not the builder) so there's
    // no warm-handoff worker to bias the first slot.
    for min in [1usize, 2, 4, 8] {
        let mut times = Vec::with_capacity(3);
        for _ in 0..3 {
            let img = Image::from_snapshot(&snap_dir)?;
            let t0 = Instant::now();
            let pool = img.pool().min(min).max(min).build()?;
            times.push(t0.elapsed().as_millis());
            // Keep pool alive briefly so workers don't get reaped mid-bench
            std::thread::sleep(std::time::Duration::from_millis(50));
            drop(pool);
        }
        // Exactly 3 samples: after sorting, [0]=min, [1]=median, [2]=max.
        times.sort_unstable();
        eprintln!(
            "min={min:2} pool.build(): min={} med={} max={} ms",
            times[0], times[1], times[2]
        );
    }

    Ok(())
}