//! supermachine 0.7.4
//!
//! Run any OCI/Docker image as a hardware-isolated microVM on macOS HVF
//! (Linux KVM and Windows WHP in progress). Single library API, zero flags
//! for the common case, sub-100 ms cold-restore from snapshot.
//!
//! Integrator's actual workload: write source, exec rustc, drop.
//! Measures end-to-end per-cycle latency split between rustc CPU
//! and supermachine overhead (acquire + write_file + drop).
//!
//! Two configurations:
//!   - fixed = pool().min(5).max(5)   (zero auto-scaling)
//!   - elastic = pool().min(0).max(5) (lazy + recycle via dirty queue)
//!
//! Both should converge to the same steady-state cycle time
//! since the workload is rustc-bound. The difference shows up in
//! cold-start (elastic pays spawn on first 5 acquires) and idle
//! eviction.

use std::sync::Arc;
use std::time::{Duration, Instant};
use supermachine::Image;

// Minimal Rust program written into each VM as the compile workload.
const SOURCE: &[u8] = b"fn main() { println!(\"hello\"); }";
// Per-cycle command: compile the source with -O, then run the binary.
const CMD: &[&str] = &["sh", "-c", "rustc -O /tmp/main.rs -o /tmp/m && /tmp/m"];

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let snap = format!(
        "{}/.local/supermachine-snapshots/rust_1_slim",
        std::env::var("HOME")?
    );
    if !std::path::Path::new(&snap).is_dir() {
        eprintln!("rust_1_slim snapshot missing — bake with: supermachine pull rust:1-slim --name rust_1_slim --memory 2048");
        return Ok(());
    }
    let image = Arc::new(Image::from_snapshot(&snap)?);
    let n: usize = std::env::var("N")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(20);
    let concurrency: usize = std::env::var("CONCURRENCY")
        .ok()
        .and_then(|s| s.parse().ok())
        .unwrap_or(1);

    let pool = image
        .pool()
        .min(concurrency.max(1))
        .max(concurrency.max(1))
        .idle_timeout(Duration::MAX)
        .build()?;
    println!(
        "pool: min={} max={} fixed (no auto-scale, no eviction)",
        concurrency.max(1),
        concurrency.max(1)
    );

    println!("=== {n} cycles, concurrency={concurrency} ===");
    let bench_t0 = Instant::now();

    let mut times = Vec::with_capacity(n);
    if concurrency == 1 {
        for _ in 0..n {
            let t0 = Instant::now();
            let vm = pool.acquire()?;
            vm.write_file("/tmp/main.rs", SOURCE)?;
            let _ = vm
                .exec_builder()
                .argv(CMD.iter().copied())
                .timeout(Duration::from_secs(60))
                .output()?;
            drop(vm);
            times.push(t0.elapsed().as_micros() as u64);
        }
    } else {
        // Concurrent workers
        let pool = Arc::new(pool);
        let mut handles = vec![];
        let per_worker = n / concurrency;
        for _ in 0..concurrency {
            let pool = Arc::clone(&pool);
            handles.push(std::thread::spawn(move || -> Result<Vec<u64>, String> {
                let mut local = Vec::with_capacity(per_worker);
                for _ in 0..per_worker {
                    let t0 = Instant::now();
                    let vm = pool.acquire().map_err(|e| e.to_string())?;
                    vm.write_file("/tmp/main.rs", SOURCE).map_err(|e| e.to_string())?;
                    let _ = vm
                        .exec_builder()
                        .argv(CMD.iter().copied())
                        .timeout(Duration::from_secs(60))
                        .output()
                        .map_err(|e| e.to_string())?;
                    drop(vm);
                    local.push(t0.elapsed().as_micros() as u64);
                }
                Ok(local)
            }));
        }
        for h in handles {
            times.extend(h.join().unwrap()?);
        }
    }

    let total = bench_t0.elapsed();
    times.sort();
    let median = times[times.len() / 2];
    let p95 = times[(times.len() * 95 / 100).min(times.len() - 1)];
    let max = *times.last().unwrap();
    let mean = times.iter().sum::<u64>() / times.len() as u64;
    println!(
        "  total wall: {total:?}  cycle median: {} ms  p95: {} ms  max: {} ms  mean: {} ms",
        median / 1000,
        p95 / 1000,
        max / 1000,
        mean / 1000
    );
    println!(
        "  throughput: {:.1} cycles/s",
        n as f64 / total.as_secs_f64()
    );
    Ok(())
}