// NOTE(review): lines 1-60 of this file contained bare sequential integers
// ("1" .. "60") — stray line-number residue from an extraction/paste, not
// valid Rust. Removed so the file compiles; confirm nothing real was lost.
//! Measure pre-exec vs listener-ready bake speed for a workload
//! that doesn't hit the parked-PID-1 fast path.
//!
//! Setup: python:slim with `python3 -c "import time; time.sleep(60)"` —
//! a long-running process that NEVER binds a listener and NEVER exits
//! (so neither `parked-PID-1` nor `listener-ready` triggers fire). The
//! existing baseline falls back to the wall-clock `--snapshot-after-ms`
//! default (7000 ms!). With pre-exec, bake should fire in ~150 ms.
//!
//! This is the scenario the pre-exec optimization actually targets:
//! workloads with slow / no listener bringup. For the common case
//! (alpine /bin/sh exits, parked-PID-1 fires) pre-exec adds overhead
//! without saving time.
use std::time::Instant;
use supermachine::Image;
/// Perform one timed cold bake of `python:slim` under the slow
/// sleep workload, returning elapsed wall-clock milliseconds.
///
/// Any snapshot state left over from a previous run is wiped first
/// so the measurement is a genuine cold bake. Propagates the builder
/// error if the bake itself fails.
fn bake_once(name: &str) -> Result<u128, supermachine::Error> {
    let home = std::env::var("HOME").expect("HOME");
    // Best-effort cleanup of stale snapshot dirs; deletion failures
    // (e.g. nothing there to remove) are deliberately ignored.
    let stale = [
        format!("{home}/.local/supermachine-snapshots/{name}"),
        format!("{home}/.local/supermachine-snapshots/{name}__warm__unused"),
    ];
    for dir in stale {
        let _ = std::fs::remove_dir_all(dir);
    }
    let started = Instant::now();
    let image = Image::builder("python:slim")
        .with_name(name)
        .with_memory_mib(256)
        .with_cmd(["python3", "-c", "import time; time.sleep(60)"])
        .build()?;
    // Drop before reading the clock, matching the original `let _ = ...`
    // which discarded the image as part of the timed statement.
    drop(image);
    Ok(started.elapsed().as_millis())
}
/// Run three cold-bake trials, logging each trial's time and a
/// min/median/max summary of the successful runs to stderr.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    eprintln!(
        "[bench] cold-bake of python:slim with slow workload \
         (no listener, never exits) — 3 trials"
    );
    let mut times: Vec<u128> = Vec::new();
    for i in 0..3 {
        // Unique snapshot name per trial so trials never reuse state.
        let snapshot_name = format!("_pre_exec_cmp_{i}");
        match bake_once(&snapshot_name) {
            Ok(ms) => {
                eprintln!(" trial {i}: {ms} ms");
                times.push(ms);
            }
            Err(e) => eprintln!(" trial {i}: FAILED: {e}"),
        }
    }
    times.sort();
    // Summarize only if at least one trial succeeded; with the odd
    // trial count, len/2 indexes the true median.
    if let (Some(min), Some(max)) = (times.first(), times.last()) {
        eprintln!(
            " min={} med={} max={}",
            min,
            times[times.len() / 2],
            max
        );
    }
    Ok(())
}