use std::fmt;
use std::io::Write;
use std::os::unix::net::UnixStream;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc;
use std::sync::{Arc, Mutex};
use std::thread::JoinHandle;
use std::time::Duration;
use crate::vmm::resources::VmResources;
use crate::vmm::runner::{run, RunError, RunOptions, RunReport};
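/// Returns `true` when the guest transport has no in-flight work; polled
/// before pausing the vCPU to service a restore or snapshot request.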
type TransportIdleCheck = Arc<dyn Fn() -> bool + Send + Sync + 'static>;
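/// A request to restore the VM from the snapshot file at `path`,
/// optionally switching to a new egress policy.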
#[derive(Debug)]
pub struct RestoreRequest {
pub path: String,
pub egress_policy: Option<String>,
}
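/// A request to write a snapshot of the VM to `out_path`.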
#[derive(Debug)]
pub struct SnapshotRequest {
pub out_path: String,
}
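/// Outcome of a successful snapshot: bytes written plus capture and save
/// timings in microseconds.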
#[derive(Clone, Debug)]
pub struct SnapshotResult {
pub bytes_written: u64,
pub capture_us: u128,
pub save_us: u128,
}
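/// Outcome of a completed warm restore: the total restore time in
/// microseconds, the host port associated with the restored VM (if any),
/// and a per-phase timing breakdown.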
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct PoolRestoreResult {
pub restore_us: u128,
pub host_port: Option<u16>,
pub timings: WarmRestoreTimings,
}
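/// Per-phase timings, in microseconds, for a single warm restore.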
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct WarmRestoreTimings {
pub reset_vsock_us: u128,
pub remap_cow_us: u128,
pub load_meta_us: u128,
pub restore_snapshot_us: u128,
pub ram_copy_us: u128,
pub gic_restore_us: u128,
pub vcpu_restore_us: u128,
pub vtimer_offset_us: u128,
pub mmio_restore_us: u128,
pub listener_restore_us: u128,
}
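/// Cloneable client half of a warm pool: sends restore, snapshot, and quit
/// commands to the `PoolWorker` and waits for their completions. At most one
/// restore or snapshot may be in flight at a time.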
#[derive(Clone)]
pub struct PoolHandle {
cmd_tx: mpsc::Sender<PoolCommand>,
request_lock: Arc<Mutex<()>>,
busy: Arc<AtomicBool>,
}
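/// Worker half of a warm pool. Hand it to the VM run loop via
/// `RunOptions::pool_worker`; `PoolControl::start` then services its command
/// channel on a dedicated reader thread.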
pub struct PoolWorker {
cmd_rx: mpsc::Receiver<PoolCommand>,
active_done: Arc<Mutex<Option<mpsc::Sender<PoolRestoreResult>>>>,
busy: Arc<AtomicBool>,
}
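/// A VM running on its own thread, together with a `PoolHandle` for driving
/// restores and snapshots against it.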
pub struct WarmPool {
handle: PoolHandle,
vm_thread: Option<JoinHandle<Result<RunReport, RunError>>>,
}
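/// VM-side control state shared with the reader threads: pending pause,
/// restore, snapshot, and quit flags, plus the completion channels used to
/// report results back to the supervisor socket and/or in-process callers.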
pub struct PoolControl {
pause_pending: Arc<AtomicBool>,
restore_req: Arc<Mutex<Option<RestoreRequest>>>,
snapshot_req: Arc<Mutex<Option<SnapshotRequest>>>,
snapshot_done: Arc<Mutex<Option<mpsc::Sender<Result<SnapshotResult, String>>>>>,
quit: Arc<AtomicBool>,
writer: Option<UnixStream>,
active_done: Option<Arc<Mutex<Option<mpsc::Sender<PoolRestoreResult>>>>>,
busy: Option<Arc<AtomicBool>>,
}
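/// Errors encountered while setting up `PoolControl`.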
#[derive(Debug)]
pub enum PoolError {
InitialDoneClone(std::io::Error),
InitialDoneWrite(std::io::Error),
ReaderClone(std::io::Error),
ThreadSpawn(std::io::Error),
}
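/// Errors reported to `PoolHandle` callers.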
#[derive(Debug, PartialEq, Eq)]
pub enum PoolClientError {
CommandClosed,
CompletionClosed,
CompletionTimeout,
RestoreInFlight,
SnapshotFailed(String),
}
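/// Errors from starting, driving, or tearing down a `WarmPool`.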
#[derive(Debug)]
pub enum WarmPoolError {
AlreadyJoined,
Pool(PoolClientError),
PoolWorkerAlreadySet,
Run(RunError),
ThreadJoin,
ThreadSpawn(std::io::Error),
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn warm_pool_restore_fails_when_worker_is_dropped() {
let (handle, worker) = warm_pool();
drop(worker);
assert_eq!(
handle.restore("snap.sm"),
Err(PoolClientError::CommandClosed)
);
}
#[test]
fn warm_pool_quit_fails_when_worker_is_dropped() {
let (handle, worker) = warm_pool();
drop(worker);
assert_eq!(handle.quit(), Err(PoolClientError::CommandClosed));
}
#[test]
fn warm_pool_restore_receives_per_request_completion() {
let (handle, worker) = warm_pool();
        let expected = PoolRestoreResult {
            restore_us: 123,
            host_port: Some(61820),
            timings: WarmRestoreTimings {
                reset_vsock_us: 1,
                remap_cow_us: 2,
                load_meta_us: 3,
                restore_snapshot_us: 4,
                ram_copy_us: 5,
                gic_restore_us: 6,
                vcpu_restore_us: 7,
                vtimer_offset_us: 8,
                mmio_restore_us: 9,
                listener_restore_us: 10,
            },
        };
        let restore = std::thread::spawn(move || handle.restore("snap.sm"));
        match worker.cmd_rx.recv().unwrap() {
            PoolCommand::Restore { request, done_tx } => {
                assert_eq!(request.path, "snap.sm");
                // `PoolRestoreResult` is `Copy`, so `expected` remains usable
                // for the assertion below.
                done_tx.send(expected).unwrap();
            }
            PoolCommand::Quit => panic!("unexpected quit command"),
            PoolCommand::Snapshot { .. } => panic!("unexpected snapshot command"),
        }
        assert_eq!(restore.join().unwrap(), Ok(expected));
}
#[test]
fn warm_pool_restore_timeout_keeps_request_in_flight() {
let (handle, _worker) = warm_pool();
assert_eq!(
handle.restore_timeout("snap.sm", Duration::ZERO),
Err(PoolClientError::CompletionTimeout)
);
assert_eq!(
handle.restore_timeout("snap.sm", Duration::ZERO),
Err(PoolClientError::RestoreInFlight)
);
}
#[test]
fn warm_pool_manager_rejects_existing_worker() {
let (_handle, worker) = warm_pool();
let result = WarmPool::start(
VmResources::from_snapshot("snap.sm"),
RunOptions {
pool_worker: Some(worker),
..RunOptions::default()
},
);
assert!(matches!(result, Err(WarmPoolError::PoolWorkerAlreadySet)));
}
}
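/// Commands sent from a `PoolHandle` to the reader servicing its worker.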
enum PoolCommand {
Restore {
request: RestoreRequest,
done_tx: mpsc::Sender<PoolRestoreResult>,
},
Snapshot {
request: SnapshotRequest,
done_tx: mpsc::Sender<Result<SnapshotResult, String>>,
},
Quit,
}
pub fn warm_pool() -> (PoolHandle, PoolWorker) {
let (cmd_tx, cmd_rx) = mpsc::channel();
let active_done = Arc::new(Mutex::new(None));
let busy = Arc::new(AtomicBool::new(false));
(
PoolHandle {
cmd_tx,
request_lock: Arc::new(Mutex::new(())),
busy: busy.clone(),
},
PoolWorker {
cmd_rx,
active_done,
busy,
},
)
}
impl PoolHandle {
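    /// Restores from the snapshot at `path`, blocking until the worker
    /// reports completion.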
pub fn restore(&self, path: impl Into<String>) -> Result<PoolRestoreResult, PoolClientError> {
self.restore_request(RestoreRequest {
path: path.into(),
egress_policy: None,
})
}
pub fn restore_with_egress_policy(
&self,
path: impl Into<String>,
egress_policy: impl Into<String>,
) -> Result<PoolRestoreResult, PoolClientError> {
self.restore_request(RestoreRequest {
path: path.into(),
egress_policy: Some(egress_policy.into()),
})
}
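    /// Like `restore`, but gives up after `timeout`. On timeout the request
    /// remains in flight: further requests fail with `RestoreInFlight` until
    /// the worker completes the original restore.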
pub fn restore_timeout(
&self,
path: impl Into<String>,
timeout: Duration,
) -> Result<PoolRestoreResult, PoolClientError> {
self.restore_request_timeout(
RestoreRequest {
path: path.into(),
egress_policy: None,
},
timeout,
)
}
pub fn restore_with_egress_policy_timeout(
&self,
path: impl Into<String>,
egress_policy: impl Into<String>,
timeout: Duration,
) -> Result<PoolRestoreResult, PoolClientError> {
self.restore_request_timeout(
RestoreRequest {
path: path.into(),
egress_policy: Some(egress_policy.into()),
},
timeout,
)
}
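    /// Asks the worker to shut the VM down. Returns immediately; use
    /// `WarmPool::shutdown` or `WarmPool::join` to collect the run report.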
pub fn quit(&self) -> Result<(), PoolClientError> {
self.cmd_tx
.send(PoolCommand::Quit)
.map_err(|_| PoolClientError::CommandClosed)
}
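    /// Writes a snapshot to `out_path`, blocking until the worker reports
    /// success or failure.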
pub fn snapshot(
&self,
out_path: impl Into<String>,
) -> Result<SnapshotResult, PoolClientError> {
self.snapshot_request_inner(
SnapshotRequest { out_path: out_path.into() },
None,
)
}
pub fn snapshot_timeout(
&self,
out_path: impl Into<String>,
timeout: Duration,
) -> Result<SnapshotResult, PoolClientError> {
self.snapshot_request_inner(
SnapshotRequest { out_path: out_path.into() },
Some(timeout),
)
}
fn snapshot_request_inner(
&self,
request: SnapshotRequest,
timeout: Option<Duration>,
) -> Result<SnapshotResult, PoolClientError> {
let _lock = self
.request_lock
.lock()
.map_err(|_| PoolClientError::CommandClosed)?;
if self.busy.swap(true, Ordering::SeqCst) {
return Err(PoolClientError::RestoreInFlight);
}
let (done_tx, done_rx) = mpsc::channel();
self.cmd_tx
.send(PoolCommand::Snapshot { request, done_tx })
.map_err(|_| {
self.busy.store(false, Ordering::SeqCst);
PoolClientError::CommandClosed
})?;
let result = match timeout {
Some(t) => done_rx
.recv_timeout(t)
.map_err(|e| match e {
mpsc::RecvTimeoutError::Timeout => PoolClientError::CompletionTimeout,
mpsc::RecvTimeoutError::Disconnected => PoolClientError::CompletionClosed,
}),
None => done_rx
.recv()
.map_err(|_| PoolClientError::CompletionClosed),
};
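        // Unlike restores, the busy flag is always cleared here, even on
        // timeout: `post_snapshot_result` on the worker side never resets it.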
self.busy.store(false, Ordering::SeqCst);
match result? {
Ok(r) => Ok(r),
Err(msg) => Err(PoolClientError::SnapshotFailed(msg)),
}
}
fn restore_request(
&self,
request: RestoreRequest,
) -> Result<PoolRestoreResult, PoolClientError> {
self.restore_request_inner(request, None)
}
fn restore_request_timeout(
&self,
request: RestoreRequest,
timeout: Duration,
) -> Result<PoolRestoreResult, PoolClientError> {
self.restore_request_inner(request, Some(timeout))
}
fn restore_request_inner(
&self,
request: RestoreRequest,
timeout: Option<Duration>,
) -> Result<PoolRestoreResult, PoolClientError> {
        let _guard = self
            .request_lock
            .lock()
            .map_err(|_| PoolClientError::CommandClosed)?;
if self.busy.swap(true, Ordering::SeqCst) {
return Err(PoolClientError::RestoreInFlight);
}
let (done_tx, done_rx) = mpsc::channel();
self.cmd_tx
.send(PoolCommand::Restore { request, done_tx })
.map_err(|_| {
self.busy.store(false, Ordering::SeqCst);
PoolClientError::CommandClosed
})?;
let result = match timeout {
Some(timeout) => match done_rx.recv_timeout(timeout) {
Ok(result) => Ok(result),
Err(mpsc::RecvTimeoutError::Timeout) => Err(PoolClientError::CompletionTimeout),
Err(mpsc::RecvTimeoutError::Disconnected) => Err(PoolClientError::CompletionClosed),
},
None => done_rx
.recv()
.map_err(|_| PoolClientError::CompletionClosed),
};
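        // On timeout the worker is still servicing the restore, so leave the
        // busy flag set; `complete_restore` clears it when the restore lands.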
if !matches!(result, Err(PoolClientError::CompletionTimeout)) {
self.busy.store(false, Ordering::SeqCst);
}
result
}
}
impl WarmPool {
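    /// Spawns the VM on a dedicated thread (named `warm-pool-vm`) with a
    /// freshly wired pool worker. Fails with `PoolWorkerAlreadySet` if
    /// `options.pool_worker` is already populated.
    ///
    /// A usage sketch (not compiled as a doc test; assumes a valid snapshot
    /// at `snap.sm`):
    ///
    /// ```ignore
    /// let pool = WarmPool::start(
    ///     VmResources::from_snapshot("snap.sm"),
    ///     RunOptions::default(),
    /// )?;
    /// let restored = pool.restore("snap.sm")?;
    /// println!("restore took {}us", restored.restore_us);
    /// let report = pool.shutdown()?;
    /// ```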
pub fn start(resources: VmResources, mut options: RunOptions) -> Result<Self, WarmPoolError> {
if options.pool_worker.is_some() {
return Err(WarmPoolError::PoolWorkerAlreadySet);
}
let (handle, worker) = warm_pool();
options.pool_worker = Some(worker);
let vm_thread = std::thread::Builder::new()
.name("warm-pool-vm".into())
.spawn(move || {
set_vm_thread_qos();
run(&resources, options)
})
.map_err(WarmPoolError::ThreadSpawn)?;
Ok(Self {
handle,
vm_thread: Some(vm_thread),
})
}
pub fn handle(&self) -> PoolHandle {
self.handle.clone()
}
pub fn restore(&self, path: impl Into<String>) -> Result<PoolRestoreResult, PoolClientError> {
self.handle.restore(path)
}
pub fn restore_timeout(
&self,
path: impl Into<String>,
timeout: Duration,
) -> Result<PoolRestoreResult, PoolClientError> {
self.handle.restore_timeout(path, timeout)
}
pub fn restore_with_egress_policy(
&self,
path: impl Into<String>,
egress_policy: impl Into<String>,
) -> Result<PoolRestoreResult, PoolClientError> {
self.handle.restore_with_egress_policy(path, egress_policy)
}
pub fn restore_with_egress_policy_timeout(
&self,
path: impl Into<String>,
egress_policy: impl Into<String>,
timeout: Duration,
) -> Result<PoolRestoreResult, PoolClientError> {
self.handle
.restore_with_egress_policy_timeout(path, egress_policy, timeout)
}
pub fn snapshot(
&self,
out_path: impl Into<String>,
) -> Result<SnapshotResult, PoolClientError> {
self.handle.snapshot(out_path)
}
pub fn snapshot_timeout(
&self,
out_path: impl Into<String>,
timeout: Duration,
) -> Result<SnapshotResult, PoolClientError> {
self.handle.snapshot_timeout(out_path, timeout)
}
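    /// Sends a quit command and joins the VM thread. A join failure takes
    /// precedence over a failed quit send.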
    pub fn shutdown(mut self) -> Result<RunReport, WarmPoolError> {
        let quit = self.handle.quit();
        let report = self.join()?;
        quit.map_err(WarmPoolError::Pool)?;
        Ok(report)
    }
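    /// Joins the VM thread. Fails with `AlreadyJoined` on a second call and
    /// with `ThreadJoin` if the thread panicked.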
pub fn join(&mut self) -> Result<RunReport, WarmPoolError> {
let vm_thread = self.vm_thread.take().ok_or(WarmPoolError::AlreadyJoined)?;
vm_thread
.join()
.map_err(|_| WarmPoolError::ThreadJoin)?
.map_err(WarmPoolError::Run)
}
}
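/// Promotes the calling thread to the USER_INTERACTIVE QoS class so macOS
/// schedules vCPU work with high priority (on Apple silicon this favors
/// performance cores). A no-op on other targets.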
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
fn set_vm_thread_qos() {
const QOS_CLASS_USER_INTERACTIVE: u32 = 0x21;
unsafe extern "C" {
fn pthread_set_qos_class_self_np(qos_class: u32, relative_priority: i32) -> i32;
}
unsafe {
let _ = pthread_set_qos_class_self_np(QOS_CLASS_USER_INTERACTIVE, 0);
}
}
#[cfg(not(all(target_os = "macos", target_arch = "aarch64")))]
fn set_vm_thread_qos() {}
impl fmt::Display for PoolError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PoolError::InitialDoneClone(e) => {
write!(f, "clone pool socket for initial DONE: {e}")
}
PoolError::InitialDoneWrite(e) => write!(f, "write initial pool DONE: {e}"),
PoolError::ReaderClone(e) => write!(f, "clone pool socket for reader: {e}"),
PoolError::ThreadSpawn(e) => write!(f, "spawn pool supervisor reader: {e}"),
}
}
}
impl std::error::Error for PoolError {}
impl fmt::Display for PoolClientError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PoolClientError::CommandClosed => write!(f, "warm pool command channel closed"),
PoolClientError::CompletionClosed => write!(f, "warm pool completion channel closed"),
PoolClientError::CompletionTimeout => write!(f, "warm pool completion timed out"),
PoolClientError::RestoreInFlight => write!(f, "warm pool restore already in flight"),
PoolClientError::SnapshotFailed(msg) => write!(f, "snapshot failed: {msg}"),
}
}
}
impl std::error::Error for PoolClientError {}
impl fmt::Display for WarmPoolError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
WarmPoolError::AlreadyJoined => write!(f, "warm pool VM thread already joined"),
WarmPoolError::Pool(e) => write!(f, "{e}"),
WarmPoolError::PoolWorkerAlreadySet => {
write!(f, "RunOptions already contains a pool worker")
}
WarmPoolError::Run(e) => write!(f, "{e}"),
WarmPoolError::ThreadJoin => write!(f, "warm pool VM thread panicked"),
WarmPoolError::ThreadSpawn(e) => write!(f, "spawn warm pool VM thread: {e}"),
}
}
}
impl std::error::Error for WarmPoolError {}
impl PoolControl {
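    /// Wires up the shared control state and spawns up to two reader
    /// threads: one draining supervisor commands from `sock`
    /// (`pool-supervisor-reader`) and one draining in-process commands from
    /// `worker` (`pool-library-reader`). If `initial_restore_us` is set, an
    /// initial DONE line is written to the supervisor socket first.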
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
pub fn start(
sock: Option<&UnixStream>,
worker: Option<PoolWorker>,
initial_restore_us: Option<u128>,
initial_host_port: Option<u16>,
vcpu0_handle: applevisor_sys::hv_vcpu_t,
transport_idle: Option<TransportIdleCheck>,
) -> Result<Self, PoolError> {
let pause_pending = Arc::new(AtomicBool::new(false));
let restore_req = Arc::new(Mutex::new(None));
let snapshot_req = Arc::new(Mutex::new(None));
let snapshot_done = Arc::new(Mutex::new(None));
let quit = Arc::new(AtomicBool::new(false));
let mut writer = None;
let mut active_done = None;
let mut busy = None;
if let Some(sock) = sock {
if let Some(us) = initial_restore_us {
let mut s = sock.try_clone().map_err(PoolError::InitialDoneClone)?;
write_done(&mut s, us, initial_host_port, WarmRestoreTimings::default())
.map_err(PoolError::InitialDoneWrite)?;
writer = Some(s);
}
let pp = pause_pending.clone();
let rr = restore_req.clone();
let qf = quit.clone();
let transport_idle = transport_idle.clone();
let read_half = sock.try_clone().map_err(PoolError::ReaderClone)?;
std::thread::Builder::new()
.name("pool-supervisor-reader".into())
.spawn(move || read_supervisor(read_half, pp, rr, qf, vcpu0_handle, transport_idle))
.map_err(PoolError::ThreadSpawn)?;
}
if let Some(worker) = worker {
let pp = pause_pending.clone();
let rr = restore_req.clone();
let sr = snapshot_req.clone();
let sd = snapshot_done.clone();
let qf = quit.clone();
let ad = worker.active_done.clone();
let transport_idle = transport_idle.clone();
active_done = Some(worker.active_done);
busy = Some(worker.busy.clone());
std::thread::Builder::new()
.name("pool-library-reader".into())
.spawn(move || {
read_library(
worker.cmd_rx,
ad,
pp,
rr,
sr,
sd,
qf,
vcpu0_handle,
transport_idle,
)
})
.map_err(PoolError::ThreadSpawn)?;
}
Ok(Self {
pause_pending,
restore_req,
snapshot_req,
snapshot_done,
quit,
writer,
active_done,
busy,
})
}
pub fn should_quit(&self) -> bool {
self.quit.load(Ordering::SeqCst)
}
pub fn pause_requested(&self) -> bool {
self.pause_pending.load(Ordering::SeqCst)
}
pub fn pause_flag(&self) -> &AtomicBool {
&self.pause_pending
}
pub fn clear_pause(&self) {
self.pause_pending.store(false, Ordering::SeqCst);
}
pub fn take_snapshot_request(&self) -> Option<SnapshotRequest> {
self.snapshot_req.lock().ok()?.take()
}
pub fn post_snapshot_result(&self, result: Result<SnapshotResult, String>) {
if let Ok(mut g) = self.snapshot_done.lock() {
if let Some(tx) = g.take() {
let _ = tx.send(result);
}
}
}
    pub fn take_restore_request(&self) -> Option<RestoreRequest> {
        self.restore_req.lock().ok()?.take()
    }
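    /// Reports a finished restore to the supervisor socket and/or the
    /// in-process completion channel, then clears the worker busy flag.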
pub fn complete_restore(
&mut self,
us: u128,
host_port: Option<u16>,
timings: WarmRestoreTimings,
) {
let result = PoolRestoreResult {
restore_us: us,
host_port,
timings,
};
if let Some(w) = self.writer.as_mut() {
let _ = write_done(w, us, host_port, timings);
}
if let Some(active_done) = self.active_done.as_ref() {
if let Some(tx) = active_done.lock().unwrap().take() {
let _ = tx.send(result);
}
}
if let Some(busy) = self.busy.as_ref() {
busy.store(false, Ordering::SeqCst);
}
}
}
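/// Writes a completion line to the supervisor socket in the form
/// `DONE <us> [host_port=<port>] reset_vsock_us=<n> ... listener_restore_us=<n>`.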
fn write_done(
w: &mut UnixStream,
us: u128,
host_port: Option<u16>,
timings: WarmRestoreTimings,
) -> std::io::Result<()> {
write!(w, "DONE {us}")?;
if let Some(port) = host_port {
write!(w, " host_port={port}")?;
}
writeln!(
w,
" reset_vsock_us={} remap_cow_us={} load_meta_us={} restore_snapshot_us={} ram_copy_us={} gic_restore_us={} vcpu_restore_us={} vtimer_offset_us={} mmio_restore_us={} listener_restore_us={}",
timings.reset_vsock_us,
timings.remap_cow_us,
timings.load_meta_us,
timings.restore_snapshot_us,
timings.ram_copy_us,
timings.gic_restore_us,
timings.vcpu_restore_us,
timings.vtimer_offset_us,
timings.mmio_restore_us,
timings.listener_restore_us
)
}
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
fn read_supervisor(
read_half: UnixStream,
pause_pending: Arc<AtomicBool>,
restore_req: Arc<Mutex<Option<RestoreRequest>>>,
quit: Arc<AtomicBool>,
vcpu0_handle: applevisor_sys::hv_vcpu_t,
transport_idle: Option<TransportIdleCheck>,
) {
use std::io::BufRead;
let mut reader = std::io::BufReader::new(read_half);
loop {
let mut line = String::new();
match reader.read_line(&mut line) {
Ok(0) | Err(_) => {
quit.store(true, Ordering::SeqCst);
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
break;
}
Ok(_) => {}
}
let cmd = line.trim();
if let Some(rest) = cmd.strip_prefix("RESTORE ") {
let mut parts = rest.split_ascii_whitespace();
let path = parts.next().unwrap_or("").to_string();
let mut egress_policy = None;
for kv in parts {
if let Some(v) = kv.strip_prefix("egress_policy=") {
egress_policy = Some(v.to_string());
}
}
wait_until_transport_idle(transport_idle.as_ref(), &quit);
*restore_req.lock().unwrap() = Some(RestoreRequest {
path,
egress_policy,
});
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
} else if cmd == "QUIT" {
quit.store(true, Ordering::SeqCst);
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
break;
}
}
}
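/// Reader loop for the in-process command channel. Parks each command's
/// completion sender in the shared slot, waits for the transport to go idle,
/// posts the request, and kicks vCPU 0 out of the guest so the run loop can
/// service it.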
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
fn read_library(
cmd_rx: mpsc::Receiver<PoolCommand>,
active_done: Arc<Mutex<Option<mpsc::Sender<PoolRestoreResult>>>>,
pause_pending: Arc<AtomicBool>,
restore_req: Arc<Mutex<Option<RestoreRequest>>>,
snapshot_req: Arc<Mutex<Option<SnapshotRequest>>>,
snapshot_done: Arc<Mutex<Option<mpsc::Sender<Result<SnapshotResult, String>>>>>,
quit: Arc<AtomicBool>,
vcpu0_handle: applevisor_sys::hv_vcpu_t,
transport_idle: Option<TransportIdleCheck>,
) {
loop {
match cmd_rx.recv() {
Ok(PoolCommand::Restore { request, done_tx }) => {
*active_done.lock().unwrap() = Some(done_tx);
wait_until_transport_idle(transport_idle.as_ref(), &quit);
*restore_req.lock().unwrap() = Some(request);
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
}
Ok(PoolCommand::Snapshot { request, done_tx }) => {
*snapshot_done.lock().unwrap() = Some(done_tx);
wait_until_transport_idle(transport_idle.as_ref(), &quit);
*snapshot_req.lock().unwrap() = Some(request);
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
}
Ok(PoolCommand::Quit) => {
active_done.lock().unwrap().take();
quit.store(true, Ordering::SeqCst);
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
break;
}
Err(_) => {
active_done.lock().unwrap().take();
quit.store(true, Ordering::SeqCst);
pause_pending.store(true, Ordering::SeqCst);
exit_vcpu(vcpu0_handle);
break;
}
}
}
}
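/// Spins in 100us steps until the transport reports idle or quit is set; a
/// missing check means there is nothing to wait for.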
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
fn wait_until_transport_idle(transport_idle: Option<&TransportIdleCheck>, quit: &AtomicBool) {
let Some(transport_idle) = transport_idle else {
return;
};
while !quit.load(Ordering::SeqCst) && !transport_idle() {
std::thread::sleep(Duration::from_micros(100));
}
}
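/// Forces vCPU 0 out of guest execution via `hv_vcpus_exit` so the run loop
/// can observe the pending pause/quit flags.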
#[cfg(all(target_os = "macos", target_arch = "aarch64"))]
fn exit_vcpu(vcpu0_handle: applevisor_sys::hv_vcpu_t) {
unsafe {
let _ = applevisor_sys::hv_vcpus_exit(&vcpu0_handle, 1);
}
}