#![cfg(all(target_os = "macos", target_arch = "aarch64"))]
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use crate::vmm::snapshot::PerVcpuState;
// PSCI (Power State Coordination Interface) SMC function IDs, per the ARM
// PSCI specification (DEN 0022). 0x8400_xxxx IDs are SMC32 calls;
// 0xC400_xxxx are the SMC64 variants (64-bit arguments, e.g. entry points).
pub const PSCI_VERSION: u32 = 0x84000000;
pub const PSCI_CPU_OFF: u32 = 0x84000002;
pub const PSCI_CPU_ON: u32 = 0xC4000003;
pub const PSCI_AFFINITY_INFO: u32 = 0xC4000004;
pub const PSCI_FEATURES: u32 = 0x8400000A;
pub const PSCI_SYSTEM_OFF: u32 = 0x84000008;
pub const PSCI_SYSTEM_RESET: u32 = 0x84000009;
// PSCI return codes (signed values returned to the guest in x0).
pub const PSCI_SUCCESS: i64 = 0;
pub const PSCI_NOT_SUPPORTED: i64 = -1;
pub const PSCI_INVALID_PARAMS: i64 = -2;
pub const PSCI_ALREADY_ON: i64 = -4;
/// Startup command delivered to a secondary vCPU thread through its slot.
#[derive(Clone)]
pub enum VcpuStart {
    /// No start request yet; the vCPU thread stays blocked on its condvar.
    Parked,
    /// PSCI CPU_ON request: start executing at `entry`, passing the PSCI
    /// context id `ctx_id` to the started CPU.
    Run { entry: u64, ctx_id: u64 },
}
/// Per-vCPU mailbox used to hand a start request to that vCPU's thread.
pub struct VcpuSlot {
    /// Pending start command; `cv` waits on this mutex.
    pub state: Mutex<VcpuStart>,
    /// Signaled (by `cpu_on`) when `state` becomes `Run`.
    pub cv: Condvar,
    /// Set to true by the vCPU thread once it has picked up a `Run`
    /// request (see `wait_for_run`); read by `cpu_on`/`affinity_info`.
    pub on: AtomicBool,
}
/// Shared state coordinating secondary-vCPU startup (PSCI CPU_ON),
/// snapshot pause/resume, and shutdown across all vCPU threads.
pub struct VcpuCoordinator {
    /// Total number of vCPUs; index 0 is the primary (it is skipped by the
    /// "secondary" accounting below).
    pub n_vcpus: u32,
    /// One mailbox per vCPU, indexed by vCPU id.
    pub slots: Vec<VcpuSlot>,
    /// Set to ask vCPU threads to stop waiting and exit.
    pub shutdown: AtomicBool,
    /// Set while a snapshot is in progress; vCPUs park in
    /// `maybe_pause_for_snapshot` until released.
    pub snapshot_request: AtomicBool,
    /// Per-vCPU captured register state, deposited by each paused vCPU.
    pub captured: Mutex<Vec<Option<PerVcpuState>>>,
    /// How many secondaries have deposited into `captured` this round.
    pub captured_count: AtomicU32,
    /// Resume "generation" counter plus its condvar: bumping the counter
    /// and notifying wakes every vCPU parked for a snapshot.
    pub resume_lock: Mutex<u64>,
    pub resume_cv: Condvar,
    /// Hypervisor handles of secondary vCPUs, used to force guest exits.
    pub secondary_handles: Mutex<Vec<applevisor_sys::hv_vcpu_t>>,
}
impl VcpuCoordinator {
pub fn new(n_vcpus: u32) -> Arc<Self> {
let slots = (0..n_vcpus)
.map(|_| VcpuSlot {
state: Mutex::new(VcpuStart::Parked),
cv: Condvar::new(),
on: AtomicBool::new(false),
})
.collect();
Arc::new(Self {
n_vcpus,
slots,
shutdown: AtomicBool::new(false),
snapshot_request: AtomicBool::new(false),
captured: Mutex::new((0..n_vcpus).map(|_| None).collect()),
captured_count: AtomicU32::new(0),
resume_lock: Mutex::new(0),
resume_cv: Condvar::new(),
secondary_handles: Mutex::new(Vec::new()),
})
}
/// Record a secondary vCPU's hypervisor handle so it can later be forced
/// out of the guest (see `request_snapshot_pause`).
pub fn register_secondary(&self, handle: applevisor_sys::hv_vcpu_t) {
    let mut handles = self.secondary_handles.lock().unwrap();
    handles.push(handle);
}
/// Return a point-in-time copy of the registered secondary handles.
pub fn secondary_handles_snapshot(&self) -> Vec<applevisor_sys::hv_vcpu_t> {
    let handles = self.secondary_handles.lock().unwrap();
    handles.as_slice().to_vec()
}
/// If a snapshot pause is pending, capture this vCPU's register state,
/// deposit it for the coordinator, and block until the snapshot is
/// released (resume generation advances) or shutdown is requested.
///
/// `idx` is this vCPU's index into `captured`; `vcpu` is its handle.
/// Returns any error from `capture_vcpu_state`; otherwise `Ok(())`.
pub fn maybe_pause_for_snapshot(
    &self,
    idx: u32,
    vcpu: &crate::hvf::Vcpu,
) -> crate::hvf::Result<()> {
    if !self.snapshot_request.load(Ordering::Acquire) {
        return Ok(());
    }
    // Read the resume generation *before* depositing our state: if the
    // coordinator bumps it while we are still capturing, the wait loop
    // below falls through immediately instead of missing the wakeup.
    let saved_gen = *self.resume_lock.lock().unwrap();
    let st = crate::vmm::snapshot::capture_vcpu_state(vcpu)?;
    if std::env::var_os("SUPERMACHINE_TIMINGS").is_some() {
        let pc = st
            .gp_regs
            .iter()
            .find(|(id, _)| *id == applevisor_sys::hv_reg_t::PC as u32)
            .map(|(_, v)| *v)
            .unwrap_or(0);
        eprintln!(" [vcpu-{idx}] snapshot pause: PC=0x{pc:x}");
    }
    // Dropped the dead `let _ = idx;` — `idx` is genuinely used here.
    self.captured.lock().unwrap()[idx as usize] = Some(st);
    self.captured_count.fetch_add(1, Ordering::AcqRel);
    // Park until the coordinator advances the generation (or shutdown).
    let mut g = self.resume_lock.lock().unwrap();
    while *g == saved_gen && !self.shutdown.load(Ordering::Acquire) {
        g = self.resume_cv.wait(g).unwrap();
    }
    Ok(())
}
/// Ask all running secondary vCPUs to pause for a snapshot.
///
/// Clears any previously captured state, raises the snapshot flag, kicks
/// the secondaries out of the guest via `hv_vcpus_exit`, then waits (up
/// to 2 s) until every *running* secondary has deposited its state into
/// `captured`. Logs and gives up on timeout rather than hanging.
pub fn request_snapshot_pause(
    &self,
    secondary_handles: &[applevisor_sys::hv_vcpu_t],
) {
    // Reset the deposit area so stale state from a previous snapshot
    // round can never be mistaken for this round's.
    {
        let mut g = self.captured.lock().unwrap();
        for s in g.iter_mut() {
            *s = None;
        }
    }
    self.captured_count.store(0, Ordering::SeqCst);
    self.snapshot_request.store(true, Ordering::Release);
    if !secondary_handles.is_empty() {
        // SAFETY: the caller supplies a valid slice of live vCPU handles;
        // hv_vcpus_exit only forces those vCPUs out of guest execution.
        unsafe {
            let _ = applevisor_sys::hv_vcpus_exit(
                secondary_handles.as_ptr(),
                secondary_handles.len() as u32,
            );
        }
    }
    // Only secondaries that are actually on will deposit state (slot 0
    // is the primary and is excluded), so wait for exactly that many.
    let target: u32 = self
        .slots
        .iter()
        .skip(1)
        .filter(|s| s.on.load(Ordering::Acquire))
        .count() as u32;
    let deadline = std::time::Instant::now() + std::time::Duration::from_secs(2);
    while self.captured_count.load(Ordering::Acquire) < target {
        if self.shutdown.load(Ordering::Acquire) {
            return;
        }
        if std::time::Instant::now() > deadline {
            eprintln!(
                " [coord] snapshot-pause timeout: {}/{} secondaries deposited",
                self.captured_count.load(Ordering::Acquire),
                target,
            );
            break;
        }
        // Yield instead of a raw spin hint: the secondaries need CPU time
        // to reach their pause point, and this wait can last up to 2 s —
        // `spin_loop()` alone would burn a full core doing nothing.
        std::thread::yield_now();
    }
}
/// Release every vCPU parked in `maybe_pause_for_snapshot`: drop the
/// request flag, advance the resume generation, and wake all waiters.
pub fn release_after_snapshot(&self) {
    self.snapshot_request.store(false, Ordering::Release);
    let mut generation = self.resume_lock.lock().unwrap();
    let next = generation.wrapping_add(1);
    *generation = next;
    self.resume_cv.notify_all();
}
/// Drain the captured states of all secondary vCPUs (indices 1..),
/// leaving `None` behind in each slot. A secondary that never deposited
/// (e.g. the pause timed out) yields a default state.
pub fn take_secondary_states(&self) -> Vec<PerVcpuState> {
    let mut deposits = self.captured.lock().unwrap();
    deposits
        .iter_mut()
        .skip(1)
        .map(|slot| slot.take().unwrap_or_default())
        .collect()
}
/// Handle PSCI CPU_ON: ask vCPU `target` to start at `entry` with the
/// PSCI context id `ctx_id`.
///
/// Returns `PSCI_INVALID_PARAMS` for an out-of-range target,
/// `PSCI_ALREADY_ON` if the target is already running or has an
/// unconsumed start request pending, and `PSCI_SUCCESS` otherwise.
pub fn cpu_on(&self, target: u32, entry: u64, ctx_id: u64) -> i64 {
    let Some(slot) = self.slots.get(target as usize) else {
        return PSCI_INVALID_PARAMS;
    };
    // Take the mailbox lock *before* checking `on`: `wait_for_run` sets
    // `on` while holding this same lock, so checking under it closes the
    // check-then-act race the old code had (load, then lock).
    let mut s = slot.state.lock().unwrap();
    if slot.on.load(Ordering::SeqCst) || matches!(*s, VcpuStart::Run { .. }) {
        // Already running, or a prior CPU_ON has not been consumed yet —
        // per PSCI, a duplicate ON request must not clobber the pending
        // entry point; report ALREADY_ON instead of overwriting.
        return PSCI_ALREADY_ON;
    }
    *s = VcpuStart::Run { entry, ctx_id };
    slot.cv.notify_one();
    PSCI_SUCCESS
}
/// Handle PSCI AFFINITY_INFO for vCPU `target`: 0 = ON, 1 = OFF,
/// `PSCI_INVALID_PARAMS` for an unknown vCPU index.
pub fn affinity_info(&self, target: u32) -> i64 {
    let Some(slot) = self.slots.get(target as usize) else {
        return PSCI_INVALID_PARAMS;
    };
    if slot.on.load(Ordering::SeqCst) { 0 } else { 1 }
}
/// Park vCPU `idx` until a PSCI CPU_ON request arrives, returning its
/// `(entry, ctx_id)`. Returns `None` once shutdown is requested.
///
/// Sets the slot's `on` flag (under the mailbox lock) before returning,
/// which is what `cpu_on` and `affinity_info` observe.
pub fn wait_for_run(&self, idx: u32) -> Option<(u64, u64)> {
    let slot = &self.slots[idx as usize];
    let mut s = slot.state.lock().unwrap();
    loop {
        if self.shutdown.load(Ordering::SeqCst) {
            return None;
        }
        if let VcpuStart::Run { entry, ctx_id } = *s {
            slot.on.store(true, Ordering::SeqCst);
            return Some((entry, ctx_id));
        }
        // Bounded wait: nothing in this module notifies `slot.cv` when
        // `shutdown` is raised, so an unbounded wait() could hang this
        // thread forever. Poll the flag every 100 ms instead; spurious
        // or timed-out wakeups are harmless — the loop re-checks state.
        let (guard, _timed_out) = slot
            .cv
            .wait_timeout(s, std::time::Duration::from_millis(100))
            .unwrap();
        s = guard;
    }
}
}