//! Architecture-independent virtual CPU (`AxVCpu`) abstraction for axvcpu.
// Copyright 2025 The Axvisor Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use core::cell::{RefCell, UnsafeCell};

use axaddrspace::{GuestPhysAddr, HostPhysAddr};
use axerrno::{AxResult, ax_err};
use axvisor_api::vmm::{VCpuId, VMId};

use super::{AxArchVCpu, AxVCpuExitReason};

/// Immutable configuration data for a virtual CPU.
///
/// This structure contains the constant properties of a VCpu that don't change
/// after creation, such as CPU affinity settings and identifiers.
struct AxVCpuInnerConst {
    /// Unique identifier of the VM this VCpu belongs to.
    vm_id: VMId,
    /// Unique identifier of this VCpu within its VM.
    /// By convention, id 0 denotes the bootstrap processor (see `AxVCpu::is_bsp`).
    vcpu_id: VCpuId,
    /// Physical CPU ID that has priority to run this VCpu.
    /// Used for CPU affinity optimization (currently advisory only).
    favor_phys_cpu: usize,
    /// Bitmask of physical CPUs that can run this VCpu.
    /// If `None`, the VCpu can run on any available physical CPU.
    /// Similar to Linux CPU_SET functionality.
    phys_cpu_set: Option<usize>,
}

/// Represents the current execution state of a virtual CPU.
///
/// The VCpu follows a strict state machine:
/// Created → Free → Ready → Running
///
/// `Invalid` is entered whenever a state transition fails
/// (see `AxVCpu::with_state_transition`), poisoning the VCpu.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VCpuState {
    /// Invalid state - indicates an error occurred during state transition.
    Invalid = 0,
    /// Initial state after VCpu creation, not yet initialized.
    Created = 1,
    /// VCpu is initialized and ready to be bound to a physical CPU.
    Free = 2,
    /// VCpu is bound to a physical CPU and ready for execution.
    Ready = 3,
    /// VCpu is currently executing on a physical CPU.
    Running = 4,
    /// VCpu execution is blocked (waiting for I/O, etc.).
    Blocked = 5,
}

/// Mutable runtime state of a virtual CPU.
///
/// This structure contains data that changes during VCpu execution,
/// protected by `RefCell` for interior mutability.
pub struct AxVCpuInnerMut {
    /// Current execution state of the VCpu (see [`VCpuState`] for the
    /// allowed transitions).
    state: VCpuState,
}

/// Architecture-independent virtual CPU implementation.
///
/// This is the main VCpu abstraction that provides a unified interface for
/// managing virtual CPUs across different architectures. It delegates
/// architecture-specific operations to implementations of the `AxArchVCpu` trait.
///
/// Note that:
/// - This struct handles internal mutability itself, almost all the methods are `&self`.
/// - This struct is not thread-safe. It's caller's responsibility to ensure the safety.
pub struct AxVCpu<A: AxArchVCpu> {
    /// Immutable VCpu configuration (VM ID, CPU affinity, etc.).
    inner_const: AxVCpuInnerConst,
    /// Mutable VCpu state protected by `RefCell` for safe interior mutability.
    inner_mut: RefCell<AxVCpuInnerMut>,
    /// Architecture-specific VCpu implementation.
    ///
    /// Uses `UnsafeCell` instead of `RefCell` because `RefCell` guards cannot be
    /// dropped during VCpu execution (when control is transferred to the guest),
    /// so borrow tracking would be violated while the guest runs.
    arch_vcpu: UnsafeCell<A>,
}

93impl<A: AxArchVCpu> AxVCpu<A> {
94    /// Creates a new virtual CPU instance.
95    ///
96    /// Initializes a VCpu with the given configuration and creates the underlying
97    /// architecture-specific implementation. The VCpu starts in the `Created` state.
98    ///
99    /// # Arguments
100    ///
101    /// * `vm_id` - Unique identifier of the VM this VCpu belongs to
102    /// * `vcpu_id` - Unique identifier for this VCpu within the VM
103    /// * `favor_phys_cpu` - Physical CPU ID that should preferentially run this VCpu
104    /// * `phys_cpu_set` - Optional bitmask of allowed physical CPUs (None = no restriction)
105    /// * `arch_config` - Architecture-specific configuration for VCpu creation
106    ///
107    /// # Returns
108    ///
109    /// Returns `Ok(AxVCpu)` on success, or an error if architecture-specific creation fails.
110    pub fn new(
111        vm_id: VMId,
112        vcpu_id: VCpuId,
113        favor_phys_cpu: usize,
114        phys_cpu_set: Option<usize>,
115        arch_config: A::CreateConfig,
116    ) -> AxResult<Self> {
117        Ok(Self {
118            inner_const: AxVCpuInnerConst {
119                vm_id,
120                vcpu_id,
121                favor_phys_cpu,
122                phys_cpu_set,
123            },
124            inner_mut: RefCell::new(AxVCpuInnerMut {
125                state: VCpuState::Created,
126            }),
127            arch_vcpu: UnsafeCell::new(A::new(vm_id, vcpu_id, arch_config)?),
128        })
129    }
130
131    /// Sets up the VCpu for execution.
132    ///
133    /// Configures the VCpu's entry point, memory management (EPT root), and any
134    /// architecture-specific setup. Transitions the VCpu from `Created` to `Free` state.
135    pub fn setup(
136        &self,
137        entry: GuestPhysAddr,
138        ept_root: HostPhysAddr,
139        arch_config: A::SetupConfig,
140    ) -> AxResult {
141        self.manipulate_arch_vcpu(VCpuState::Created, VCpuState::Free, |arch_vcpu| {
142            arch_vcpu.set_entry(entry)?;
143            arch_vcpu.set_ept_root(ept_root)?;
144            arch_vcpu.setup(arch_config)?;
145            Ok(())
146        })
147    }
148
149    /// Returns the unique identifier of this VCpu.
150    pub const fn id(&self) -> VCpuId {
151        self.inner_const.vcpu_id
152    }
153
154    /// Get the id of the VM this vcpu belongs to.
155    pub const fn vm_id(&self) -> VMId {
156        self.inner_const.vm_id
157    }
158
159    /// Returns the preferred physical CPU for this VCpu.
160    ///
161    /// This is used for CPU affinity optimization - the scheduler should
162    /// preferentially run this VCpu on the returned physical CPU ID.
163    ///
164    /// # Note
165    ///
166    /// Currently unused in the implementation but reserved for future
167    /// scheduler optimizations.
168    pub const fn favor_phys_cpu(&self) -> usize {
169        self.inner_const.favor_phys_cpu
170    }
171
172    /// Returns the set of physical CPUs that can run this VCpu.
173    pub const fn phys_cpu_set(&self) -> Option<usize> {
174        self.inner_const.phys_cpu_set
175    }
176
177    /// Checks if this VCpu is the Bootstrap Processor (BSP).
178    ///
179    /// By convention, the VCpu with ID 0 is always considered the BSP,
180    /// which is responsible for system initialization in multi-core VMs.
181    pub const fn is_bsp(&self) -> bool {
182        self.inner_const.vcpu_id == 0
183    }
184
185    /// Gets the current execution state of the VCpu.
186    pub fn state(&self) -> VCpuState {
187        self.inner_mut.borrow().state
188    }
189
190    /// Set the state of the VCpu.
191    /// # Safety
192    /// This method is unsafe because it may break the state transition model.
193    /// Use it with caution.
194    pub unsafe fn set_state(&self, state: VCpuState) {
195        self.inner_mut.borrow_mut().state = state;
196    }
197
198    /// Execute a block with the state of the VCpu transitioned from `from` to `to`. If the current state is not `from`, return an error.
199    ///
200    /// The state will be set to [`VCpuState::Invalid`] if an error occurs (including the case that the current state is not `from`).
201    ///
202    /// The state will be set to `to` if the block is executed successfully.
203    pub fn with_state_transition<F, T>(&self, from: VCpuState, to: VCpuState, f: F) -> AxResult<T>
204    where
205        F: FnOnce() -> AxResult<T>,
206    {
207        let mut inner_mut = self.inner_mut.borrow_mut();
208        if inner_mut.state != from {
209            inner_mut.state = VCpuState::Invalid;
210            ax_err!(
211                BadState,
212                format!("VCpu state is not {:?}, but {:?}", from, inner_mut.state)
213            )
214        } else {
215            let result = f();
216            inner_mut.state = if result.is_err() {
217                VCpuState::Invalid
218            } else {
219                to
220            };
221            result
222        }
223    }
224
225    /// Execute a block with the current VCpu set to `&self`.
226    pub fn with_current_cpu_set<F, T>(&self, f: F) -> T
227    where
228        F: FnOnce() -> T,
229    {
230        if get_current_vcpu::<A>().is_some() {
231            panic!("Nested vcpu operation is not allowed!");
232        } else {
233            unsafe {
234                set_current_vcpu(self);
235            }
236            let result = f();
237            unsafe {
238                clear_current_vcpu::<A>();
239            }
240            result
241        }
242    }
243
244    /// Execute an operation on the architecture-specific VCpu, with the state transitioned from `from` to `to` and the current VCpu set to `&self`.
245    ///
246    /// This method is a combination of [`AxVCpu::with_state_transition`] and [`AxVCpu::with_current_cpu_set`].
247    pub fn manipulate_arch_vcpu<F, T>(&self, from: VCpuState, to: VCpuState, f: F) -> AxResult<T>
248    where
249        F: FnOnce(&mut A) -> AxResult<T>,
250    {
251        self.with_state_transition(from, to, || {
252            self.with_current_cpu_set(|| f(self.get_arch_vcpu()))
253        })
254    }
255
256    /// Transition the state of the VCpu. If the current state is not `from`, return an error.
257    pub fn transition_state(&self, from: VCpuState, to: VCpuState) -> AxResult {
258        self.with_state_transition(from, to, || Ok(()))
259    }
260
261    /// Get the architecture-specific VCpu.
262    #[allow(clippy::mut_from_ref)]
263    pub fn get_arch_vcpu(&self) -> &mut A {
264        unsafe { &mut *self.arch_vcpu.get() }
265    }
266
267    /// Run the VCpu.
268    pub fn run(&self) -> AxResult<AxVCpuExitReason> {
269        self.transition_state(VCpuState::Ready, VCpuState::Running)?;
270        self.manipulate_arch_vcpu(VCpuState::Running, VCpuState::Ready, |arch_vcpu| {
271            arch_vcpu.run()
272        })
273    }
274
275    /// Bind the VCpu to the current physical CPU.
276    pub fn bind(&self) -> AxResult {
277        self.manipulate_arch_vcpu(VCpuState::Free, VCpuState::Ready, |arch_vcpu| {
278            arch_vcpu.bind()
279        })
280    }
281
282    /// Unbind the VCpu from the current physical CPU.
283    pub fn unbind(&self) -> AxResult {
284        self.manipulate_arch_vcpu(VCpuState::Ready, VCpuState::Free, |arch_vcpu| {
285            arch_vcpu.unbind()
286        })
287    }
288
289    /// Sets the entry address of the VCpu.
290    pub fn set_entry(&self, entry: GuestPhysAddr) -> AxResult {
291        self.get_arch_vcpu().set_entry(entry)
292    }
293
294    /// Sets the value of a general-purpose register according to the given index.
295    pub fn set_gpr(&self, reg: usize, val: usize) {
296        self.get_arch_vcpu().set_gpr(reg, val);
297    }
298
299    /// Inject an interrupt to the VCpu.
300    pub fn inject_interrupt(&self, vector: usize) -> AxResult {
301        self.get_arch_vcpu().inject_interrupt(vector)
302    }
303
304    /// Sets the return value of the VCpu.
305    pub fn set_return_value(&self, val: usize) {
306        self.get_arch_vcpu().set_return_value(val);
307    }
308}
309
/// Per-CPU slot holding a pointer to the VCpu currently active on this
/// physical CPU, or `None` when no VCpu operation is in progress.
///
/// Stored as a type-erased `*mut u8` because a `percpu` static cannot be
/// generic over the architecture type `A`; the accessor functions below cast
/// it back to `*mut AxVCpu<A>`.
#[percpu::def_percpu]
static mut CURRENT_VCPU: Option<*mut u8> = None;

313/// Get the current VCpu on the current physical CPU.
314///
315/// It's guaranteed that each time before a method of [`AxArchVCpu`] is called, the current VCpu is set to the corresponding [`AxVCpu`].
316/// So methods of [`AxArchVCpu`] can always get the [`AxVCpu`] containing itself by calling this method.
317#[allow(static_mut_refs)]
318pub fn get_current_vcpu<'a, A: AxArchVCpu>() -> Option<&'a AxVCpu<A>> {
319    unsafe {
320        CURRENT_VCPU
321            .current_ref_raw()
322            .as_ref()
323            .copied()
324            .and_then(|p| (p as *const AxVCpu<A>).as_ref())
325    }
326}
327
328/// Get a mutable reference to the current VCpu on the current physical CPU.
329///
330/// See [`get_current_vcpu`] for more details.
331#[allow(static_mut_refs)]
332pub fn get_current_vcpu_mut<'a, A: AxArchVCpu>() -> Option<&'a mut AxVCpu<A>> {
333    unsafe {
334        CURRENT_VCPU
335            .current_ref_mut_raw()
336            .as_mut()
337            .copied()
338            .and_then(|p| (p as *mut AxVCpu<A>).as_mut())
339    }
340}
341
342/// Set the current VCpu on the current physical CPU.
343///
344/// # Safety
345/// This method is marked as unsafe because it may result in unexpected behavior if not used properly.
346/// Do not call this method unless you know what you are doing.
347#[allow(static_mut_refs)]
348pub unsafe fn set_current_vcpu<A: AxArchVCpu>(vcpu: &AxVCpu<A>) {
349    unsafe {
350        CURRENT_VCPU
351            .current_ref_mut_raw()
352            .replace(vcpu as *const _ as *mut u8);
353    }
354}
355
356/// Clear the current vcpu on the current physical CPU.
357///
358/// # Safety
359/// This method is marked as unsafe because it may result in unexpected behavior if not used properly.
360/// Do not call this method unless you know what you are doing.
361#[allow(static_mut_refs)]
362pub unsafe fn clear_current_vcpu<A: AxArchVCpu>() {
363    unsafe {
364        CURRENT_VCPU.current_ref_mut_raw().take();
365    }
366}