// aarch64_rt/lib.rs
1// Copyright 2025 The aarch64-rt Authors.
2// This project is dual-licensed under Apache 2.0 and MIT terms.
3// See LICENSE-APACHE and LICENSE-MIT for details.
4
5//! Startup code for aarch64 Cortex-A processors.
6
7#![no_std]
8#![deny(clippy::undocumented_unsafe_blocks)]
9#![deny(unsafe_op_in_unsafe_fn)]
10
// The `el1`, `el2` and `el3` features select which single exception level the startup code
// targets, so they are mutually exclusive; fail the build early with a clear message if more than
// one is enabled.
#[cfg(any(
    all(feature = "el1", feature = "el2"),
    all(feature = "el1", feature = "el3"),
    all(feature = "el2", feature = "el3"),
))]
compile_error!("Only one `el` feature may be enabled at once.");
17
// Entry-point code is always compiled; exception-vector and initial-pagetable support are
// feature-gated.
mod entry;
#[cfg(feature = "exceptions")]
mod exceptions;
#[cfg(feature = "initial-pagetable")]
mod pagetable;
23
// Implementation detail re-exported for use outside this crate's normal API surface (hence
// `#[doc(hidden)]`); not intended for direct use by applications.
#[cfg(feature = "initial-pagetable")]
#[doc(hidden)]
pub mod __private {
    pub use crate::pagetable::{__enable_mmu_el1, __enable_mmu_el2, __enable_mmu_el3};
}
29
30#[cfg(any(feature = "exceptions", feature = "psci"))]
31use core::arch::asm;
32#[cfg(not(feature = "initial-pagetable"))]
33use core::arch::naked_asm;
34use core::mem::ManuallyDrop;
35pub use entry::{SuspendContext, secondary_entry, warm_boot_entry};
36#[cfg(feature = "exceptions")]
37pub use exceptions::{ExceptionHandlers, RegisterState, RegisterStateRef};
38#[cfg(all(feature = "initial-pagetable", feature = "el1"))]
39pub use pagetable::DEFAULT_TCR_EL1 as DEFAULT_TCR;
40#[cfg(all(feature = "initial-pagetable", feature = "el2"))]
41pub use pagetable::DEFAULT_TCR_EL2 as DEFAULT_TCR;
42#[cfg(all(feature = "initial-pagetable", feature = "el3"))]
43pub use pagetable::DEFAULT_TCR_EL3 as DEFAULT_TCR;
44#[cfg(feature = "initial-pagetable")]
45pub use pagetable::{
46 DEFAULT_MAIR, DEFAULT_SCTLR, DEFAULT_TCR_EL1, DEFAULT_TCR_EL2, DEFAULT_TCR_EL3,
47 InitialPagetable,
48};
49
/// No-op when the `initial-pagetable` feature isn't enabled.
///
/// # Safety
///
/// Not really unsafe in this case, but needs to be consistent with the signature when the
/// `initial-pagetable` feature is enabled.
#[cfg(not(feature = "initial-pagetable"))]
#[unsafe(naked)]
#[unsafe(link_section = ".init")]
#[unsafe(export_name = "enable_mmu")]
pub unsafe extern "C" fn enable_mmu() {
    // There is no pagetable to install, so simply return to the caller.
    naked_asm!("ret")
}
63
// NOTE(review): the `enable_mmu` symbol is presumably provided by assembly in the `pagetable`
// module (which also exposes `__enable_mmu_el1/2/3` via `__private`) — confirm against pagetable.rs.
#[cfg(feature = "initial-pagetable")]
unsafe extern "C" {
    /// Enables the MMU and caches with the initial pagetable.
    ///
    /// This is called automatically from entry point code both for primary and secondary CPUs so
    /// you usually won't need to call this yourself, but is available in case you need to implement
    /// your own assembly entry point.
    ///
    /// # Safety
    ///
    /// The initial pagetable must correctly map everything that the program uses.
    pub unsafe fn enable_mmu();
}
77
/// Sets the appropriate vbar to point to our `vector_table`, if the `exceptions` feature is
/// enabled.
///
/// If an `el*` feature is enabled the vector base register for that exception level is set
/// directly; otherwise the current exception level is read from `CurrentEL` at runtime and the
/// matching register is set.
///
/// If `exceptions` is not enabled then this is a no-op.
pub extern "C" fn set_exception_vector() {
    // SAFETY: We provide a valid vector table.
    #[cfg(all(feature = "el1", feature = "exceptions"))]
    unsafe {
        asm!(
            "adr x9, vector_table_el1",
            "msr vbar_el1, x9",
            // `adr` and `msr vbar_el1` do not modify NZCV, so flags are preserved (consistent
            // with the runtime-dispatch variant below).
            options(nomem, nostack, preserves_flags),
            out("x9") _,
        );
    }
    // SAFETY: We provide a valid vector table.
    #[cfg(all(feature = "el2", feature = "exceptions"))]
    unsafe {
        asm!(
            "adr x9, vector_table_el2",
            "msr vbar_el2, x9",
            options(nomem, nostack, preserves_flags),
            out("x9") _,
        );
    }
    // SAFETY: We provide a valid vector table.
    #[cfg(all(feature = "el3", feature = "exceptions"))]
    unsafe {
        asm!(
            "adr x9, vector_table_el3",
            "msr vbar_el3, x9",
            options(nomem, nostack, preserves_flags),
            out("x9") _,
        );
    }
    // No `el*` feature selected: dispatch on the current exception level at runtime.
    #[cfg(all(
        feature = "exceptions",
        not(any(feature = "el1", feature = "el2", feature = "el3"))
    ))]
    {
        let current_el: u64;
        // SAFETY: Reading CurrentEL is always safe.
        unsafe {
            asm!(
                "mrs {current_el}, CurrentEL",
                options(nomem, nostack, preserves_flags),
                current_el = out(reg) current_el,
            );
        }
        // CurrentEL encodes the exception level in bits [3:2].
        match (current_el >> 2) & 0b11 {
            // SAFETY: We provide a valid vector table.
            1 => unsafe {
                asm!(
                    "adr x9, vector_table_el1",
                    "msr vbar_el1, x9",
                    options(nomem, nostack, preserves_flags),
                    out("x9") _,
                );
            },
            // SAFETY: We provide a valid vector table.
            2 => unsafe {
                asm!(
                    "adr x9, vector_table_el2",
                    "msr vbar_el2, x9",
                    options(nomem, nostack, preserves_flags),
                    out("x9") _,
                );
            },
            // SAFETY: We provide a valid vector table.
            3 => unsafe {
                asm!(
                    "adr x9, vector_table_el3",
                    "msr vbar_el3, x9",
                    options(nomem, nostack, preserves_flags),
                    out("x9") _,
                );
            },
            // EL0 (or a reserved encoding) has no vbar we can set from here.
            _ => {
                panic!("Unexpected EL");
            }
        }
    }
}
161
/// Common Rust-level entry point: installs the exception vector (a no-op without the `exceptions`
/// feature) and then hands control to the application's `__main`, forwarding the four boot
/// arguments. Never returns.
extern "C" fn rust_entry(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> ! {
    set_exception_vector();
    __main(arg0, arg1, arg2, arg3)
}
166
unsafe extern "Rust" {
    /// Main function provided by the application using the [`entry!`] macro.
    safe fn __main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> !;
}
171
/// Marks the main function of the binary and reserves space for the boot stack.
///
/// The function passed to the macro must take the four `u64` boot arguments and never return.
///
/// Example:
///
/// ```rust,ignore
/// use aarch64_rt::entry;
///
/// entry!(main);
/// fn main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> ! {
///     loop {}
/// }
/// ```
///
/// 40 pages (160 KiB) is reserved for the boot stack by default; a different size may be configured
/// by passing the number of pages as a second argument to the macro, e.g. `entry!(main, 10);` to
/// reserve only 10 pages.
#[macro_export]
macro_rules! entry {
    ($name:path) => {
        entry!($name, 40);
    };
    ($name:path, $boot_stack_pages:expr) => {
        #[unsafe(export_name = "boot_stack")]
        #[unsafe(link_section = ".stack.boot_stack")]
        static mut __BOOT_STACK: $crate::Stack<$boot_stack_pages> = $crate::Stack::new();

        // Export a symbol with a name matching the extern declaration above.
        #[unsafe(export_name = "__main")]
        fn __main(arg0: u64, arg1: u64, arg2: u64, arg3: u64) -> ! {
            // Ensure that the main function provided by the application has the correct type.
            $name(arg0, arg1, arg2, arg3)
        }
    };
}
206
/// A stack for some CPU core.
///
/// This is used by the [`entry!`] macro to reserve space for the boot stack.
// Page-aligned (4 KiB) so the stack can be mapped with page granularity.
#[repr(C, align(4096))]
pub struct Stack<const NUM_PAGES: usize>([StackPage; NUM_PAGES]);
212
impl<const NUM_PAGES: usize> Stack<NUM_PAGES> {
    /// Creates a new zero-initialised stack.
    pub const fn new() -> Self {
        // `const` block so each zeroed page is a compile-time constant.
        Self([const { StackPage::new() }; NUM_PAGES])
    }
}
219
impl<const NUM_PAGES: usize> Default for Stack<NUM_PAGES> {
    // Equivalent to `Stack::new()`: a zero-initialised stack.
    fn default() -> Self {
        Self::new()
    }
}
225
/// A single 4 KiB page of stack space, aligned to a page boundary.
#[repr(C, align(4096))]
struct StackPage([u8; 4096]);
228
impl StackPage {
    /// Creates a zero-filled page.
    const fn new() -> Self {
        Self([0; 4096])
    }
}
234
/// Parameter block written at the very top of a secondary core's stack by `start_core`.
///
/// `#[repr(C)]` keeps the field layout stable.
// NOTE(review): presumably the assembly behind `secondary_entry` reads these two fields in order —
// confirm against entry.rs before changing the layout.
#[repr(C)]
pub(crate) struct StartCoreStack<F> {
    /// Pointer to the entry closure, stored lower in the same stack region.
    entry_ptr: *mut ManuallyDrop<F>,
    /// Function to jump to with `entry_ptr` as its argument; never returns.
    trampoline_ptr: unsafe extern "C" fn(&mut ManuallyDrop<F>) -> !,
}
240
#[cfg(feature = "psci")]
/// Issues a PSCI CPU_ON call to start the CPU core with the given MPIDR.
///
/// This starts the core with an assembly entry point which will enable the MMU, disable trapping of
/// floating point instructions, initialise the stack pointer to the given value, and then jump to
/// the given Rust entry point function, passing it the given argument value.
///
/// The closure passed as `rust_entry` **should never return**. Because the
/// [never type has not been stabilized](https://github.com/rust-lang/rust/issues/35121), this
/// cannot be enforced by the type system yet.
///
/// # Safety
///
/// `stack` must point to a region of memory which is reserved for this core's stack. It must remain
/// valid as long as the core is running, and there must not be any other access to it during that
/// time. It must be mapped both for the current core to write to it (to pass initial parameters)
/// and in the initial page table which the core being started will use, with the same memory
/// attributes for both.
// TODO: change `F` generic bounds to `FnOnce() -> !` when the never type is stabilized:
// https://github.com/rust-lang/rust/issues/35121
pub unsafe fn start_core<C: smccc::Call, F: FnOnce() + Send + 'static, const N: usize>(
    mpidr: u64,
    stack: *mut Stack<N>,
    rust_entry: F,
) -> Result<(), smccc::psci::Error> {
    // Compile-time check that the parameter block and the (aligned) closure fit within the stack
    // with headroom left for the trampoline's own stack frame.
    const {
        assert!(
            size_of::<StartCoreStack<F>>()
                + 2 * size_of::<F>()
                + 2 * align_of::<F>()
                + 1024 // trampoline stack frame overhead
                <= size_of::<Stack<N>>(),
            "the `rust_entry` closure is too big to fit in the core stack"
        );
    }

    // Ownership of the closure is logically handed to the new core (which takes it out again in
    // `trampoline`), so it must not be dropped here.
    let rust_entry = ManuallyDrop::new(rust_entry);

    // Place the closure at the bottom of the stack region, aligned for `F`.
    let stack_start = stack.cast::<u8>();
    let align_offset = stack_start.align_offset(align_of::<F>());
    let entry_ptr = stack_start
        .wrapping_add(align_offset)
        .cast::<ManuallyDrop<F>>();

    assert!(stack.is_aligned());
    // The stack grows downwards on aarch64, so get a pointer to the end of the stack.
    let stack_end = stack.wrapping_add(1);
    // The parameter block sits at the very top of the stack; `stack_end` is what we pass to the
    // new core as its context below.
    let params = stack_end.cast::<StartCoreStack<F>>().wrapping_sub(1);

    // Write the trampoline and entry closure, so the assembly entry point can jump to it.
    // SAFETY: Our caller promised that the stack is valid and nothing else will access it.
    unsafe {
        entry_ptr.write(rust_entry);
        // Use `write` rather than an assignment through the raw pointer so that no (possibly
        // uninitialised) previous value at this location is ever read or dropped.
        params.write(StartCoreStack {
            entry_ptr,
            trampoline_ptr: trampoline::<F>,
        });
    };

    // Wait for the stores above to complete before starting the secondary CPU core.
    dsb_st();

    smccc::psci::cpu_on::<C>(
        mpidr,
        secondary_entry as usize as _,
        stack_end as usize as _,
    )
}
309
#[cfg(feature = "psci")]
/// Used by [`start_core`] as an entry point for the secondary CPU core.
///
/// Takes the entry closure out of its `ManuallyDrop` wrapper, runs it, and panics if it ever
/// returns.
///
/// # Safety
///
/// This calls [`ManuallyDrop::take`] on the provided argument, so this function must be
/// called at most once for a given instance of `F`.
// TODO: change `F` generic bounds to `FnOnce() -> !` when the never type is stabilized:
// https://github.com/rust-lang/rust/issues/35121
unsafe extern "C" fn trampoline<F: FnOnce() + Send + 'static>(entry: &mut ManuallyDrop<F>) -> ! {
    // SAFETY: per this function's contract it runs at most once per `F` instance, so the closure
    // is still present and `ManuallyDrop::take` is not called a second time.
    let closure = unsafe { ManuallyDrop::take(entry) };
    closure();

    panic!("rust_entry function passed to start_core should never return");
}
327
/// Data synchronisation barrier that waits for stores to complete, for the full system.
///
/// Used to make the parameter writes in [`start_core`] visible before the new core starts.
#[cfg(feature = "psci")]
fn dsb_st() {
    // SAFETY: A synchronisation barrier is always safe.
    unsafe {
        asm!("dsb st", options(nostack));
    }
}
336
#[cfg(feature = "psci")]
/// Issues a PSCI CPU_SUSPEND call to suspend the current CPU core.
///
/// If the PSCI CPU_SUSPEND call doesn't return, then on resume `warm_boot_entry` will be called to
/// re-enable the MMU, set the given stack pointer, set the exception vector, and then call the
/// given `entry` function with `arg` as its parameter.
///
/// # Safety
///
/// `stack_ptr` must be a valid stack pointer to use for the resuming core. Depending on how you
/// want to handle resuming this could either be the bottom of the stack (if you want to treat
/// resuming like `CPU_ON`) or the top (if `entry` will restore register state and return from the
/// point where the suspend happened).
pub unsafe fn suspend_core<C: smccc::Call>(
    power_state: u32,
    stack_ptr: *mut u64,
    entry: extern "C" fn(u64) -> !,
    arg: u64,
) -> Result<(), smccc::psci::Error> {
    // Context passed to `warm_boot_entry` on resume: where to point the stack and what to call.
    let suspend_context = SuspendContext {
        stack_ptr,
        entry,
        arg,
    };
    // Passing a pointer to the local `suspend_context` is safe here, because it will remain valid
    // until either `cpu_suspend` returns (this frame is still live) or the stack pointer is reset
    // by `warm_boot_entry` (after which it is never read again).
    smccc::psci::cpu_suspend::<C>(
        power_state,
        warm_boot_entry as u64,
        (&raw const suspend_context) as u64,
    )
}
368}