// aarch32_rt/lib.rs
1//! # Run-time support for AArch32 Processors
2//!
3//! This library implements a simple Arm vector table, suitable for getting into
4//! a Rust application running in System Mode. It also provides a reference
5//! start up method. Most AArch32 based systems will require chip specific
6//! start-up code, so the start-up method can be overridden.
7//!
8//! The default startup routine provided by this crate does not include any
9//! special handling for multi-core support because this is oftentimes
10//! implementation defined and the exact handling depends on the specific chip
11//! in use. Many implementations only run the startup routine with one core and
12//! will keep other cores in reset until they are woken up by an implementation
13//! specific mechanism. For other implementations where multi-core specific
14//! startup adaptions are necessary, the startup routine can be overwritten by
15//! the user.
16//!
17//! ## Features
18//!
19//! - `eabi-fpu`: Enables the FPU, even if you selected a soft-float ABI target.
20//! - `fpu-d32`: Make the interrupt context store routines save the upper
21//!   double-precision registers.
22//!
23//!   If your program is using all 32 double-precision registers (e.g. if you
24//!   have set the `+d32` target feature) then you need to enable this option
25//!   otherwise important FPU state may be lost when an exception occurs.
26//!
27//! ## Information about the Run-Time
28//!
29//! Transferring from System Mode to User Mode (i.e. implementing an RTOS) is
30//! not handled here.
31//!
//! If your processor starts in Hyp mode, this runtime will transfer it to
33//! System mode. If you wish to write a hypervisor, you will need to replace
34//! this library with something more advanced.
35//!
36//! We assume that a set of symbols exist, either for constants or for C
37//! compatible functions or for naked raw-assembly functions. They are described
38//! in the next three sections.
39//!
40//! ## Constants
41//!
//! * `_num_cores` - the number of CPU cores (and hence the number of copies of
43//!   each stack). Must be > 0.
44//! * `__sbss` - the start of zero-initialised data in RAM. Must be 4-byte
45//!   aligned.
46//! * `__ebss` - the end of zero-initialised data in RAM. Must be 4-byte
47//!   aligned.
48//! * `_fiq_stack_size` - the number of bytes to be reserved for stack space
49//!   when in FIQ mode; will be padded to a multiple of 8.
//! * `_irq_stack_size` - the number of bytes to be reserved for stack space
//!   when in IRQ mode; will be padded to a multiple of 8.
52//! * `_svc_stack_size` - the number of bytes to be reserved for stack space
53//!   when in SVC mode; will be padded to a multiple of 8.
54//! * `_und_stack_size` - the number of bytes to be reserved for stack space
55//!   when in Undefined mode; will be padded to a multiple of 8.
56//! * `_abt_stack_size` - the number of bytes to be reserved for stack space
57//!   when in Abort mode; will be padded to a multiple of 8.
58//! * `_hyp_stack_size` - the number of bytes to be reserved for stack space
59//!   when in Hyp mode; will be padded to a multiple of 8.
60//! * `_sys_stack_size` - the number of bytes to be reserved for stack space
61//!   when in System mode; will be padded to a multiple of 8.
62//! * `__sdata` - the start of initialised data in RAM. Must be 4-byte aligned.
63//! * `__edata` - the end of initialised data in RAM. Must be 4-byte aligned.
64//! * `__sidata` - the start of the initialisation values for data, in read-only
65//!   memory. Must be 4-byte aligned.
66//!
67//! Using our default start-up function `_default_start`, the memory between
68//! `__sbss` and `__ebss` is zeroed, and the memory between `__sdata` and
69//! `__edata` is initialised with the data found at `__sidata`.
70//!
71//! ## Stacks
72//!
73//! Stacks are located in `.stacks` section which is mapped to the `STACKS`
74//! memory region. Per default, the stacks are pushed to the end of the `STACKS`
75//! by a filler section. We allocate stacks for each core, based on the
76//! `_num_cores` linker symbol.
77//!
78//! The stacks look like:
79//!
80//! ```text
81//! +------------------+ <---- ORIGIN(STACKS) + LENGTH(STACKS)
82//! |     SYS Stack    | } _sys_stack_size * _num_cores bytes
83//! +------------------+
84//! |     FIQ Stack    | } _fiq_stack_size * _num_cores bytes
85//! +------------------+
86//! |     IRQ Stack    | } _irq_stack_size * _num_cores bytes
87//! +------------------+
88//! |     HYP Stack    | } _hyp_stack_size * _num_cores bytes (only used on Armv8-R)
89//! +------------------+
90//! |     ABT Stack    | } _abt_stack_size * _num_cores bytes
91//! +------------------+
92//! |     SVC Stack    | } _svc_stack_size * _num_cores bytes
93//! +------------------+
94//! |     UND Stack    | } _und_stack_size * _num_cores bytes
95//! +------------------+
96//! |  filler section  |
97//! +------------------+ <---- ORIGIN(STACKS)
98//! ```
99//!
100//! Our linker script PROVIDEs a symbol `_pack_stacks`. By setting this symbol
101//! to 0 in memory.x, the stacks can be moved to the beginning of the `STACKS`
102//! region or the end of the previous section located in STACKS or its alias.
103//!
104//! ## C-Compatible Functions
105//!
106//! ### Main Function
107//!
108//! The symbol `kmain` should be an `extern "C"` function. It is called in SYS
109//! mode after all the global variables have been initialised. There is no
110//! default - this function is mandatory.
111//!
112//! ```rust
113//! #[unsafe(no_mangle)]
114//! extern "C" fn kmain() -> ! {
115//!     loop { }
116//! }
117//! ```
118//!
119//! You can also create a 'kmain' function by using the `#[entry]` attribute on
120//! a normal Rust function. The function will be renamed in such a way that the
121//! start-up assembly code can find it, but normal Rust code cannot. Therefore
122//! you can be assured that the function will only be called once (unless
123//! someone resorts to `unsafe` Rust to import the `kmain` symbol as an `extern
124//! "C" fn`).
125//!
126//! ```rust
127//! use aarch32_rt::entry;
128//!
129//! #[entry]
130//! fn my_main() -> ! {
131//!     loop { }
132//! }
133//! ```
134//!
135//! ### Undefined Handler
136//!
137//! The symbol `_undefined_handler` should be an `extern "C"` function. It is
138//! called in UND mode when an [Undefined Instruction Exception] occurs.
139//!
140//! [Undefined Instruction Exception]:
141//!     https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/The-System-Level-Programmers--Model/Exception-descriptions/Undefined-Instruction-exception?lang=en
142//!
143//! Our linker script PROVIDEs a default `_undefined_handler` symbol which is an
144//! alias for the `_default_handler` function. You can override it by defining
145//! your own `_undefined_handler` function, like:
146//!
147//! ```rust
148//! /// Does not return
149//! #[unsafe(no_mangle)]
150//! extern "C" fn _undefined_handler(addr: usize) -> ! {
151//!     loop { }
152//! }
153//! ```
154//!
155//! or:
156//!
157//! ```rust
158//! /// Execution will continue from the returned address.
159//! ///
160//! /// Return `addr` to go back and execute the faulting instruction again.
161//! #[unsafe(no_mangle)]
162//! unsafe extern "C" fn _undefined_handler(addr: usize) -> usize {
163//!     // do stuff here, then return to the address *after* the one
164//!     // that failed
165//!     addr + 4
166//! }
167//! ```
168//!
169//! You can create a `_undefined_handler` function by using the
170//! `#[exception(Undefined)]` attribute on a Rust function with the appropriate
171//! arguments and return type.
172//!
173//! ```rust
174//! use aarch32_rt::exception;
175//!
176//! #[exception(Undefined)]
177//! fn my_handler(addr: usize) -> ! {
178//!     loop { }
179//! }
180//! ```
181//!
182//! or:
183//!
184//! ```rust
185//! use aarch32_rt::exception;
186//!
187//! #[exception(Undefined)]
188//! unsafe fn my_handler(addr: usize) -> usize {
189//!     // do stuff here, then return the address to return to
190//!     addr + 4
191//! }
192//! ```
193//!
194//! ### Supervisor Call Handler
195//!
196//! The symbol `_svc_handler` should be an `extern "C"` function. It is called
//! in SVC mode when a [Supervisor Call Exception] occurs.
198//!
199//! [Supervisor Call Exception]:
200//!     https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/The-System-Level-Programmers--Model/Exception-descriptions/Supervisor-Call--SVC--exception?lang=en
201//!
202//! Returning from this function will cause execution to resume at the function
//! that triggered the exception, immediately after the SVC instruction. You
204//! cannot control where execution resumes. The function is passed the literal
205//! integer argument to the `svc` instruction, which is extracted from the
206//! machine code for you by the default assembly trampoline, along with
207//! registers r0 through r5, in the form of a reference to a `Frame` structure.
208//!
209//! Our linker script PROVIDEs a default `_svc_handler` symbol which is an alias
210//! for the `_default_handler` function. You can override it by defining your
211//! own `_svc_handler` function, like:
212//!
213//! ```rust
214//! #[unsafe(no_mangle)]
215//! extern "C" fn _svc_handler(arg: u32, frame: &aarch32_rt::Frame) -> u32 {
216//!     // do stuff here
217//!     todo!()
218//! }
219//! ```
220//!
221//! You can also create a `_svc_handler` function by using the
222//! `#[exception(SupervisorCall)]` attribute on a normal Rust function.
223//!
224//! ```rust
225//! use aarch32_rt::exception;
226//!
227//! #[exception(SupervisorCall)]
228//! fn svc_handler(arg: u32, frame: &aarch32_rt::Frame) -> u32 {
229//!     // do stuff here
230//!     todo!()
231//! }
232//! ```
233//!
234//! ### Hypervisor Call Handler
235//!
236//! The symbol `_hvc_handler` should be an `extern "C"` function. It is called
//! in HYP mode when a [Hypervisor Call Exception] occurs.
238//!
239//! [Hypervisor Call Exception]:
240//!     https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/The-System-Level-Programmers--Model/Exception-descriptions/Hypervisor-Call--HVC--exception?lang=en
241//!
242//! Returning from this function will cause execution to resume at the function
//! that triggered the exception, immediately after the HVC instruction. You
244//! cannot control where execution resumes. The function is passed contents of
245//! the Hypervisor Syndrome Register (HSR) register, which is fetched by the
246//! default assembly trampoline, along with registers r0 through r5, in the form
247//! of a reference to a `Frame` structure.
248//!
249//! Our linker script PROVIDEs a default `_hvc_handler` symbol which is an alias
250//! for the `_default_handler` function. You can override it by defining your
251//! own `_hvc_handler` function, like:
252//!
253//! ```rust
254//! #[unsafe(no_mangle)]
255//! extern "C" fn _hvc_handler(hsr: u32, frame: &aarch32_rt::Frame) -> u32 {
256//!     // do stuff here
257//!     todo!()
258//! }
259//! ```
260//!
261//! You can also create a `_hvc_handler` function by using the
262//! `#[exception(HypervisorCall)]` attribute on a normal Rust function.
263//!
264//! ```rust
265//! use aarch32_rt::exception;
266//!
267//! #[exception(HypervisorCall)]
268//! fn my_hvc_handler(hsr: u32, frame: &aarch32_rt::Frame) -> u32 {
269//!     // do stuff here
270//!     todo!()
271//! }
272//! ```
273//!
274//! If you wish to inspect the HSR value, you can use the `aarch32-cpu` crate:
275//!
276//! ```rust,ignore
277//! let hsr = aarch32_cpu::register::Hsr::new_with_raw_value(hsr);
278//! ```
279//!
280//! ### Prefetch Abort Handler
281//!
282//! The symbol `_prefetch_abort_handler` should be an `extern "C"` function. It
283//! is called in ABT mode when a [Prefetch Abort Exception] occurs.
284//!
285//! [Prefetch Abort Exception]:
286//!     https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/The-System-Level-Programmers--Model/Exception-descriptions/Prefetch-Abort-exception?lang=en
287//!
288//! Our linker script PROVIDEs a default `_prefetch_abort_handler` symbol which
289//! is an alias for the `_default_handler` function. You can override it by
//! defining your own `_prefetch_abort_handler` function.
291//!
292//! This function takes the address of faulting instruction, and can either not
293//! return:
294//!
295//! ```rust
296//! #[unsafe(no_mangle)]
297//! extern "C" fn _prefetch_abort_handler(addr: usize) -> ! {
298//!     loop { }
299//! }
300//! ```
301//!
302//! Or it can return an address where execution should resume after the
303//! Exception handler is complete (which is unsafe):
304//!
305//! ```rust
306//! #[unsafe(no_mangle)]
307//! unsafe extern "C" fn _prefetch_abort_handler(addr: usize) -> usize {
308//!     // do stuff, then go back to the instruction after the one that failed
309//!     addr + 4
310//! }
311//! ```
312//!
313//! You can create a `_prefetch_abort_handler` function by using the
314//! `#[exception(PrefetchAbort)]` macro on a Rust function with the appropriate
315//! arguments and return type.
316//!
317//! ```rust
318//! use aarch32_rt::exception;
319//!
320//! #[exception(PrefetchAbort)]
321//! fn my_handler(addr: usize) -> ! {
322//!     loop { }
323//! }
324//! ```
325//!
326//! or:
327//!
328//! ```rust
329//! use aarch32_rt::exception;
330//!
331//! #[exception(PrefetchAbort)]
332//! unsafe fn my_handler(addr: usize) -> usize {
333//!     // do stuff, then go back to the instruction after the one that failed
334//!     addr + 4
335//! }
336//! ```
337//!
338//! ### Data Abort Handler
339//!
340//! The symbol `_data_abort_handler` should be an `extern "C"` function. It is
//! called in ABT mode when a [Data Abort Exception] occurs.
342//!
343//! [Data Abort Exception]:
344//!     https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/The-System-Level-Programmers--Model/Exception-descriptions/Data-Abort-exception?lang=en
345//!
346//! Our linker script PROVIDEs a default `_data_abort_handler` symbol which is
347//! an alias for the `_default_handler` function. You can override it by
//! defining your own `_data_abort_handler` function.
349//!
350//! This function takes the address of faulting instruction, and can either not
351//! return:
352//!
353//! ```rust
354//! #[unsafe(no_mangle)]
355//! extern "C" fn _data_abort_handler(addr: usize) -> ! {
356//!     loop { }
357//! }
358//! ```
359//!
360//! Or it can return an address where execution should resume after the
361//! Exception handler is complete (which is unsafe):
362//!
363//! ```rust
364//! #[unsafe(no_mangle)]
365//! unsafe extern "C" fn _data_abort_handler(addr: usize) -> usize {
366//!     // do stuff, then go back to the instruction after the one that failed
367//!     addr + 4
368//! }
369//! ```
370//!
371//! You can create a `_data_abort_handler` function by using the
372//! `#[exception(DataAbort)]` macro on a Rust function with the appropriate
373//! arguments and return type.
374//!
375//! ```rust
376//! use aarch32_rt::exception;
377//!
378//! #[exception(DataAbort)]
379//! fn my_handler(addr: usize) -> ! {
380//!     loop { }
381//! }
382//! ```
383//!
384//! or:
385//!
386//! ```rust
387//! use aarch32_rt::exception;
388//!
389//! #[exception(DataAbort)]
390//! unsafe fn my_handler(addr: usize) -> usize {
391//!     // do stuff, then go back to the instruction after the one that failed
392//!     addr + 4
393//! }
394//! ```
395//!
396//! ### IRQ Handler
397//!
398//! The symbol `_irq_handler` should be an `extern "C"` function. It is called
399//! in SYS mode (not IRQ mode!) when an [Interrupt] occurs.
400//!
401//! [Interrupt]:
402//!     https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/The-System-Level-Programmers--Model/Exception-descriptions/IRQ-exception?lang=en
403//!
404//! Returning from this function will cause execution to resume at wherever it
405//! was interrupted. You cannot control where execution resumes.
406//!
407//! This function is entered with interrupts masked, but you may unmask (i.e.
408//! enable) interrupts inside this function if desired. You will probably want
409//! to talk to your interrupt controller first, otherwise you'll just keep
410//! re-entering this interrupt handler recursively until you stack overflow.
411//!
412//! Our linker script PROVIDEs a default `_irq_handler` symbol which is an alias
413//! for `_default_handler`. You can override it by defining your own
414//! `_irq_handler` function.
415//!
416//! Expected prototype:
417//!
418//! ```rust
419//! #[unsafe(no_mangle)]
420//! extern "C" fn _irq_handler() {
421//!     // 1. Talk to interrupt controller
422//!     // 2. Handle interrupt
423//!     // 3. Clear interrupt
424//! }
425//! ```
426//!
427//! You can also create a `_irq_handler` function by using the `#[irq]`
428//! attribute on a normal Rust function.
429//!
430//! ```rust
431//! use aarch32_rt::irq;
432//!
433//! #[irq]
434//! fn my_irq_handler() {
435//!     // 1. Talk to interrupt controller
436//!     // 2. Handle interrupt
437//!     // 3. Clear interrupt
438//! }
439//! ```
440//!
441//! ## ASM functions
442//!
443//! These are the naked 'raw' assembly functions the run-time requires:
444//!
445//! * `_start` - a Reset handler. Our linker script PROVIDEs a default function
446//!   at `_default_start` but you can override it. The provided default start
447//!   function will initialise all global variables and then call `kmain` in SYS
448//!   mode. Some SoCs require a chip specific startup for tasks like MPU
449//!   initialization or chip specific initialization routines, so if our
450//!   start-up routine doesn't work for you, supply your own `_start` function
451//!   (but feel free to call our `_default_start` as part of it).
452//!
453//! * `_asm_undefined_handler` - a naked function to call when an Undefined
454//!   Exception occurs. Our linker script PROVIDEs a default function at
455//!   `_asm_default_undefined_handler` but you can override it. The provided
456//!   default handler will call `_undefined_handler` in UND mode, saving state
457//!   as required.
458//!
459//! * `_asm_svc_handler` - a naked function to call when an Supervisor Call
460//!   (SVC) Exception occurs. Our linker script PROVIDEs a default function at
461//!   `_asm_default_svc_handler` but you can override it. The provided default
462//!   handler will call `_svc_handler` in SVC mode, saving state as required.
463//!
464//! * `_asm_prefetch_abort_handler` - a naked function to call when a Prefetch
465//!   Abort Exception occurs. Our linker script PROVIDEs a default function at
466//!   `_asm_default_prefetch_abort_handler` but you can override it. The
467//!   provided default handler will call `_prefetch_abort_handler`, saving state
468//!   as required. Note that Prefetch Abort Exceptions are handled in Abort Mode
469//!   (ABT), Monitor Mode (MON) or Hyp Mode (HYP), depending on CPU
470//!   configuration.
471//!
472//! * `_asm_data_abort_handler` - a naked function to call when a Data Abort
473//!   Exception occurs. Our linker script PROVIDEs a default function at
474//!   `_asm_default_data_abort_handler` but you can override it. The provided
475//!   default handler will call `_data_abort_handler` in ABT mode, saving state
476//!   as required.
477//!
//! * `_asm_irq_handler` - a naked function to call when an Interrupt Request
//!   (IRQ) occurs. Our linker script PROVIDEs a default function at
480//!   `_asm_default_irq_handler` but you can override it. The provided default
481//!   handler will call `_irq_handler` in SYS mode (not IRQ mode), saving state
482//!   as required.
483//!
484//! * `_asm_fiq_handler` - a naked function to call when a Fast Interrupt
485//!   Request (FIQ) occurs. Our linker script PROVIDEs a default function at
486//!   `_asm_default_fiq_handler` but you can override it. The provided default
487//!   just spins forever.
488//!
489//! ## Outputs
490//!
491//! This library produces global symbols called:
492//!
493//! * `_vector_table` - the start of the interrupt vector table
494//! * `_default_start` - the default Reset handler, that sets up some stacks and
495//!   calls an `extern "C"` function called `kmain`.
496//! * `_asm_default_undefined_handler` - assembly language trampoline that calls
497//!   `_undefined_handler`
498//! * `_asm_default_svc_handler` - assembly language trampoline that calls
499//!   `_svc_handler`
500//! * `_asm_default_prefetch_abort_handler` - assembly language trampoline that
501//!   calls `_prefetch_abort_handler`
502//! * `_asm_default_data_abort_handler` - assembly language trampoline that
503//!   calls `_data_abort_handler`
504//! * `_asm_default_irq_handler` - assembly language trampoline that calls
505//!   `_irq_handler`
506//! * `_asm_default_fiq_handler` - an FIQ handler that just spins
507//! * `_default_handler` - a C compatible function that spins forever.
508//! * `_init_segments` - initialises `.bss` and `.data` and zeroes the stacks
509//! * `_stack_setup_preallocated` - initialises UND, SVC, ABT, IRQ, FIQ and SYS
510//!   stacks from the `.stacks` section defined in link.x, based on
511//!   _xxx_stack_size values, and the core number given in `r0`
512//! * `_xxx_stack_high_end` and `_xxx_stack_low_end` where the former is the top
513//!   and the latter the bottom of the stack for each mode (`und`, `svc`, `abt`,
514//!   `irq`, `fiq`, `sys`)
515//!
516//! The assembly language trampolines are required because AArch32 processors do
517//! not save a great deal of state on entry to an exception handler, unlike
518//! Armv7-M (and other M-Profile) processors. We must therefore save this state
519//! to the stack using assembly language, before transferring to an `extern "C"`
520//! function. Because FIQ is often performance-sensitive, we don't supply an FIQ
521//! trampoline; if you want to use FIQ, you have to write your own assembly
522//! routine, allowing you to preserve only whatever state is important to you.
523//!
524//! ## Examples
525//!
526//! You can find example code using QEMU inside the [project
527//! repository](https://github.com/rust-embedded/aarch32/tree/main/examples)
528
529#![no_std]
530
531#[cfg(target_arch = "arm")]
532use aarch32_cpu::register::{cpsr::ProcessorMode, Cpsr};
533
534#[cfg(all(
535    any(arm_architecture = "v7-a", arm_architecture = "v8-r"),
536    not(feature = "el2-mode")
537))]
538use aarch32_cpu::register::Hactlr;
539
540pub use aarch32_rt_macros::{entry, exception, irq};
541
542#[cfg(all(target_arch = "arm", arm_architecture = "v8-r", feature = "el2-mode"))]
543mod arch_v8_hyp;
544
545#[cfg(all(
546    target_arch = "arm",
547    any(
548        arm_architecture = "v7-a",
549        arm_architecture = "v7-r",
550        all(arm_architecture = "v8-r", not(feature = "el2-mode"))
551    ),
552))]
553mod arch_v7;
554
555#[cfg(all(
556    target_arch = "arm",
557    not(any(
558        arm_architecture = "v7-a",
559        arm_architecture = "v7-r",
560        arm_architecture = "v8-r"
561    ))
562))]
563mod arch_v4;
564
565pub mod sections;
566pub mod stacks;
567
/// Our default exception handler.
///
/// We end up here if an exception fires and the weak 'PROVIDE' in the link.x
/// file hasn't been over-ridden.
#[unsafe(no_mangle)]
pub extern "C" fn _default_handler() {
    // Diverging busy-wait: the spin-loop hint lets the CPU relax (save power
    // or yield pipeline resources) while we wait for a debugger or watchdog.
    fn park_forever() -> ! {
        loop {
            core::hint::spin_loop();
        }
    }
    park_forever()
}
578
// The Interrupt Vector Table.
//
// Must be 32-byte (2^5) aligned so that its address can be programmed into
// VBAR (the low five address bits are not representable there).
//
// Must be assembled as Arm (A32) code even on Thumb targets, because the
// Thumb Exception bit is cleared, so the CPU enters exceptions in Arm state.
//
// Each slot is a literal-pool `ldr pc, =symbol`, so the handlers may live
// anywhere in the address space (a plain `b` would limit the branch range).
// Slot order is Reset, Undefined, SVC, Prefetch Abort, Data Abort, the
// Hyp/reserved slot (used here for HVC), IRQ, FIQ.
#[cfg(target_arch = "arm")]
core::arch::global_asm!(
    r#"
    .pushsection .vector_table,"ax",%progbits
    .arm
    .global _vector_table
    .type _vector_table, %function
    .align 5
    _vector_table:
        ldr     pc, =_start
        ldr     pc, =_asm_undefined_handler
        ldr     pc, =_asm_svc_handler
        ldr     pc, =_asm_prefetch_abort_handler
        ldr     pc, =_asm_data_abort_handler
        ldr     pc, =_asm_hvc_handler
        ldr     pc, =_asm_irq_handler
        ldr     pc, =_asm_fiq_handler
    .size _vector_table, . - _vector_table
    .popsection
    "#
);
605
/// Arguments stacked on interrupt
///
/// This struct is very carefully designed to match the layout of the
/// registers pushed to the stack in our SVC handler.
///
/// `#[repr(C)]` pins the fields in declaration order with C layout, so `r0`
/// sits at the lowest address. The matching save code lives in the `arch_*`
/// modules (not shown here) — if that push sequence changes, this struct
/// must change with it.
#[derive(Debug, Clone, PartialEq, Eq)]
#[repr(C)]
pub struct Frame {
    /// Saved value of register r0 at the point the exception was taken.
    pub r0: u32,
    /// Saved value of register r1.
    pub r1: u32,
    /// Saved value of register r2.
    pub r2: u32,
    /// Saved value of register r3.
    pub r3: u32,
    /// Saved value of register r4.
    pub r4: u32,
    /// Saved value of register r5.
    pub r5: u32,
}
620
/// This macro expands to code for saving FPU context on entry to an exception
/// handler. It pushes a multiple of eight bytes to preserve AAPCS alignment.
/// It may damage R0-R3.
///
/// It should match `restore_fpu_context!`
///
/// On entry to this block, we assume that we are in exception context.
///
/// This is the soft-float variant (neither the `eabihf` ABI nor the
/// `eabi-fpu` feature is active): there is no FPU state to save, so the
/// expansion is an empty assembly fragment — zero bytes pushed, which is
/// trivially a multiple of eight.
#[cfg(not(any(target_abi = "eabihf", feature = "eabi-fpu")))]
#[macro_export]
macro_rules! save_fpu_context {
    () => {
        ""
    };
}
635
/// This macro expands to code for restoring context on exit from an exception
/// handler.
///
/// It should match `save_fpu_context!`.
///
/// This is the soft-float variant: `save_fpu_context!` pushed nothing, so
/// there is nothing to restore and the expansion is an empty assembly
/// fragment.
#[cfg(not(any(target_abi = "eabihf", feature = "eabi-fpu")))]
#[macro_export]
macro_rules! restore_fpu_context {
    () => {
        ""
    };
}
647
/// This macro expands to code for saving FPU context on entry to an exception
/// handler. It pushes a multiple of eight bytes to preserve AAPCS alignment.
/// It may damage R0-R3.
///
/// It should match `restore_fpu_context!`
///
/// On entry to this block, we assume that we are in exception context.
///
/// This version saves FPU state, assuming 16 DP registers (a 'D16' or 'D16SP'
/// FPU configuration). Note that SP-only FPUs still have DP registers
/// - each DP register holds two SP values.
///
/// EABI specifies D8-D15 as callee-save, and so we don't
/// preserve them because any C function we call to handle the exception will
/// preserve/restore them itself as required.
///
/// Stack accounting: `vpush {d0-d7}` stores 64 bytes and `push {r0-r1}`
/// (holding FPSCR and FPEXC) stores 8 more — 72 bytes total, a multiple of
/// eight as AAPCS requires.
///
/// NOTE(review): reading FPEXC via `vmrs` requires a privileged mode —
/// assumed to hold here because we are in exception context.
#[cfg(all(
    any(target_abi = "eabihf", feature = "eabi-fpu"),
    not(feature = "fpu-d32")
))]
#[macro_export]
macro_rules! save_fpu_context {
    () => {
        r#"
        // save all D16 FPU context, except D8-D15
        vpush   {{ d0-d7 }}
        vmrs    r0, FPSCR
        vmrs    r1, FPEXC
        push    {{ r0-r1 }}
        "#
    };
}
679
/// This macro expands to code for restoring context on exit from an exception
/// handler. It restores FPU state, assuming 16 DP registers (a 'D16' or
/// 'D16SP' FPU configuration).
///
/// It should match `save_fpu_context!`.
///
/// The pops mirror the pushes in reverse order: the two status words come
/// off first (FPEXC and FPSCR, written back via `vmsr`), then D0-D7. D8-D15
/// were never saved (callee-save under the EABI) so they are not restored.
#[cfg(all(
    any(target_abi = "eabihf", feature = "eabi-fpu"),
    not(feature = "fpu-d32")
))]
#[macro_export]
macro_rules! restore_fpu_context {
    () => {
        r#"
        // restore all D16 FPU context, except D8-D15
        pop     {{ r0-r1 }}
        vmsr    FPEXC, r1
        vmsr    FPSCR, r0
        vpop    {{ d0-d7 }}
        "#
    };
}
701
/// This macro expands to code for saving FPU context on entry to an exception
/// handler. It pushes a multiple of eight bytes to preserve AAPCS alignment.
/// It may damage R0-R3.
///
/// It should match `restore_fpu_context!`
///
/// On entry to this block, we assume that we are in exception context.
///
/// This version saves FPU state assuming 32 DP registers (a 'D32' FPU
/// configuration).
///
/// EABI specifies D8-D15 as callee-save, and so we don't
/// preserve them because any C function we call to handle the exception will
/// preserve/restore them itself as required.
///
/// Stack accounting: 64 bytes (D0-D7) + 128 bytes (D16-D31) + 8 bytes
/// (FPSCR and FPEXC in r0/r1) = 200 bytes, a multiple of eight as AAPCS
/// requires.
///
/// NOTE(review): reading FPEXC via `vmrs` requires a privileged mode —
/// assumed to hold here because we are in exception context.
#[cfg(all(any(target_abi = "eabihf", feature = "eabi-fpu"), feature = "fpu-d32"))]
#[macro_export]
macro_rules! save_fpu_context {
    () => {
        r#"
        // save all D32 FPU context, except D8-D15
        vpush   {{ d0-d7 }}
        vpush   {{ d16-d31 }}
        vmrs    r0, FPSCR
        vmrs    r1, FPEXC
        push    {{ r0-r1 }}
        "#
    };
}
730
/// This macro expands to code for restoring context on exit from an exception
/// handler. It restores FPU state, assuming 32 DP registers (a 'D32' FPU
/// configuration).
///
/// It should match `save_fpu_context!`.
///
/// The pops mirror the pushes in reverse order: status words first (FPEXC
/// and FPSCR via `vmsr`), then D16-D31, then D0-D7. D8-D15 were never saved
/// (callee-save under the EABI) so they are not restored.
#[cfg(all(any(target_abi = "eabihf", feature = "eabi-fpu"), feature = "fpu-d32"))]
#[macro_export]
macro_rules! restore_fpu_context {
    () => {
        r#"
        // restore all D32 FPU context, except D8-D15
        pop     {{ r0-r1 }}
        vmsr    FPEXC, r1
        vmsr    FPSCR, r0
        vpop    {{ d16-d31 }}
        vpop    {{ d0-d7 }}
        "#
    };
}
750
// Generic FIQ placeholder that's just a spin-loop (branch-to-self).
//
// No trampoline is provided for FIQ because it is usually
// performance-critical: users who need FIQ are expected to override
// `_asm_fiq_handler` with their own assembly routine (see the module docs),
// preserving only the state that matters to them.
#[cfg(target_arch = "arm")]
core::arch::global_asm!(
    r#"
    .pushsection .text._asm_default_fiq_handler

    // Our default FIQ handler
    .global _asm_default_fiq_handler
    .type _asm_default_fiq_handler, %function
    _asm_default_fiq_handler:
        b       _asm_default_fiq_handler
    .size    _asm_default_fiq_handler, . - _asm_default_fiq_handler
    .popsection
    "#,
);
766
/// This macro expands to code to turn on the FPU.
///
/// Two steps, both of which clobber r0:
/// 1. Set bits 20-23 (0xF00000) in the coprocessor access register
///    (CP15 c1,c0,2), granting full access to coprocessors 10 and 11 — the
///    VFP/NEON register file.
/// 2. Write 0x4000_0000 to FPEXC — presumably the EN (enable) bit at bit 30;
///    see the Arm ARM for the exact FPEXC layout.
#[cfg(all(target_arch = "arm", any(target_abi = "eabihf", feature = "eabi-fpu")))]
macro_rules! fpu_enable {
    () => {
        r#"
        // Allow VFP coprocessor access
        mrc     p15, 0, r0, c1, c0, 2
        orr     r0, r0, #0xF00000
        mcr     p15, 0, r0, c1, c0, 2
        // Enable VFP
        mov     r0, #0x40000000
        vmsr    fpexc, r0
        "#
    };
}
782
/// This macro expands to code that does nothing because there is no FPU.
///
/// Selected when neither the `eabihf` ABI nor the `eabi-fpu` feature is
/// active, so the start-up code can splice in `fpu_enable!()` unconditionally.
#[cfg(all(
    target_arch = "arm",
    not(any(target_abi = "eabihf", feature = "eabi-fpu"))
))]
macro_rules! fpu_enable {
    () => {
        r#"
        // no FPU - do nothing
        "#
    };
}
795
// Start-up code for Armv7-R (and Armv8-R once we've left EL2)
// Stack location and sizes are taken from sections defined in linker script
// We set up our stacks and `kmain` in system mode.
//
// Two routines are defined in this assembly block:
//
// * `_stack_setup_preallocated` — visits each Arm processor mode in turn
//   (UND, SVC, ABT, IRQ, FIQ, then SYS), loading that mode's banked `sp`
//   from the linker-provided `_*_stack_high_end` / `_*_stack_size` symbols.
//   The core number passed in `r0` is multiplied by the per-mode stack size
//   (`muls`/`subs`) so each core gets its own region below the shared
//   high-end address. Every `msr cpsr_c` writes a value with the I and F
//   bits set, so IRQ and FIQ stay masked throughout. It also clears the
//   SCTLR TE bit (via `mrc`/`bic`/`mcr`) because the vector table is Arm
//   code even on Thumb targets, then returns in System mode via the return
//   address saved in `r3` (the mode switches bank `lr`, so plain `lr`
//   cannot be used).
//
// * `_init_segments` — zeroes `.bss` and the stack region, then copies
//   `.data` from its load address (`__sidata`). It uses only r0-r3 and
//   `lr` and never touches `sp`, so it is callable before any stack has
//   been configured.
#[cfg(target_arch = "arm")]
core::arch::global_asm!(
    r#"
    // Work around https://github.com/rust-lang/rust/issues/127269
    .fpu vfp2

    // Configure a stack for every mode. Leaves you in sys mode.
    //
    // Pass the core number in r0
    .pushsection .text._stack_setup_preallocated
    .global _stack_setup_preallocated
    .arm
    .type _stack_setup_preallocated, %function
    _stack_setup_preallocated:
        // Save LR from whatever mode we're currently in
        mov     r3, lr
        // (we might not be in the same mode when we return).
        // Set stack pointer and mask interrupts for UND mode (Mode 0x1B)
        msr     cpsr_c, {und_mode}
        ldr	    r2, =_und_stack_high_end
        ldr	    r1, =_und_stack_size
        muls    r1, r1, r0
        subs    sp, r2, r1
        // Set stack pointer (right after) and mask interrupts for SVC mode (Mode 0x13)
        msr     cpsr_c, {svc_mode}
        ldr	    r2, =_svc_stack_high_end
        ldr	    r1, =_svc_stack_size
        muls    r1, r1, r0
        subs    sp, r2, r1
        // Set stack pointer (right after) and mask interrupts for ABT mode (Mode 0x17)
        msr     cpsr_c, {abt_mode}
        ldr	    r2, =_abt_stack_high_end
        ldr	    r1, =_abt_stack_size
        muls    r1, r1, r0
        subs    sp, r2, r1
        // Set stack pointer (right after) and mask interrupts for IRQ mode (Mode 0x12)
        msr     cpsr_c, {irq_mode}
        ldr	    r2, =_irq_stack_high_end
        ldr	    r1, =_irq_stack_size
        muls    r1, r1, r0
        subs    sp, r2, r1
        // Set stack pointer (right after) and mask interrupts for FIQ mode (Mode 0x11)
        msr     cpsr_c, {fiq_mode}
        ldr	    r2, =_fiq_stack_high_end
        ldr	    r1, =_fiq_stack_size
        muls    r1, r1, r0
        subs    sp, r2, r1
        // Set stack pointer (right after) and mask interrupts for System mode (Mode 0x1F)
        msr     cpsr_c, {sys_mode}
        ldr	    r2, =_sys_stack_high_end
        ldr	    r1, =_sys_stack_size
        muls    r1, r1, r0
        subs    sp, r2, r1
        // Clear the Thumb Exception bit because all vector table is written in Arm assembly
        // even on Thumb targets.
        mrc     p15, 0, r1, c1, c0, 0
        bic     r1, #{te_bit}
        mcr     p15, 0, r1, c1, c0, 0
        // return to caller
        bx      r3
    .size _stack_setup_preallocated, . - _stack_setup_preallocated
    .popsection

    // Initialises stacks, .data and .bss
    .pushsection .text._init_segments
    .arm
    .global _init_segments
    .type _init_segments, %function
    _init_segments:
        // Zero .bss
        ldr     r0, =__sbss
        ldr     r1, =__ebss
        mov     r2, 0
    0:
        cmp     r1, r0
        beq     1f
        stm     r0!, {{r2}}
        b       0b
    1:
        // Zero the stacks
        ldr     r0, =_stacks_low_end
        ldr     r1, =_stacks_high_end
        mov     r2, 0
    0:
        cmp     r1, r0
        beq     1f
        stm     r0!, {{r2}}
        b       0b
    1:
        // Initialise .data
        ldr     r0, =__sdata
        ldr     r1, =__edata
        ldr     r2, =__sidata
    0:
        cmp     r1, r0
        beq     1f
        ldm     r2!, {{r3}}
        stm     r0!, {{r3}}
        b       0b
    1:
    	// return to caller
        bx      lr
    .size _init_segments, . - _init_segments
    .popsection
    "#,
    // CPSR value: UND mode (0x1B), IRQ (I) and FIQ (F) masked
    und_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Und)
            .with_i(true)
            .with_f(true)
            .raw_value()
    },
    // CPSR value: SVC mode (0x13), IRQ and FIQ masked
    svc_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Svc)
            .with_i(true)
            .with_f(true)
            .raw_value()
    },
    // CPSR value: ABT mode (0x17), IRQ and FIQ masked
    abt_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Abt)
            .with_i(true)
            .with_f(true)
            .raw_value()
    },
    // CPSR value: FIQ mode (0x11), IRQ and FIQ masked
    fiq_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Fiq)
            .with_i(true)
            .with_f(true)
            .raw_value()
    },
    // CPSR value: IRQ mode (0x12), IRQ and FIQ masked
    irq_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Irq)
            .with_i(true)
            .with_f(true)
            .raw_value()
    },
    // CPSR value: System mode (0x1F), IRQ and FIQ masked — the mode we end in
    sys_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Sys)
            .with_i(true)
            .with_f(true)
            .raw_value()
    },
    // SCTLR mask with only the TE (Thumb Exception) bit set; used with BIC
    // to clear it so exceptions are taken in Arm state
    te_bit = const {
        aarch32_cpu::register::Sctlr::new_with_raw_value(0)
            .with_te(true)
            .raw_value()
    }
);
952
// Start-up code for CPUs that boot into EL1
//
// Go straight to our default routine
//
// Selected when `arm_architecture` (a cfg presumably set by this crate's
// build script — confirm) is neither "v7-a" nor "v8-r", i.e. cores with no
// EL2/Hyp mode to leave. The sequence is: initialise `.data`/`.bss`, set up
// the per-mode stacks for core 0, optionally enable the FPU (via the
// `fpu_enable!` fragment spliced in between the two string literals), zero
// the general-purpose registers, then call `kmain`. If `kmain` ever
// returns, we spin forever.
#[cfg(all(
    target_arch = "arm",
    not(any(arm_architecture = "v7-a", arm_architecture = "v8-r"))
))]
core::arch::global_asm!(
    r#"
    // Work around https://github.com/rust-lang/rust/issues/127269
    .fpu vfp2

    .pushsection .text.default_start
    .arm
    .global _default_start
    .type _default_start, %function
    _default_start:
        // Init .data and .bss
        bl      _init_segments
        // Set up stacks.
        mov     r0, #0
        bl      _stack_setup_preallocated
    "#,
    // Expands to FPU-enable assembly, or a no-op on soft-float targets
    fpu_enable!(),
    r#"
        // Zero all registers before calling kmain
        mov     r0, 0
        mov     r1, 0
        mov     r2, 0
        mov     r3, 0
        mov     r4, 0
        mov     r5, 0
        mov     r6, 0
        mov     r7, 0
        mov     r8, 0
        mov     r9, 0
        mov     r10, 0
        mov     r11, 0
        mov     r12, 0
        // Jump to application
        bl      kmain
        // In case the application returns, loop forever
        b       .
    .size _default_start, . - _default_start
    .popsection
    "#
);
1000
// Start-up code for Armv8-R to switch to EL1.
//
// There's only one Armv8-R CPU (the Cortex-R52) and the FPU is mandatory, so we
// always enable it.
//
// We boot into EL2, set up a stack pointer, and run `kmain` in EL1.
//
// Flow: if the CPSR mode field says we are in Hyp (EL2), we set an EL2
// stack, point HVBAR at `_vector_table`, open up HACTLR so EL1 can access
// the listed implementation-defined registers, then `eret` into System
// mode (IRQ/FIQ masked) at the local label `1:`. From there (or directly,
// if we were never in EL2) we program VBAR, initialise segments and
// stacks, enable the FPU, zero the registers, and call `kmain`.
//
// NOTE(review): the cfg gate also matches `arm_architecture = "v7-a"`, but
// the header comment and the `.cpu cortex-r52` directive below are
// Armv8-R specific — confirm this block is also intended/correct for
// Armv7-A builds.
#[cfg(all(
    any(arm_architecture = "v7-a", arm_architecture = "v8-r"),
    not(feature = "el2-mode")
))]
core::arch::global_asm!(
    r#"
    // Work around https://github.com/rust-lang/rust/issues/127269
    .fpu vfp2
    .cpu cortex-r52

    .pushsection .text.default_start
    .arm
    .global _default_start
    .type _default_start, %function
    _default_start:
        // Are we in EL2? If not, skip the EL2 setup portion
        mrs     r0, cpsr
        and     r0, r0, 0x1F
        cmp     r0, {cpsr_mode_hyp}
        bne     1f
        // Set stack pointer
        ldr     sp, =_hyp_stack_high_end
        // Set the HVBAR (for EL2) to _vector_table
        ldr     r1, =_vector_table
        mcr     p15, 4, r1, c12, c0, 0
        // Configure HACTLR to let us enter EL1
        mrc     p15, 4, r1, c1, c0, 1
        mov     r2, {hactlr_bits}
        orr     r1, r1, r2
        mcr     p15, 4, r1, c1, c0, 1
        // Program the SPSR - enter system mode (0x1F) in Arm mode with IRQ, FIQ masked
        mov		r1, {sys_mode}
        msr		spsr_hyp, r1
        adr		r1, 1f
        msr		elr_hyp, r1
        dsb
        isb
        eret
    1:
        // Set the VBAR (for EL1) to _vector_table. NB: This isn't required on
        // Armv7-R because that only supports 'low' (default) or 'high'.
        ldr     r0, =_vector_table
        mcr     p15, 0, r0, c12, c0, 0
        // Init .data and .bss
        bl      _init_segments
        // Set up stacks.
        mov     r0, #0
        bl      _stack_setup_preallocated
        "#,
        // Expands to FPU-enable assembly, or a no-op on soft-float targets
        fpu_enable!(),
        r#"
        // Zero all registers before calling kmain
        mov     r0, 0
        mov     r1, 0
        mov     r2, 0
        mov     r3, 0
        mov     r4, 0
        mov     r5, 0
        mov     r6, 0
        mov     r7, 0
        mov     r8, 0
        mov     r9, 0
        mov     r10, 0
        mov     r11, 0
        mov     r12, 0
        // Jump to application
        bl      kmain
        // In case the application returns, loop forever
        b       .
    .size _default_start, . - _default_start
    .popsection
    "#,
    // Mode-field value for Hyp mode, compared against CPSR[4:0]
    cpsr_mode_hyp = const ProcessorMode::Hyp as u8,
    // HACTLR bits granting EL1 access to the implementation-defined
    // registers named by each field; OR'd into the existing HACTLR value
    hactlr_bits = const {
        Hactlr::new_with_raw_value(0)
            .with_cpuactlr(true)
            .with_cdbgdci(true)
            .with_flashifregionr(true)
            .with_periphpregionr(true)
            .with_qosr(true)
            .with_bustimeoutr(true)
            .with_intmonr(true)
            .with_err(true)
            .with_testr1(true)
            .raw_value()
    },
    // SPSR value written before `eret`: System mode, Arm state, IRQ and
    // FIQ masked
    sys_mode = const {
        Cpsr::new_with_raw_value(0)
            .with_mode(ProcessorMode::Sys)
            .with_i(true)
            .with_f(true)
            .raw_value()
    }
);
1101
// Start-up code for Armv8-R to stay in EL2.
//
// There's only one Armv8-R CPU (the Cortex-R52) and the FPU is mandatory, so we
// always enable it.
//
// We boot into EL2, set up a HYP stack pointer, and run `kmain` in EL2.
//
// Flow: initialise `.data`/`.bss`, set the Hyp stack pointer, point HVBAR
// at `_vector_table`, mask IRQ and FIQ in the CPSR, enable the FPU, zero
// the general-purpose registers, and call `kmain`; loop forever if it
// returns.
//
// NOTE(review): `_init_segments` is called before `sp` is set here —
// confirm `_init_segments` never uses the stack (it appears to use only
// r0-r3 and return via `bx lr`).
//
// NOTE(review): unlike the other start-up blocks this uses `.section`
// (with no matching `.popsection`) and omits the `.arm` directive and
// `.fpu vfp2`/`.cpu` used elsewhere — confirm these differences are
// intentional.
#[cfg(all(arm_architecture = "v8-r", feature = "el2-mode"))]
core::arch::global_asm!(
    r#"
    // Work around https://github.com/rust-lang/rust/issues/127269
    .fpu vfp3

    .section .text.default_start
    .global _default_start
    .type _default_start, %function
    _default_start:
        // Init .data and .bss
        bl      _init_segments
        // Set stack pointer
        ldr     sp, =_hyp_stack_high_end
        // Set the HVBAR (for EL2) to _vector_table
        ldr     r1, =_vector_table
        mcr     p15, 4, r1, c12, c0, 0
        // Mask IRQ and FIQ
        mrs     r0, CPSR
        orr     r0, {irq_fiq}
        msr     CPSR, r0
    "#,
    // Expands to FPU-enable assembly, or a no-op on soft-float targets
    fpu_enable!(),
    r#"
        // Zero all registers before calling kmain
        mov     r0, 0
        mov     r1, 0
        mov     r2, 0
        mov     r3, 0
        mov     r4, 0
        mov     r5, 0
        mov     r6, 0
        mov     r7, 0
        mov     r8, 0
        mov     r9, 0
        mov     r10, 0
        mov     r11, 0
        mov     r12, 0
        // Jump to application
        bl      kmain
        // In case the application returns, loop forever
        b       .
    .size _default_start, . - _default_start
    "#,
    // CPSR mask with only the I (IRQ) and F (FIQ) bits set; OR'd into CPSR
    irq_fiq = const aarch32_cpu::register::Cpsr::new_with_raw_value(0).with_i(true).with_f(true).raw_value()
);
1154
/// LLVM intrinsic for memory barriers
///
/// Only required on Armv4T and Armv5TE, because Armv6K onwards support atomics.
///
/// These older architectures have no barrier instruction, so — like the
/// Linux kernel — we provide this symbol as an empty inline-assembly
/// statement. The default `asm!` options imply a memory clobber, which is
/// enough to stop the compiler reordering memory accesses across the call.
#[unsafe(no_mangle)]
#[cfg(any(arm_architecture = "v4t", arm_architecture = "v5te"))]
pub extern "C" fn __sync_synchronize() {
    // SAFETY: the asm string is empty, so no instructions are executed;
    // the statement exists purely as a compiler-level fence.
    unsafe { core::arch::asm!("") }
}