// esp_hal/soc/mod.rs

// Without the `rt` feature large parts of this module are never referenced;
// expect (instead of silencing one by one) the resulting unused warnings.
#![cfg_attr(not(feature = "rt"), expect(unused))]

use core::ops::Range;

// Re-export the chip-specific SOC definitions selected below.
pub use self::implementation::*;

// Exactly one of these `path` attributes applies per build, picking the
// submodule for the chip this build targets.
#[cfg_attr(esp32, path = "esp32/mod.rs")]
#[cfg_attr(esp32c2, path = "esp32c2/mod.rs")]
#[cfg_attr(esp32c3, path = "esp32c3/mod.rs")]
#[cfg_attr(esp32c6, path = "esp32c6/mod.rs")]
#[cfg_attr(esp32h2, path = "esp32h2/mod.rs")]
#[cfg_attr(esp32s2, path = "esp32s2/mod.rs")]
#[cfg_attr(esp32s3, path = "esp32s3/mod.rs")]
mod implementation;
15
16#[allow(unused)]
17pub(crate) fn is_valid_ram_address(address: usize) -> bool {
18    addr_in_range(address, memory_range!("DRAM"))
19}
20
21#[allow(unused)]
22pub(crate) fn is_slice_in_dram<T>(slice: &[T]) -> bool {
23    slice_in_range(slice, memory_range!("DRAM"))
24}
25
/// Checks whether `address` points into external PSRAM
/// (only compiled when the target has PSRAM support).
#[allow(unused)]
#[cfg(psram)]
pub(crate) fn is_valid_psram_address(address: usize) -> bool {
    let psram = crate::psram::psram_range();
    addr_in_range(address, psram)
}
31
/// Checks whether every byte occupied by `slice` lies in external PSRAM
/// (only compiled when the target has PSRAM support).
#[allow(unused)]
#[cfg(psram)]
pub(crate) fn is_slice_in_psram<T>(slice: &[T]) -> bool {
    let psram = crate::psram::psram_range();
    slice_in_range(slice, psram)
}
37
38#[allow(unused)]
39pub(crate) fn is_valid_memory_address(address: usize) -> bool {
40    if is_valid_ram_address(address) {
41        return true;
42    }
43    #[cfg(psram)]
44    if is_valid_psram_address(address) {
45        return true;
46    }
47
48    false
49}
50
/// Checks whether the memory occupied by `slice` lies entirely inside
/// the half-open address `range`.
fn slice_in_range<T>(slice: &[T], range: Range<usize>) -> bool {
    let ptrs = slice.as_ptr_range();
    let start = ptrs.start as usize;
    let end = ptrs.end as usize;
    // `start <= end` always holds, so checking `end > range.start` is
    // unnecessary. `end` is one past the last element, so it is allowed to
    // equal `range.end`, which is likewise one past the region's last valid
    // address.
    range.contains(&start) && end <= range.end
}
60
/// Returns `true` when `addr` falls inside the half-open `range`
/// (`range.start <= addr < range.end`).
pub(crate) fn addr_in_range(addr: usize, range: Range<usize>) -> bool {
    range.start <= addr && addr < range.end
}
64
/// RISC-V entry point (exported as `hal_main`, jumped to by the runtime's
/// reset code). Sets up the stack-protector canary, then calls the user's
/// `main` and never returns.
///
/// `a0`-`a2` are forwarded untouched — presumably the boot arguments left
/// in registers by the ROM/runtime; confirm against the runtime crate.
#[cfg(feature = "rt")]
#[cfg(riscv)]
#[unsafe(export_name = "hal_main")]
fn hal_main(a0: usize, a1: usize, a2: usize) -> ! {
    unsafe extern "Rust" {
        // This symbol will be provided by the user via `#[entry]`
        fn main(a0: usize, a1: usize, a2: usize) -> !;
    }

    // Initialize `__stack_chk_guard` before any user code can run.
    setup_stack_guard();

    // SAFETY: `#[entry]` provides `main` with exactly this signature.
    unsafe {
        main(a0, a1, a2);
    }
}
80
/// Xtensa-specific startup glue (ESP32 / ESP32-S2 / ESP32-S3), hooked into
/// the `xtensa-lx-rt` runtime via the `__init_data`, `__post_init` and
/// `__pre_init` hooks defined below.
#[cfg(all(xtensa, feature = "rt"))]
mod xtensa {
    use core::arch::{global_asm, naked_asm};

    /// The ESP32 has a first stage bootloader that handles loading program data
    /// into the right place therefore we skip loading it again. This function
    /// is called by xtensa-lx-rt in Reset.
    #[unsafe(export_name = "__init_data")]
    extern "C" fn __init_data() -> bool {
        false
    }

    /// Decides whether the RTC `.persistent` sections must be zeroed:
    /// `true` only on a cold boot (power-on) or when the reset reason is
    /// unknown. On any other reset the persistent data is kept as-is.
    extern "C" fn __init_persistent() -> bool {
        matches!(
            crate::system::reset_reason(),
            None | Some(crate::rtc_cntl::SocResetReason::ChipPowerOn)
        )
    }

    // Section bounds provided by the linker script, plus the zero-fill
    // helper exported by xtensa-lx-rt and the stack-protector canary word.
    unsafe extern "C" {
        static _rtc_fast_bss_start: u32;
        static _rtc_fast_bss_end: u32;
        static _rtc_fast_persistent_end: u32;
        static _rtc_fast_persistent_start: u32;

        static _rtc_slow_bss_start: u32;
        static _rtc_slow_bss_end: u32;
        static _rtc_slow_persistent_end: u32;
        static _rtc_slow_persistent_start: u32;

        fn _xtensa_lx_rt_zero_fill(s: *mut u32, e: *mut u32);

        static mut __stack_chk_guard: u32;
    }

    // Emit the addresses of the symbols used by `post_init` into the literal
    // pool so the naked function below can load them with `l32r`.
    global_asm!(
        "
        .literal sym_init_persistent, {__init_persistent}
        .literal sym_xtensa_lx_rt_zero_fill, {_xtensa_lx_rt_zero_fill}

        .literal sym_rtc_fast_bss_start, {_rtc_fast_bss_start}
        .literal sym_rtc_fast_bss_end, {_rtc_fast_bss_end}
        .literal sym_rtc_fast_persistent_end, {_rtc_fast_persistent_end}
        .literal sym_rtc_fast_persistent_start, {_rtc_fast_persistent_start}

        .literal sym_rtc_slow_bss_start, {_rtc_slow_bss_start}
        .literal sym_rtc_slow_bss_end, {_rtc_slow_bss_end}
        .literal sym_rtc_slow_persistent_end, {_rtc_slow_persistent_end}
        .literal sym_rtc_slow_persistent_start, {_rtc_slow_persistent_start}
        ",
        __init_persistent = sym __init_persistent,
        _xtensa_lx_rt_zero_fill = sym _xtensa_lx_rt_zero_fill,

        _rtc_fast_bss_end = sym _rtc_fast_bss_end,
        _rtc_fast_bss_start = sym _rtc_fast_bss_start,
        _rtc_fast_persistent_end = sym _rtc_fast_persistent_end,
        _rtc_fast_persistent_start = sym _rtc_fast_persistent_start,

        _rtc_slow_bss_end = sym _rtc_slow_bss_end,
        _rtc_slow_bss_start = sym _rtc_slow_bss_start,
        _rtc_slow_persistent_end = sym _rtc_slow_persistent_end,
        _rtc_slow_persistent_start = sym _rtc_slow_persistent_start,
    );

    /// xtensa-lx-rt's `__post_init` hook: zero-fills the RTC fast/slow BSS
    /// sections unconditionally, and the RTC `.persistent` sections only
    /// when `__init_persistent` returns true (cold boot / unknown reset).
    /// Written as a naked function using the literals emitted above.
    #[unsafe(export_name = "__post_init")]
    #[unsafe(naked)]
    #[allow(named_asm_labels)]
    extern "C" fn post_init() {
        naked_asm!(
            "
            entry a1, 0

            l32r   a6, sym_xtensa_lx_rt_zero_fill      // Pre-load address of zero-fill function

            l32r   a10, sym_rtc_fast_bss_start         // Set input range to .rtc_fast.bss
            l32r   a11, sym_rtc_fast_bss_end           //
            callx8 a6                                  // Zero-fill

            l32r   a10, sym_rtc_slow_bss_start         // Set input range to .rtc_slow.bss
            l32r   a11, sym_rtc_slow_bss_end           //
            callx8 a6                                  // Zero-fill

            l32r   a5,  sym_init_persistent            // Do we need to initialize persistent data?
            callx8 a5
            beqz   a10, .Lpost_init_return             // If not, skip initialization

            l32r   a10, sym_rtc_fast_persistent_start  // Set input range to .rtc_fast.persistent
            l32r   a11, sym_rtc_fast_persistent_end    //
            callx8 a6                                  // Zero-fill

            l32r   a10, sym_rtc_slow_persistent_start  // Set input range to .rtc_slow.persistent
            l32r   a11, sym_rtc_slow_persistent_end    //
            callx8 a6                                  // Zero-fill

        .Lpost_init_return:
            retw.n
        ",
        )
    }

    // Emit the canary symbol's address and the configured guard value into
    // the literal pool for `esp32_reset` below. On the S3 these literals go
    // into `.rwtext`, matching the section `esp32_reset` is placed in.
    #[cfg(esp32s3)]
    global_asm!(".section .rwtext,\"ax\",@progbits");
    global_asm!(
        "
        .literal sym_stack_chk_guard, {__stack_chk_guard}
        .literal stack_guard_value, {stack_guard_value}
        ",
        __stack_chk_guard = sym __stack_chk_guard,
        stack_guard_value = const esp_config::esp_config_int!(
            u32,
            "ESP_HAL_CONFIG_STACK_GUARD_VALUE"
        )
    );

    /// xtensa-lx-rt's `__pre_init` hook: seeds `__stack_chk_guard` with the
    /// configured value, then calls `esp32_init`.
    #[cfg_attr(esp32s3, unsafe(link_section = ".rwtext"))]
    #[unsafe(export_name = "__pre_init")]
    #[unsafe(naked)]
    unsafe extern "C" fn esp32_reset() {
        // Set up stack protector value before jumping to a rust function
        naked_asm! {
            "
            entry a1, 0x20

            // Set up the stack protector value
            l32r   a2, sym_stack_chk_guard
            l32r   a3, stack_guard_value
            s32i.n a3, a2, 0

            call8 {esp32_init}

            retw.n
            ",
            esp32_init = sym esp32_init
        }
    }

    /// Configures the CPU caches and installs the interrupt handling setup.
    /// Kept in `.rwtext` (internal RAM) on the S3 — presumably because it
    /// runs before the flash cache is configured; confirm against the
    /// cache-setup code.
    #[cfg_attr(esp32s3, unsafe(link_section = ".rwtext"))]
    fn esp32_init() {
        unsafe {
            super::configure_cpu_caches();
        }

        crate::interrupt::setup_interrupts();
    }
}
226
/// Stack-smashing-protection failure hook (`__stack_chk_fail`), called by
/// compiler-generated canary checks when `__stack_chk_guard` has been
/// overwritten. Panics, so it never returns normally.
#[cfg(feature = "rt")]
#[unsafe(export_name = "__stack_chk_fail")]
unsafe extern "C" fn stack_chk_fail() {
    panic!("Stack corruption detected");
}
232
/// Initializes the stack-protector canary (`__stack_chk_guard`) with the
/// configured constant. Called from `hal_main` on RISC-V chips before the
/// user's `main` runs.
#[cfg(all(feature = "rt", riscv))]
fn setup_stack_guard() {
    // The canary word is allocated by the linker script.
    unsafe extern "C" {
        static mut __stack_chk_guard: u32;
    }

    unsafe {
        let stack_chk_guard = core::ptr::addr_of_mut!(__stack_chk_guard);
        // we _should_ use a random value but we don't have a good source for random
        // numbers here
        stack_chk_guard.write_volatile(esp_config::esp_config_int!(
            u32,
            "ESP_HAL_CONFIG_STACK_GUARD_VALUE"
        ));
    }
}
249
/// Arms the debug-module watchpoint over the main stack's guard word
/// (`__stack_chk_guard`), so corruption of the canary is detected as it
/// happens rather than at the next canary check.
#[cfg(all(feature = "rt", stack_guard_monitoring))]
pub(crate) fn enable_main_stack_guard_monitoring() {
    unsafe extern "C" {
        static mut __stack_chk_guard: u32;
    }

    // Taking the symbol's address is safe; cast it straight to `usize`
    // instead of round-tripping through `u32`.
    let guard_addr = core::ptr::addr_of_mut!(__stack_chk_guard) as usize;

    // SAFETY: the guard word is a static, so its address is valid for the
    // whole program; only the watchpoint-configuration call needs `unsafe`.
    unsafe {
        crate::debugger::set_stack_watchpoint(guard_addr);
    }
}
261
/// Protects MTVEC and the trap handlers from stray writes by placing a
/// debug watchpoint over (a NAPOT-aligned prefix of) the trap section.
///
/// Why not use PMP? On C2/C3 the bootloader locks all available PMP entries.
/// And additionally we write to MTVEC for direct-vectoring and we write
/// to __EXTERNAL_INTERRUPTS when setting an interrupt handler.
#[cfg(all(riscv, write_vec_table_monitoring))]
pub(crate) fn setup_trap_section_protection() {
    // Skip when a debugger is attached, unless we're configured to keep the
    // monitoring active in that case.
    if !cfg!(stack_guard_monitoring_with_debugger_connected)
        && crate::debugger::debugger_connected()
    {
        return;
    }

    // Linker-provided symbols: their *addresses* encode the section's
    // length and origin.
    unsafe extern "C" {
        static _rwtext_len: u32;
        static _trap_section_origin: u32;
    }

    let rwtext_len = core::ptr::addr_of!(_rwtext_len) as usize;

    // Protect as much as possible via NAPOT: round the length down to the
    // largest power of two that fits. `checked_ilog2` returns `None` for a
    // zero-length section; the previous `usize::BITS - leading_zeros() - 1`
    // expression underflowed in that case, and its `len == 0` fallback was
    // unreachable (`1 << k` is never 0).
    let Some(log2) = rwtext_len.checked_ilog2() else {
        warn!("No trap vector protection available");
        return;
    };
    let len = 1usize << log2;

    // protect MTVEC and trap handlers
    // (probably plus some more bytes because of NAPOT)
    // via watchpoint 1.
    let addr = core::ptr::addr_of!(_trap_section_origin) as usize;

    unsafe {
        crate::debugger::set_watchpoint(1, addr, len);
    }
}
296}