#![cfg_attr(not(feature = "rt"), expect(unused))]
use core::{ops::Range, sync::atomic::Ordering};
use portable_atomic::AtomicU32;
use procmacros::ram;
pub use self::implementation::*;
use crate::efuse::ChipRevision;
#[cfg_attr(esp32, path = "esp32/mod.rs")]
#[cfg_attr(esp32c2, path = "esp32c2/mod.rs")]
#[cfg_attr(esp32c3, path = "esp32c3/mod.rs")]
#[cfg_attr(esp32c5, path = "esp32c5/mod.rs")]
#[cfg_attr(esp32c6, path = "esp32c6/mod.rs")]
#[cfg_attr(esp32c61, path = "esp32c61/mod.rs")]
#[cfg_attr(esp32h2, path = "esp32h2/mod.rs")]
#[cfg_attr(esp32s2, path = "esp32s2/mod.rs")]
#[cfg_attr(esp32s3, path = "esp32s3/mod.rs")]
mod implementation;
/// Returns whether `address` lies inside the chip's internal DRAM region
/// (as defined by the chip-specific `memory_range!("DRAM")` expansion).
#[allow(unused)]
pub(crate) fn is_valid_ram_address(address: usize) -> bool {
    addr_in_range(address, memory_range!("DRAM"))
}
/// Returns whether the entire `slice` lies inside internal DRAM.
#[allow(unused)]
pub(crate) fn is_slice_in_dram<T>(slice: &[T]) -> bool {
    slice_in_range(slice, memory_range!("DRAM"))
}
/// Returns whether `address` lies inside the mapped external PSRAM region.
///
/// Only compiled on chips that have PSRAM support.
#[allow(unused)]
#[cfg(soc_has_psram)]
pub(crate) fn is_valid_psram_address(address: usize) -> bool {
    addr_in_range(address, crate::psram::psram_range())
}
/// Returns whether the entire `slice` lies inside the mapped external PSRAM
/// region. Only compiled on chips that have PSRAM support.
#[allow(unused)]
#[cfg(soc_has_psram)]
pub(crate) fn is_slice_in_psram<T>(slice: &[T]) -> bool {
    slice_in_range(slice, crate::psram::psram_range())
}
/// Returns whether `address` lies in any RAM region usable by software:
/// internal DRAM, plus external PSRAM on chips where it is compiled in.
#[allow(unused)]
pub(crate) fn is_valid_memory_address(address: usize) -> bool {
    if is_valid_ram_address(address) {
        return true;
    }
    // PSRAM is only checked on chips that have it; on other chips this
    // statement is compiled out entirely.
    #[cfg(soc_has_psram)]
    if is_valid_psram_address(address) {
        return true;
    }
    false
}
/// Returns whether every element of `slice` lies within `range`.
///
/// The slice's start address must fall inside the (half-open) range, and its
/// one-past-the-end address may extend up to, and including, `range.end`.
fn slice_in_range<T>(slice: &[T], range: Range<usize>) -> bool {
    let ptrs = slice.as_ptr_range();
    let begin = ptrs.start as usize;
    let finish = ptrs.end as usize;
    range.contains(&begin) && finish <= range.end
}
/// Returns whether `addr` falls within the half-open `range`
/// (start inclusive, end exclusive).
pub(crate) fn addr_in_range(addr: usize, range: Range<usize>) -> bool {
    range.start <= addr && addr < range.end
}
/// RISC-V entry point, called by the runtime after low-level startup
/// (exported as `hal_main` for the startup code to jump to).
///
/// Installs the stack-guard value, then transfers control to the user
/// binary's `main`, forwarding the three startup registers. Never returns.
#[cfg(feature = "rt")]
#[cfg(riscv)]
#[unsafe(export_name = "hal_main")]
fn hal_main(a0: usize, a1: usize, a2: usize) -> ! {
    // `main` is defined by the user binary and resolved at link time.
    unsafe extern "Rust" {
        fn main(a0: usize, a1: usize, a2: usize) -> !;
    }
    setup_stack_guard();
    unsafe {
        main(a0, a1, a2);
    }
}
/// Xtensa-specific runtime startup pieces (ESP32 / ESP32-S2 / ESP32-S3):
/// RTC-memory initialization hooks and the naked `__pre_init` entry that
/// installs the stack guard before any Rust code with a stack frame runs.
#[cfg(all(xtensa, feature = "rt"))]
mod xtensa {
    use core::arch::{global_asm, naked_asm};

    /// Hook queried by the xtensa-lx runtime; always returns `false`.
    ///
    /// NOTE(review): presumably this tells the runtime not to (re)initialize
    /// `.data` because the bootloader already did — confirm against the
    /// xtensa-lx-rt contract for `__init_data`.
    #[unsafe(export_name = "__init_data")]
    extern "C" fn __init_data() -> bool {
        false
    }

    /// Returns whether the RTC `.persistent` sections should be zeroed:
    /// only on a power-on reset (or when the reset reason is unknown), so
    /// persistent data survives all other reset types.
    extern "C" fn __init_persistent() -> bool {
        matches!(
            crate::system::reset_reason(),
            None | Some(crate::rtc_cntl::SocResetReason::ChipPowerOn)
        )
    }

    // Section boundary symbols provided by the linker script, plus the
    // zero-fill helper from the xtensa-lx runtime and the stack guard slot.
    unsafe extern "C" {
        static _rtc_fast_bss_start: u32;
        static _rtc_fast_bss_end: u32;
        static _rtc_fast_persistent_end: u32;
        static _rtc_fast_persistent_start: u32;
        static _rtc_slow_bss_start: u32;
        static _rtc_slow_bss_end: u32;
        static _rtc_slow_persistent_end: u32;
        static _rtc_slow_persistent_start: u32;
        fn _xtensa_lx_rt_zero_fill(s: *mut u32, e: *mut u32);
        static mut __stack_chk_guard: u32;
    }

    // Literal-pool entries so the naked `post_init` below can load the
    // addresses of these symbols with `l32r` (Xtensa cannot encode 32-bit
    // immediates directly in instructions).
    global_asm!(
        "
        .literal sym_init_persistent, {__init_persistent}
        .literal sym_xtensa_lx_rt_zero_fill, {_xtensa_lx_rt_zero_fill}
        .literal sym_rtc_fast_bss_start, {_rtc_fast_bss_start}
        .literal sym_rtc_fast_bss_end, {_rtc_fast_bss_end}
        .literal sym_rtc_fast_persistent_end, {_rtc_fast_persistent_end}
        .literal sym_rtc_fast_persistent_start, {_rtc_fast_persistent_start}
        .literal sym_rtc_slow_bss_start, {_rtc_slow_bss_start}
        .literal sym_rtc_slow_bss_end, {_rtc_slow_bss_end}
        .literal sym_rtc_slow_persistent_end, {_rtc_slow_persistent_end}
        .literal sym_rtc_slow_persistent_start, {_rtc_slow_persistent_start}
        ",
        __init_persistent = sym __init_persistent,
        _xtensa_lx_rt_zero_fill = sym _xtensa_lx_rt_zero_fill,
        _rtc_fast_bss_end = sym _rtc_fast_bss_end,
        _rtc_fast_bss_start = sym _rtc_fast_bss_start,
        _rtc_fast_persistent_end = sym _rtc_fast_persistent_end,
        _rtc_fast_persistent_start = sym _rtc_fast_persistent_start,
        _rtc_slow_bss_end = sym _rtc_slow_bss_end,
        _rtc_slow_bss_start = sym _rtc_slow_bss_start,
        _rtc_slow_persistent_end = sym _rtc_slow_persistent_end,
        _rtc_slow_persistent_start = sym _rtc_slow_persistent_start,
    );

    /// `__post_init` hook: zero-fills the RTC fast/slow `.bss` sections, and
    /// additionally the `.persistent` sections when [`__init_persistent`]
    /// says so (i.e. on power-on reset).
    ///
    /// Written as a naked function with windowed calls (`entry`/`callx4`/
    /// `retw.n`); the callee result of `__init_persistent` arrives in `a6`.
    #[unsafe(export_name = "__post_init")]
    #[unsafe(naked)]
    #[allow(named_asm_labels)]
    extern "C" fn post_init() {
        naked_asm!(
            "
            entry a1, 0x10 // 4 words for callx4 spill area
            l32r a2, sym_xtensa_lx_rt_zero_fill // Pre-load address of zero-fill function
            l32r a6, sym_rtc_fast_bss_start // Set input range to .rtc_fast.bss
            l32r a7, sym_rtc_fast_bss_end //
            callx4 a2 // Zero-fill
            l32r a6, sym_rtc_slow_bss_start // Set input range to .rtc_slow.bss
            l32r a7, sym_rtc_slow_bss_end //
            callx4 a2 // Zero-fill
            l32r a3, sym_init_persistent // Do we need to initialize persistent data?
            callx4 a3
            beqz a6, .Lpost_init_return // If not, skip initialization
            l32r a6, sym_rtc_fast_persistent_start // Set input range to .rtc_fast.persistent
            l32r a7, sym_rtc_fast_persistent_end //
            callx4 a2 // Zero-fill
            l32r a6, sym_rtc_slow_persistent_start // Set input range to .rtc_slow.persistent
            l32r a7, sym_rtc_slow_persistent_end //
            callx4 a2 // Zero-fill
            .Lpost_init_return:
            retw.n
            ",
        )
    }

    // On the S3 the reset path must live in `.rwtext`; switch sections before
    // emitting the literals used by `esp32_reset`.
    #[cfg(esp32s3)]
    global_asm!(".section .rwtext,\"ax\",@progbits");
    // Literal-pool entries for `esp32_reset`: the guard slot address, the
    // configured guard value, and the address of `esp32_init`.
    global_asm!(
        "
        .literal sym_stack_chk_guard, {__stack_chk_guard}
        .literal stack_guard_value, {stack_guard_value}
        .literal sym_esp32_init, {__esp32_init}
        ",
        __stack_chk_guard = sym __stack_chk_guard,
        stack_guard_value = const esp_config::esp_config_int!(
            u32,
            "ESP_HAL_CONFIG_STACK_GUARD_VALUE"
        ),
        __esp32_init = sym esp32_init,
    );

    /// `__pre_init` hook: stores the configured stack-guard value into
    /// `__stack_chk_guard`, then calls [`esp32_init`]. Naked so it runs
    /// before any compiler-generated prologue touches the stack guard.
    #[cfg_attr(esp32s3, unsafe(link_section = ".rwtext"))]
    #[unsafe(export_name = "__pre_init")]
    #[unsafe(naked)]
    unsafe extern "C" fn esp32_reset() {
        naked_asm! {
            "
            entry a1, 0x10 // 4 words for callx4 spill area
            // Set up the stack protector value
            l32r a2, sym_stack_chk_guard
            l32r a3, stack_guard_value
            s32i.n a3, a2, 0
            l32r a2, sym_esp32_init
            callx4 a2
            retw.n
            "
        }
    }

    /// Early init called from [`esp32_reset`]: configures the CPU caches and
    /// installs the interrupt handlers.
    #[cfg_attr(esp32s3, unsafe(link_section = ".rwtext"))]
    fn esp32_init() {
        unsafe {
            super::configure_cpu_caches();
        }
        crate::interrupt::setup_interrupts();
    }
}
/// Called by compiler-emitted stack-smashing-protector checks when the
/// `__stack_chk_guard` value has been overwritten; aborts via panic since
/// the stack frame can no longer be trusted.
#[cfg(feature = "rt")]
#[unsafe(export_name = "__stack_chk_fail")]
unsafe extern "C" fn stack_chk_fail() {
    panic!("Stack corruption detected");
}
/// Writes the configured stack-guard value into the linker-provided
/// `__stack_chk_guard` slot (RISC-V; Xtensa does this in naked asm instead).
#[cfg(all(feature = "rt", riscv))]
fn setup_stack_guard() {
    unsafe extern "C" {
        static mut __stack_chk_guard: u32;
    }
    unsafe {
        let stack_chk_guard = core::ptr::addr_of_mut!(__stack_chk_guard);
        // Volatile: the store must not be elided even though nothing in this
        // compilation unit reads the guard back.
        stack_chk_guard.write_volatile(esp_config::esp_config_int!(
            u32,
            "ESP_HAL_CONFIG_STACK_GUARD_VALUE"
        ));
    }
}
/// Asserts that the current stack pointer lies within the CPU0 stack bounds
/// provided by the linker script; panics with the offending addresses if not.
#[cfg(feature = "rt")]
pub(crate) fn ensure_stack_pointer_in_range() {
    unsafe extern "C" {
        static _stack_end_cpu0: u32;
        static _stack_start_cpu0: u32;
    }
    // Read SP with the architecture's move instruction (`mov` on Xtensa,
    // `mv` on RISC-V).
    let current_sp: usize;
    cfg_if::cfg_if! {
        if #[cfg(xtensa)] {
            unsafe { core::arch::asm!("mov {0}, sp", out(reg) current_sp); }
        } else {
            unsafe { core::arch::asm!("mv {0}, sp", out(reg) current_sp); }
        }
    }
    // The stack grows downward: `_stack_start_cpu0` is the high address.
    let stack_bottom = (&raw const _stack_end_cpu0) as usize;
    let stack_top = (&raw const _stack_start_cpu0) as usize;
    assert!(
        current_sp > stack_bottom && current_sp <= stack_top,
        "stack pointer out of range: sp=0x{:x}, bottom=0x{:x}, top=0x{:x}",
        current_sp,
        stack_bottom,
        stack_top
    );
}
/// Arms a debug watchpoint on the `__stack_chk_guard` slot so a stack
/// overflow that clobbers the guard traps immediately instead of being
/// detected only at the next function epilogue check.
#[cfg(all(feature = "rt", stack_guard_monitoring))]
pub(crate) fn enable_main_stack_guard_monitoring() {
    unsafe {
        unsafe extern "C" {
            static mut __stack_chk_guard: u32;
        }
        let guard_addr = core::ptr::addr_of_mut!(__stack_chk_guard) as *mut _ as u32;
        crate::debugger::set_stack_watchpoint(guard_addr as usize);
    }
}
/// Returns whether the trap-vector write protection should be (or stays)
/// active: always when configured to coexist with a debugger, otherwise only
/// while no debugger is attached (a debugger needs the watchpoint for itself).
#[cfg(all(riscv, write_vec_table_monitoring))]
pub(crate) fn trap_section_protected() -> bool {
    cfg!(stack_guard_monitoring_with_debugger_connected) || !crate::debugger::debugger_connected()
}
/// Arms a write watchpoint over the trap-vector/`.rwtext` section so stray
/// writes to executable memory trap immediately.
///
/// The watchpoint covers the largest power-of-two length that fits inside
/// the section (hardware watchpoints are power-of-two sized). If the section
/// is empty, a warning is logged and no watchpoint is set.
#[cfg(all(riscv, write_vec_table_monitoring))]
pub(crate) fn setup_trap_section_protection() {
    if !trap_section_protected() {
        return;
    }
    unsafe extern "C" {
        static _rwtext_len: u32;
        static _trap_section_origin: u32;
    }
    // The linker encodes the section length as the *address* of this symbol.
    let rwtext_len = core::ptr::addr_of!(_rwtext_len) as usize;
    // floor(log2(rwtext_len)). Using `checked_sub` guards the `rwtext_len == 0`
    // case, which previously underflowed (`usize::BITS - leading_zeros() - 1`)
    // before the zero-length check could run.
    let Some(shift) = (usize::BITS - 1).checked_sub(rwtext_len.leading_zeros()) else {
        warn!("No trap vector protection available");
        return;
    };
    let len = 1usize << shift;
    let addr = core::ptr::addr_of!(_trap_section_origin) as usize;
    unsafe {
        crate::debugger::set_watchpoint(1, addr, len);
    }
}
// Cached packed chip revision. Bit 31 ([`LOADED`]) marks the cache as
// populated; the low 16 bits hold the packed revision value.
static CHIP_REVISION: AtomicU32 = AtomicU32::new(0);
// Flag bit in `CHIP_REVISION` indicating the eFuse value has been read.
const LOADED: u32 = 1 << 31;
// Largest representable revision, used as an open upper bound in range checks.
const MAX_REVISION: ChipRevision = ChipRevision::from_packed(0xFFFF);
/// Reads the chip revision from eFuse, stores it (with the [`LOADED`] flag)
/// into [`CHIP_REVISION`], and returns the packed value.
///
/// `#[cold]`: this slow path runs only on the first revision query.
#[cold]
fn load_chip_revision_from_efuse() -> u16 {
    let chip_revision = crate::efuse::chip_revision();
    let chip_revision = chip_revision.packed();
    CHIP_REVISION.store(chip_revision as u32 | LOADED, Ordering::Release);
    chip_revision
}
/// Returns the chip revision, reading it from eFuse on first use and from
/// the [`CHIP_REVISION`] cache afterwards.
#[ram]
fn load_chip_revision() -> ChipRevision {
    let cached = CHIP_REVISION.load(Ordering::Acquire);
    if cached & LOADED != 0 {
        // Fast path: the revision was already read out of eFuse.
        ChipRevision::from_packed((cached & u16::MAX as u32) as u16)
    } else {
        // Slow path: first query; read the eFuse and populate the cache.
        ChipRevision::from_packed(load_chip_revision_from_efuse())
    }
}
/// Returns whether the chip revision falls within `range` (half-open).
///
/// Uses the build-time configured minimum revision to resolve the check at
/// compile time where possible, avoiding the eFuse read entirely.
fn chip_revision_in_range(range: Range<ChipRevision>) -> bool {
    const BUILD_TIME_MIN_REV: ChipRevision = ChipRevision::from_combined(
        esp_config::esp_config_int!(u16, "ESP_HAL_CONFIG_MIN_CHIP_REVISION"),
    );
    // The actual chip is guaranteed to be at least BUILD_TIME_MIN_REV, so a
    // range ending below it can never match.
    #[allow(
        clippy::absurd_extreme_comparisons,
        reason = "Not absurd depending on configuration"
    )]
    if range.end < BUILD_TIME_MIN_REV {
        return false;
    }
    // A range covering everything from at most BUILD_TIME_MIN_REV up to the
    // open maximum always matches — no need to read the real revision.
    #[allow(
        clippy::absurd_extreme_comparisons,
        reason = "Not absurd depending on configuration"
    )]
    if range.start <= BUILD_TIME_MIN_REV && range.end == MAX_REVISION {
        return true;
    }
    // Otherwise the answer depends on the actual (cached) revision.
    let chip_revision = load_chip_revision();
    range.start <= chip_revision && chip_revision < range.end
}
/// Returns whether the chip revision is `revision` or newer.
#[allow(dead_code)]
pub(crate) fn chip_revision_above(revision: ChipRevision) -> bool {
    chip_revision_in_range(revision..MAX_REVISION)
}
/// Returns whether the chip has the same major revision as `revision` and a
/// minor revision of at least `revision.minor` (i.e. the range is capped at
/// the next major revision).
#[allow(dead_code)]
pub(crate) fn chip_minor_revision_above(revision: ChipRevision) -> bool {
    let next_major = ChipRevision {
        major: revision.major + 1,
        minor: 0,
    };
    chip_revision_in_range(revision..next_major)
}