#![allow(clippy::missing_safety_doc)]
use core::mem::MaybeUninit;
#[cfg(feature = "os-irq")]
use core::sync::atomic::AtomicU8;
use core::sync::atomic::{AtomicBool, AtomicU32, Ordering};
#[cfg(feature = "nrf9160")]
use nrf9160_pac as pac;
#[cfg(feature = "nrf9120")]
use nrf9120_pac as pac;
/// Number of IPC SEND/RECEIVE channel configuration registers handled here.
const IPC_CONF_NUM: usize = 8;
/// Flag raised by `nrf_modem_os_event_notify` and consumed (swap-to-false)
/// by `nrf_modem_os_timedwait` to wake spinning waiters.
static NOTIFY_ACTIVE: AtomicBool = AtomicBool::new(false);
/// IRQ number the OS glue is driven from (only with the `os-irq` feature);
/// read by `nrf_modem_os_is_in_isr`.
#[cfg(feature = "os-irq")]
pub(crate) static OS_IRQ: AtomicU8 = AtomicU8::new(0);
/// Snapshot of the IPC peripheral configuration, mirroring nrfx's
/// `nrfx_ipc_config_t`: per-channel SEND/RECEIVE bitmasks plus the
/// receive-interrupt enable mask (applied by `nrfx_ipc_config_load`).
#[derive(Debug, Clone)]
pub struct NrfxIpcConfig {
    // Channel bitmask written to SEND_CNF[i] for each send task.
    send_task_config: [u32; IPC_CONF_NUM],
    // Channel bitmask written to RECEIVE_CNF[i] for each receive event.
    receive_event_config: [u32; IPC_CONF_NUM],
    // Bitmask of receive events whose interrupt gets enabled via INTENSET.
    receive_events_enabled: u32,
}
/// C-compatible callback invoked once per pending IPC receive event.
type NrfxIpcHandler = extern "C" fn(event_idx: u8, ptr: *mut u8);
/// Error codes mirroring nrfx's `nrfx_err_t` values; all are offsets from the
/// nrfx error base `0x0BAD0000` (where `NRFX_SUCCESS` equals the base itself).
#[repr(u32)]
#[derive(Debug, Copy, Clone)]
pub enum NrfxErr {
    Success = 0x0BAD0000,
    ErrorInternal = (0x0BAD0000 + 1),
    ErrorNoMem = (0x0BAD0000 + 2),
    ErrorNotSupported = (0x0BAD0000 + 3),
    ErrorInvalidParam = (0x0BAD0000 + 4),
    ErrorInvalidState = (0x0BAD0000 + 5),
    ErrorInvalidLength = (0x0BAD0000 + 6),
    ErrorTimeout = (0x0BAD0000 + 7),
    ErrorForbidden = (0x0BAD0000 + 8),
    ErrorNull = (0x0BAD0000 + 9),
    ErrorInvalidAddr = (0x0BAD0000 + 10),
    ErrorBusy = (0x0BAD0000 + 11),
    ErrorAlreadyInitialized = (0x0BAD0000 + 12),
}
/// Last errno value recorded via `nrf_modem_os_errno_set`.
static LAST_ERROR: core::sync::atomic::AtomicIsize = core::sync::atomic::AtomicIsize::new(0);
/// Opaque context value passed back to the registered IPC handler.
static IPC_CONTEXT: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0);
/// Address of the registered `NrfxIpcHandler`; 0 means no handler registered.
static IPC_HANDLER: core::sync::atomic::AtomicUsize = core::sync::atomic::AtomicUsize::new(0);
/// OS-glue initialization hook required by libmodem; this port has no
/// per-boot state to set up, so it is intentionally empty.
#[no_mangle]
pub extern "C" fn nrf_modem_os_init() {
}
/// OS-glue shutdown hook required by libmodem; nothing to tear down here.
#[no_mangle]
pub extern "C" fn nrf_modem_os_shutdown() {
}
#[no_mangle]
pub extern "C" fn nrf_modem_os_errno_set(errno: isize) {
LAST_ERROR.store(errno, core::sync::atomic::Ordering::SeqCst);
}
pub fn get_last_error() -> isize {
LAST_ERROR.load(core::sync::atomic::Ordering::SeqCst)
}
/// Busy-wait for approximately `usec` microseconds; non-positive values
/// return immediately.
///
/// The cycle count assumes 64 CPU cycles per microsecond (64 MHz core clock)
/// — NOTE(review): confirm against the target's actual clock configuration.
#[no_mangle]
pub extern "C" fn nrf_modem_os_busywait(usec: i32) {
    if usec > 0 {
        // saturating_mul: `usec * 64` overflows u32 for usec > ~67 million,
        // which would panic in debug builds. Saturating still yields
        // "wait a very long time", which is the intended semantics.
        cortex_m::asm::delay((usec as u32).saturating_mul(64));
    }
}
/// Block the caller until the modem signals an event or the timeout expires.
///
/// `timeout` is in milliseconds and is decremented in place as time passes:
/// `-1` waits forever, `0` polls once. Returns 0 on wake-up, or a negated NRF
/// errno: `NRF_EPERM` from ISR context, `NRF_ESHUTDOWN` if the modem is not
/// initialized, `NRF_EAGAIN` on timeout.
///
/// # Safety
/// `timeout` must be a valid, writable pointer to an `i32`.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_timedwait(_context: u32, timeout: *mut i32) -> i32 {
    // Spinning inside an interrupt handler could never be woken — refuse.
    if nrf_modem_os_is_in_isr() {
        return -(nrfxlib_sys::NRF_EPERM as i32);
    }
    #[cfg(not(feature = "dect"))]
    if !nrfxlib_sys::nrf_modem_is_initialized() {
        return -(nrfxlib_sys::NRF_ESHUTDOWN as i32);
    }
    // NOTE(review): timeouts below -2 return success immediately — presumably
    // treated as an out-of-range value; confirm against the nrf_modem OS spec.
    if *timeout < -2 {
        0i32
    } else {
        loop {
            // Poll roughly once per millisecond.
            nrf_modem_os_busywait(1000);
            // Consume the notification flag if it was raised.
            if NOTIFY_ACTIVE.swap(false, Ordering::Relaxed) {
                return 0;
            }
            match *timeout {
                -1 => continue,                                // wait forever
                0 => return -(nrfxlib_sys::NRF_EAGAIN as i32), // timed out
                _ => *timeout -= 1,                            // one ms elapsed
            }
        }
    }
}
/// Wake every thread spinning in `nrf_modem_os_timedwait` by raising the
/// shared notification flag.
#[no_mangle]
pub extern "C" fn nrf_modem_os_event_notify() {
    NOTIFY_ACTIVE.store(true, Ordering::SeqCst);
}
/// Allocate `num_bytes_requested` bytes from the library heap.
/// Returns null on allocation failure; free with `nrf_modem_os_free`.
#[no_mangle]
pub extern "C" fn nrf_modem_os_alloc(num_bytes_requested: usize) -> *mut u8 {
    unsafe { generic_alloc(num_bytes_requested, &crate::LIBRARY_ALLOCATOR) }
}
/// Free memory previously returned by [`nrf_modem_os_alloc`].
///
/// # Safety
/// `ptr` must be null, or a pointer returned by `nrf_modem_os_alloc` that has
/// not already been freed.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_free(ptr: *mut u8) {
    // Tolerate NULL like C `free`: generic_free reads a size header just
    // below `ptr`, which would be undefined behavior for a null pointer.
    if ptr.is_null() {
        return;
    }
    generic_free(ptr, &crate::LIBRARY_ALLOCATOR);
}
/// Allocate `num_bytes_requested` bytes from the shared-memory TX heap used
/// for traffic towards the modem. Returns null on failure.
#[no_mangle]
pub extern "C" fn nrf_modem_os_shm_tx_alloc(num_bytes_requested: usize) -> *mut u8 {
    unsafe { generic_alloc(num_bytes_requested, &crate::TX_ALLOCATOR) }
}
/// Free memory previously returned by [`nrf_modem_os_shm_tx_alloc`].
///
/// # Safety
/// `ptr` must be null, or a pointer returned by `nrf_modem_os_shm_tx_alloc`
/// that has not already been freed.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_shm_tx_free(ptr: *mut u8) {
    // Tolerate NULL like C `free`: generic_free reads a size header just
    // below `ptr`, which would be undefined behavior for a null pointer.
    if ptr.is_null() {
        return;
    }
    generic_free(ptr, &crate::TX_ALLOCATOR);
}
/// Apply a full IPC peripheral configuration: send-task channel masks,
/// receive-event channel masks, and the receive-interrupt enable mask.
///
/// # Safety
/// `p_config` must point to a valid `NrfxIpcConfig`; writes IPC registers.
#[no_mangle]
pub unsafe extern "C" fn nrfx_ipc_config_load(p_config: *const NrfxIpcConfig) {
    let config: &NrfxIpcConfig = &*p_config;
    let ipc = &(*pac::IPC_NS::ptr());
    // Channel bitmask for each SEND task.
    for (i, value) in config.send_task_config.iter().enumerate() {
        ipc.send_cnf[i].write(|w| w.bits(*value));
    }
    // Channel bitmask for each RECEIVE event.
    for (i, value) in config.receive_event_config.iter().enumerate() {
        ipc.receive_cnf[i].write(|w| w.bits(*value));
    }
    // Enable the requested receive-event interrupts (INTENSET: 1-bits enable,
    // 0-bits are ignored).
    ipc.intenset
        .write(|w| w.bits(config.receive_events_enabled));
}
/// Initialize the IPC driver: register the event handler/context, set the
/// IPC interrupt priority and enable the interrupt in the NVIC.
///
/// Mirrors nrfx's `nrfx_ipc_init`; always returns `NrfxErr::Success`.
#[no_mangle]
pub extern "C" fn nrfx_ipc_init(
    irq_priority: u8,
    handler: NrfxIpcHandler,
    p_context: usize,
) -> NrfxErr {
    use cortex_m::interrupt::InterruptNumber;
    let irq = pac::Interrupt::IPC;
    let irq_num = usize::from(irq.number());
    // Publish the handler and context *before* enabling the interrupt, so the
    // IRQ handler can never observe a half-initialized registration.
    IPC_CONTEXT.store(p_context, core::sync::atomic::Ordering::SeqCst);
    IPC_HANDLER.store(handler as usize, core::sync::atomic::Ordering::SeqCst);
    unsafe {
        // Set the priority first, then unmask last: this avoids taking an
        // early IPC interrupt at a stale/default priority.
        (*cortex_m::peripheral::NVIC::PTR).ipr[irq_num].write(irq_priority);
        cortex_m::peripheral::NVIC::unmask(irq);
    }
    NrfxErr::Success
}
/// Uninitialize the IPC driver: reset all channel configuration registers and
/// disable every IPC interrupt.
#[no_mangle]
pub extern "C" fn nrfx_ipc_uninit() {
    let ipc = unsafe { &(*pac::IPC_NS::ptr()) };
    // Restore every send/receive channel configuration to its reset value.
    for i in 0..IPC_CONF_NUM {
        ipc.send_cnf[i].reset();
        ipc.receive_cnf[i].reset();
    }
    // Disable all IPC interrupts. Note: resetting INTENSET (writing zeros) is
    // a hardware no-op — interrupts can only be disabled through INTENCLR,
    // which is what the upstream nrfx driver does.
    ipc.intenclr.write(|w| unsafe { w.bits(u32::MAX) });
}
/// Enable the interrupt for a single IPC receive event.
#[no_mangle]
pub extern "C" fn nrfx_ipc_receive_event_enable(event_index: u8) {
    let ipc = unsafe { &(*pac::IPC_NS::ptr()) };
    // Use the set-only INTENSET register: writing a 1-bit enables exactly that
    // interrupt atomically, avoiding the read-modify-write of INTEN which can
    // race with an interrupt handler touching the same register.
    ipc.intenset
        .write(|w| unsafe { w.bits(1 << event_index) })
}
/// Disable the interrupt for a single IPC receive event.
#[no_mangle]
pub extern "C" fn nrfx_ipc_receive_event_disable(event_index: u8) {
    let ipc = unsafe { &(*pac::IPC_NS::ptr()) };
    // Use the clear-only INTENCLR register: writing a 1-bit disables exactly
    // that interrupt atomically, avoiding the read-modify-write of INTEN which
    // can race with an interrupt handler touching the same register.
    ipc.intenclr
        .write(|w| unsafe { w.bits(1 << event_index) })
}
/// Allocate `num_bytes_requested` bytes from `heap`, prefixing the block with
/// a `usize` header that records the total allocation size (read back later by
/// `generic_free`). Returns a pointer just past the header, or null if the
/// heap is uninitialized or exhausted.
///
/// # Safety
/// Caller must eventually release the returned pointer with `generic_free`
/// against the same heap.
unsafe fn generic_alloc(num_bytes_requested: usize, heap: &crate::WrappedHeap) -> *mut u8 {
    let sizeof_usize = core::mem::size_of::<usize>();
    let mut result = core::ptr::null_mut();
    critical_section::with(|cs| {
        // Reserve extra room in front of the user data for the size header.
        let num_bytes_allocated = num_bytes_requested + sizeof_usize;
        // NOTE(review): alignment is fixed at size_of::<usize>() — assumes no
        // caller needs stricter alignment; confirm against libmodem's usage.
        let layout =
            core::alloc::Layout::from_size_align_unchecked(num_bytes_allocated, sizeof_usize);
        if let Some(ref mut inner_alloc) = *heap.borrow(cs).borrow_mut() {
            match inner_alloc.allocate_first_fit(layout) {
                Ok(real_block) => {
                    let real_ptr = real_block.as_ptr();
                    // Stash the total size so generic_free can rebuild the layout.
                    core::ptr::write_volatile::<usize>(real_ptr as *mut usize, num_bytes_allocated);
                    result = real_ptr.add(sizeof_usize);
                }
                Err(_e) => {
                    // Out of memory: leave `result` null.
                }
            }
        }
    });
    result
}
/// Release a block previously returned by `generic_alloc` back to `heap`,
/// recovering the allocation size from the `usize` header stored just below
/// the user pointer.
///
/// # Safety
/// `ptr` must be a non-null pointer obtained from `generic_alloc` on the same
/// heap and not freed before.
unsafe fn generic_free(ptr: *mut u8, heap: &crate::WrappedHeap) {
    let sizeof_usize = core::mem::size_of::<usize>() as isize;
    critical_section::with(|cs| {
        // Step back over the header to the true start of the allocation.
        let real_ptr = ptr.offset(-sizeof_usize);
        let num_bytes_allocated = core::ptr::read_volatile::<usize>(real_ptr as *const usize);
        // Rebuild the layout generic_alloc used for this block.
        let layout = core::alloc::Layout::from_size_align_unchecked(
            num_bytes_allocated,
            sizeof_usize as usize,
        );
        if let Some(ref mut inner_alloc) = *heap.borrow(cs).borrow_mut() {
            inner_alloc.deallocate(core::ptr::NonNull::new_unchecked(real_ptr), layout);
        }
    });
}
/// Dispatch all pending IPC receive events to the handler registered through
/// `nrfx_ipc_init`.
///
/// # Safety
/// Must be called from the IPC interrupt context; reads and writes IPC
/// peripheral registers.
pub unsafe fn nrf_ipc_irq_handler() {
    // Snapshot which enabled IPC events are currently pending.
    let events_map = (*pac::IPC_NS::ptr()).intpend.read().bits();
    let handler_addr = IPC_HANDLER.load(core::sync::atomic::Ordering::SeqCst);
    let handler = if handler_addr != 0 {
        // The address was stored from a valid NrfxIpcHandler in nrfx_ipc_init,
        // so transmuting it back yields that same function pointer.
        let handler = core::mem::transmute::<usize, NrfxIpcHandler>(handler_addr);
        Some(handler)
    } else {
        #[cfg(feature = "defmt")]
        defmt::warn!("No IPC handler registered");
        None
    };
    let context = IPC_CONTEXT.load(core::sync::atomic::Ordering::SeqCst);
    // Walk the pending bits from least- to most-significant.
    let mut bitmask = events_map;
    while bitmask != 0 {
        let event_idx = bitmask.trailing_zeros();
        bitmask &= !(1 << event_idx);
        // Acknowledge the event in hardware before invoking the callback.
        (*pac::IPC_NS::ptr()).events_receive[event_idx as usize].write(|w| w.bits(0));
        if let Some(handler) = handler {
            let event_idx = event_idx
                .try_into()
                .expect("A u32 has less then 255 trailing zeroes");
            (handler)(event_idx, context as *mut u8);
        }
    }
}
/// Initialize (or re-initialize) a counting semaphore handle for libmodem.
///
/// On first use (`*sem` null) backing storage is allocated from the library
/// heap; later calls reuse it. Returns 0, `-NRF_EINVAL` for a null out-pointer
/// or `initial_count > limit`, or `-NRF_ENOMEM` on allocation failure.
///
/// # Safety
/// `sem` must be null or a valid pointer to a (possibly null) handle slot.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_sem_init(
    sem: *mut *mut core::ffi::c_void,
    initial_count: core::ffi::c_uint,
    limit: core::ffi::c_uint,
) -> core::ffi::c_int {
    if sem.is_null() || initial_count > limit {
        #[cfg(feature = "defmt")]
        defmt::error!(
            "Failed to init semaphore: {} || {} > {}",
            sem.is_null(),
            initial_count,
            limit
        );
        return -(nrfxlib_sys::NRF_EINVAL as i32);
    }
    // Allocate backing storage only on first use.
    if (*sem).is_null() {
        *sem = nrf_modem_os_alloc(core::mem::size_of::<Semaphore>()) as *mut _;
        if (*sem).is_null() {
            #[cfg(feature = "defmt")]
            defmt::error!("Failed to init semaphore: out of memory");
            return -(nrfxlib_sys::NRF_ENOMEM as i32);
        }
    }
    // ptr::write instead of `*p = value`: a freshly allocated block is
    // uninitialized, and plain assignment would first drop the (garbage) old
    // value — harmless while Semaphore has no Drop impl, but fragile.
    core::ptr::write(
        (*sem) as *mut Semaphore,
        Semaphore {
            max_value: limit,
            current_value: AtomicU32::new(initial_count),
        },
    );
    0
}
/// Increment the semaphore count, saturating at its configured maximum.
/// A null handle is silently ignored.
#[no_mangle]
pub extern "C" fn nrf_modem_os_sem_give(sem: *mut core::ffi::c_void) {
    unsafe {
        if sem.is_null() {
            return;
        }
        let max_value = (*(sem as *mut Semaphore)).max_value;
        (*(sem as *mut Semaphore))
            .current_value
            .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |val| {
                // Explicit branch rather than `(val < max_value).then_some(val + 1)`:
                // then_some evaluates `val + 1` eagerly, which overflow-panics
                // in debug builds when the count sits at u32::MAX.
                if val < max_value {
                    Some(val + 1)
                } else {
                    None
                }
            })
            .ok();
    }
}
/// Take (decrement) the semaphore, waiting up to `timeout` milliseconds.
///
/// `NRF_MODEM_OS_FOREVER` waits indefinitely; from ISR context the call is
/// demoted to a single non-blocking attempt. Returns 0 on success or
/// `-NRF_EAGAIN` when the count could not be taken in time (or the handle is
/// null).
#[no_mangle]
pub extern "C" fn nrf_modem_os_sem_take(
    sem: *mut core::ffi::c_void,
    mut timeout: core::ffi::c_int,
) -> core::ffi::c_int {
    unsafe {
        if sem.is_null() {
            return -(nrfxlib_sys::NRF_EAGAIN as i32);
        }
        // Blocking inside an interrupt handler is not allowed: force one try.
        if nrfxlib_sys::nrf_modem_os_is_in_isr() {
            timeout = nrfxlib_sys::NRF_MODEM_OS_NO_WAIT as i32;
        }
        loop {
            // Atomically decrement the count if it is non-zero.
            if (*(sem as *mut Semaphore))
                .current_value
                .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |val| {
                    if val > 0 {
                        Some(val - 1)
                    } else {
                        None
                    }
                })
                .is_ok()
            {
                return 0;
            }
            match timeout {
                // Budget exhausted (or NO_WAIT): give up.
                0 => return -(nrfxlib_sys::NRF_EAGAIN as i32),
                // Wait forever: spin in ~1 ms slices without counting down.
                nrfxlib_sys::NRF_MODEM_OS_FOREVER => {
                    nrf_modem_os_busywait(1000);
                }
                // Finite timeout: burn one millisecond of budget and retry.
                _ => {
                    timeout -= 1;
                    nrf_modem_os_busywait(1000);
                }
            }
        }
    }
}
/// Return the current semaphore count; a null handle reads as 0.
#[no_mangle]
pub extern "C" fn nrf_modem_os_sem_count_get(sem: *mut core::ffi::c_void) -> core::ffi::c_uint {
    if sem.is_null() {
        return 0;
    }
    // SAFETY: a non-null handle is a pointer produced by nrf_modem_os_sem_init.
    let semaphore = unsafe { &*(sem as *const Semaphore) };
    semaphore.current_value.load(Ordering::SeqCst)
}
/// Counting semaphore backing the `nrf_modem_os_sem_*` FFI hooks.
struct Semaphore {
    // Upper bound at which nrf_modem_os_sem_give stops incrementing.
    max_value: u32,
    // Current count: decremented by take, incremented by give.
    current_value: AtomicU32,
}
/// Report whether the caller runs in a context where blocking is forbidden.
#[no_mangle]
pub extern "C" fn nrf_modem_os_is_in_isr() -> bool {
    #[cfg(feature = "os-irq")]
    {
        // With the os-irq feature the OS glue runs from one dedicated
        // interrupt: only that IRQ number counts as "not in ISR".
        // NOTE(review): the `_` arm also maps plain thread mode to `true` —
        // presumably intentional because all modem work happens in OS_IRQ,
        // but confirm; a naive reading would expect thread mode => false.
        let os_irq = OS_IRQ.load(Ordering::Relaxed);
        match cortex_m::peripheral::SCB::vect_active() {
            cortex_m::peripheral::scb::VectActive::Interrupt { irqn } => irqn != os_irq,
            _ => true,
        }
    }
    #[cfg(not(feature = "os-irq"))]
    {
        // Without os-irq: any active vector other than thread mode is an ISR.
        cortex_m::peripheral::SCB::vect_active()
            != cortex_m::peripheral::scb::VectActive::ThreadMode
    }
}
/// A minimal non-blocking mutex flag built on an atomic boolean.
struct MutexLock {
    lock: AtomicBool,
}
impl MutexLock {
    /// Attempt to take the lock. Returns `true` when this call acquired it,
    /// `false` when it was already held by someone else.
    pub fn lock(&self) -> bool {
        // compare_exchange succeeds only on the false -> true transition, so
        // a successful exchange means this caller is the new owner. (On
        // success the returned previous value is always `false`.)
        self.lock
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .is_ok()
    }
    /// Release the lock unconditionally.
    pub fn unlock(&self) {
        self.lock.store(false, Ordering::SeqCst);
    }
}
/// Initialize (or re-initialize) a mutex handle for libmodem.
///
/// On first use (`*mutex` null) a `MutexLock` is allocated from the library
/// heap; on later calls the existing lock is reset to the unlocked state.
/// Returns 0, `-NRF_EINVAL` for a null out-pointer, or `-NRF_ENOMEM`.
///
/// # Safety
/// `mutex` must be null or a valid pointer to a (possibly null) handle slot.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_mutex_init(
    mutex: *mut *mut core::ffi::c_void,
) -> core::ffi::c_int {
    if mutex.is_null() {
        #[cfg(feature = "defmt")]
        defmt::error!("Failed to init mutex (null argument)");
        return -(nrfxlib_sys::NRF_EINVAL as i32);
    }
    if (*mutex).is_null() {
        let p = nrf_modem_os_alloc(core::mem::size_of::<MaybeUninit<MutexLock>>())
            as *mut MaybeUninit<MutexLock>;
        if p.is_null() {
            #[cfg(feature = "defmt")]
            defmt::error!("Failed to init mutex: out of memory");
            return -(nrfxlib_sys::NRF_ENOMEM as i32);
        }
        // Write through MaybeUninit so the fresh allocation is never treated
        // as a live MutexLock before it is fully initialized.
        p.write(MaybeUninit::new(MutexLock {
            lock: AtomicBool::new(false),
        }));
        *mutex = p as *mut core::ffi::c_void;
    } else {
        // Handle already allocated: re-initialization just unlocks it.
        (*(mutex as *mut MutexLock)).unlock();
    }
    0
}
/// Lock a mutex, waiting up to `timeout` milliseconds.
///
/// `NRF_MODEM_OS_NO_WAIT` makes exactly one attempt; `NRF_MODEM_OS_FOREVER`
/// spins until the lock is acquired. Returns 0 on success, `-NRF_EAGAIN` on
/// timeout, `-NRF_EINVAL` for a null handle.
///
/// # Safety
/// `mutex` must be null or a handle produced by `nrf_modem_os_mutex_init`.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_mutex_lock(
    mutex: *mut core::ffi::c_void,
    timeout: core::ffi::c_int,
) -> core::ffi::c_int {
    if mutex.is_null() {
        return -(nrfxlib_sys::NRF_EINVAL as i32);
    }
    let mutex = &*(mutex as *mut MutexLock);
    let mut locked = mutex.lock();
    // Fast path: acquired immediately, or the caller asked for no waiting.
    if locked || timeout == nrfxlib_sys::NRF_MODEM_OS_NO_WAIT as i32 {
        return if locked {
            0
        } else {
            -(nrfxlib_sys::NRF_EAGAIN as i32)
        };
    }
    // Slow path: spin in 100 us slices, counting elapsed microseconds against
    // the millisecond timeout (skipped entirely for FOREVER).
    let mut elapsed = 0;
    const WAIT_US: core::ffi::c_int = 100;
    while !locked {
        nrf_modem_os_busywait(WAIT_US);
        if timeout != nrfxlib_sys::NRF_MODEM_OS_FOREVER {
            elapsed += WAIT_US;
            if (elapsed / 1000) > timeout {
                return -(nrfxlib_sys::NRF_EAGAIN as i32);
            }
        }
        locked = mutex.lock();
    }
    0
}
/// Release a mutex created by `nrf_modem_os_mutex_init`.
/// Returns 0 on success or `-NRF_EINVAL` for a null handle.
///
/// # Safety
/// `mutex` must be null or a handle produced by `nrf_modem_os_mutex_init`.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_mutex_unlock(
    mutex: *mut core::ffi::c_void,
) -> core::ffi::c_int {
    if mutex.is_null() {
        return -(nrfxlib_sys::NRF_EINVAL as i32);
    }
    let lock = &*(mutex as *const MutexLock);
    lock.unlock();
    0
}
/// Forward a modem library log line to defmt (only when both the `defmt` and
/// `modem-log` features are enabled); otherwise a no-op.
///
/// # Safety
/// When logging is enabled, `_msg` must be a valid NUL-terminated C string.
#[no_mangle]
pub unsafe extern "C" fn nrf_modem_os_log_wrapped(
    _level: core::ffi::c_int,
    _msg: *const core::ffi::c_char,
) {
    #[cfg(all(feature = "defmt", feature = "modem-log"))]
    {
        let msg = core::ffi::CStr::from_ptr(_msg);
        // Messages that are not valid UTF-8 are silently dropped.
        if let Ok(msg) = msg.to_str() {
            defmt::trace!("Modem log <{}>: {}", _level, msg);
        }
    }
}
/// Hex-dump logging hook required by libmodem; intentionally unimplemented
/// in this port (all arguments are ignored).
#[no_mangle]
pub extern "C" fn nrf_modem_os_logdump(
    _level: core::ffi::c_int,
    _strdata: *const core::ffi::c_char,
    _data: *const core::ffi::c_void,
    _len: core::ffi::c_int,
) {
}