use core::cell::Cell;
use core::future::Future;
use core::num::NonZeroU32;
use core::pin::Pin;
use core::ptr::{self, NonNull};
use core::sync::atomic::{AtomicBool, Ordering};
use core::task::{Context, Poll};
#[cfg(feature = "alloc")]
extern crate alloc;
use esp_idf_sys::*;
use crate::cpu::Core;
use crate::interrupt;
// Core-affinity sentinel meaning "not pinned to any core".
//
// Newer ESP-IDF versions (anything other than v4, v5.0 and v5.1) expose the
// sentinel as the Kconfig constant `CONFIG_FREERTOS_NO_AFFINITY`.
#[cfg(not(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
)))]
const NO_AFFINITY: core::ffi::c_int = CONFIG_FREERTOS_NO_AFFINITY as _;
// Older ESP-IDF versions (v4, v5.0, v5.1) expose the same sentinel as the
// FreeRTOS constant `tskNO_AFFINITY` (note the different signedness).
#[cfg(any(
    esp_idf_version_major = "4",
    esp_idf_version = "5.0",
    esp_idf_version = "5.1"
))]
const NO_AFFINITY: core::ffi::c_uint = tskNO_AFFINITY;
/// Creates a new FreeRTOS task, optionally pinned to a specific core.
///
/// Returns the handle of the newly created task, or an `ESP_FAIL` error if
/// FreeRTOS reports that the task could not be created.
///
/// # Safety
///
/// `task_arg` must remain valid for as long as `task_handler` may use it,
/// and `task_handler` must be safe to run as a FreeRTOS task entry point.
pub unsafe fn create(
    task_handler: extern "C" fn(*mut core::ffi::c_void),
    task_name: &core::ffi::CStr,
    stack_size: usize,
    task_arg: *mut core::ffi::c_void,
    priority: u8,
    pin_to_core: Option<Core>,
) -> Result<TaskHandle_t, EspError> {
    // `None` means "run on any core".
    let affinity = pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _);

    let mut handle: TaskHandle_t = core::ptr::null_mut();
    let created = xTaskCreatePinnedToCore(
        Some(task_handler),
        task_name.as_ptr(),
        stack_size as _,
        task_arg,
        priority as _,
        &mut handle,
        affinity,
    );

    // NOTE(review): only a `0` result is mapped to an error; any other
    // non-success code, if one exists, would be treated as success —
    // confirm against the `xTaskCreatePinnedToCore` return contract.
    match created {
        0 => Err(EspError::from_infallible::<ESP_FAIL>()),
        _ => Ok(handle),
    }
}
/// Deletes the given FreeRTOS task.
///
/// # Safety
///
/// `task` must be a valid task handle that has not already been deleted.
pub unsafe fn destroy(task: TaskHandle_t) {
    vTaskDelete(task)
}
/// Requests a context switch, from either task or ISR context.
///
/// In ISR context a registered ISR yielder (if any) takes precedence;
/// otherwise the architecture/IDF-version specific "yield from ISR"
/// primitive is invoked. In task context this is a plain `vPortYield()`.
#[inline(always)]
#[link_section = ".iram1.interrupt_task_do_yield"]
pub fn do_yield() {
    if interrupt::active() {
        unsafe {
            if let Some((yielder, arg)) = interrupt::get_isr_yielder() {
                yielder(arg);
            } else {
                // RISC-V targets: plain ISR yield.
                // Fix: `esp32c61` was missing from this list while also
                // being excluded from the Xtensa `_frxt_setup_switch` gate
                // below, which left it with no ISR-yield branch at all on
                // ESP-IDF v5+.
                #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32c61, esp32p4))]
                vPortYieldFromISR();
                // Xtensa targets on ESP-IDF v4.
                #[cfg(all(
                    not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32c61, esp32p4)),
                    esp_idf_version_major = "4"
                ))]
                vPortEvaluateYieldFromISR(0);
                // Xtensa targets on ESP-IDF v5+.
                #[cfg(all(
                    not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6, esp32c61, esp32p4)),
                    not(esp_idf_version_major = "4")
                ))]
                _frxt_setup_switch();
            }
        }
    } else {
        unsafe {
            vPortYield();
        }
    }
}
/// Returns the handle of the currently executing task, or `None` when
/// called from an ISR (where no "current task" is reported).
#[inline(always)]
#[link_section = ".iram1.interrupt_task_current"]
pub fn current() -> Option<TaskHandle_t> {
    let in_isr = interrupt::active();
    (!in_isr).then(|| unsafe { xTaskGetCurrentTaskHandle() })
}
/// Blocks the calling task for up to `timeout` ticks waiting for a task
/// notification (slot 0).
///
/// Returns the received notification bits, or `None` when the wait timed
/// out or the notification value was zero.
pub fn wait_notification(timeout: TickType_t) -> Option<NonZeroU32> {
    let mut bits = 0_u32;
    // Slot 0; clear-on-entry mask 0, clear-on-exit mask `u32::MAX`
    // (presumably "consume all bits" — matches `xTaskNotifyWait` semantics).
    let woke = unsafe { xTaskGenericNotifyWait(0, 0, u32::MAX, &mut bits, timeout) };
    if woke == 0 {
        None
    } else {
        NonZeroU32::new(bits)
    }
}
/// Sends `notification` to `task` and immediately yields if doing so woke a
/// higher-priority task.
///
/// Returns whether the notification was delivered.
///
/// # Safety
///
/// `task` must be a valid FreeRTOS task handle.
pub unsafe fn notify_and_yield(task: TaskHandle_t, notification: NonZeroU32) -> bool {
    let (delivered, needs_yield) = notify(task, notification);

    if needs_yield {
        do_yield();
    }

    delivered
}
/// Sends `notification` bits to `task` (notification slot 0, `eSetBits`).
///
/// Works from both task and ISR context, automatically selecting the
/// `FromISR` API variant when needed.
///
/// Returns `(notified, higher_prio_task_woken)`; the second flag is only
/// ever `true` in ISR context and means the caller should yield (see
/// [`notify_and_yield`]).
///
/// # Safety
///
/// `task` must be a valid FreeRTOS task handle.
pub unsafe fn notify(task: TaskHandle_t, notification: NonZeroU32) -> (bool, bool) {
    let (notified, higher_prio_task_woken) = if interrupt::active() {
        let mut higher_prio_task_woken: BaseType_t = Default::default();
        let notified = xTaskGenericNotifyFromISR(
            task,
            0,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
            &mut higher_prio_task_woken,
        );
        (notified, higher_prio_task_woken)
    } else {
        // Task context: the "higher priority task woken" concept does not
        // apply, so it is reported as 0 (false).
        let notified = xTaskGenericNotify(
            task,
            0,
            notification.into(),
            eNotifyAction_eSetBits,
            ptr::null_mut(),
        );
        (notified, 0)
    };
    (notified != 0, higher_prio_task_woken != 0)
}
/// Returns the handle of the FreeRTOS idle task for the given `core`.
///
/// Used e.g. by the task watchdog to (un)subscribe idle tasks.
pub fn get_idle_task(core: crate::cpu::Core) -> TaskHandle_t {
    // Single-core targets listed here have exactly one idle task, fetched
    // via the core-less accessor; asking for any core other than `Core0`
    // is a caller bug.
    // NOTE(review): esp32c61/esp32p4 are not in this list — confirm they are
    // intended to go through the per-core accessors below.
    #[cfg(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6))]
    {
        if matches!(core, crate::cpu::Core::Core0) {
            unsafe { xTaskGetIdleTaskHandle() }
        } else {
            unreachable!()
        }
    }
    // ESP-IDF v4 / v5.0 / v5.1: the per-core accessor is named `...ForCPU`.
    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    ))]
    unsafe {
        xTaskGetIdleTaskHandleForCPU(core as u32)
    }
    // Later ESP-IDF versions renamed the accessor to `...ForCore`.
    #[cfg(not(any(esp32c3, esp32c2, esp32h2, esp32c5, esp32c6)))]
    #[cfg(not(any(
        esp_idf_version_major = "4",
        esp_idf_version = "5.0",
        esp_idf_version = "5.1"
    )))]
    unsafe {
        xTaskGetIdleTaskHandleForCore(core as i32)
    }
}
/// Drives `fut` to completion on the current FreeRTOS task.
///
/// Between polls the task sleeps on a task notification rather than
/// busy-spinning; the waker delivers that notification.
#[cfg(feature = "alloc")]
pub fn block_on<F>(fut: F) -> F::Output
where
    F: Future,
{
    ::log::trace!("block_on(): started");

    // The notification is bound to the current task; its notifier doubles
    // as the waker handed to the future.
    let notification = notification::Notification::new();
    let waker = notification.notifier().into();
    let mut cx = Context::from_waker(&waker);
    let mut fut = core::pin::pin!(fut);

    let res = loop {
        if let Poll::Ready(out) = fut.as_mut().poll(&mut cx) {
            break out;
        }
        // Still pending: block until the waker notifies this task.
        notification.wait_any();
    };

    ::log::trace!("block_on(): finished");

    res
}
/// Returns a future that yields control back to the executor exactly once
/// before completing.
pub fn yield_now() -> impl Future<Output = ()> {
    YieldNowFuture { yielded: false }
}
/// A future that yields to the executor exactly once before completing.
///
/// The first poll schedules an immediate re-wake (so the executor re-queues
/// this task behind other ready work) and returns `Pending`; the second
/// poll resolves with `()`.
#[must_use = "futures do nothing unless you `.await` or poll them"]
struct YieldNowFuture {
    // `false` until the future has returned `Pending` once.
    yielded: bool,
}

impl Future for YieldNowFuture {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.yielded {
            true => Poll::Ready(()),
            false => {
                self.yielded = true;
                // Ask to be polled again right away; the next poll resolves.
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        }
    }
}
#[cfg(esp_idf_comp_pthread_enabled)]
pub mod thread {
use core::ffi::CStr;
use enumset::EnumSetType;
#[cfg(not(any(
esp_idf_version_major = "4",
all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
)))] use enumset::EnumSet;
use esp_idf_sys::*;
use super::NO_AFFINITY;
use crate::cpu::Core;
/// Heap capability flags usable for a pthread stack allocation.
///
/// NOTE(review): the discriminants appear to mirror the ESP-IDF
/// `MALLOC_CAP_*` bit positions — confirm against `esp_heap_caps.h`.
#[derive(Debug, EnumSetType)]
#[enumset(repr = "u32")]
pub enum MallocCap {
    Exec = 0,
    Cap32bit = 1,
    Cap8bit = 2,
    Dma = 3,
    Pid2 = 4,
    Pid3 = 5,
    Pid4 = 6,
    Pid5 = 7,
    Pid6 = 8,
    Pid7 = 9,
    Spiram = 10,
    Internal = 11,
    Default = 12,
    Iram8bit = 13,
    Retention = 14,
    Rtcram = 15,
    Tcm = 16,
    Invalid = 31,
}

/// Spawn-time configuration applied to threads created through ESP-IDF's
/// pthread layer (and therefore to `std::thread`).
#[derive(Debug)]
pub struct ThreadSpawnConfiguration {
    /// Optional, static name given to spawned threads.
    pub name: Option<&'static CStr>,
    /// Stack size in bytes.
    pub stack_size: usize,
    /// FreeRTOS priority of spawned threads.
    pub priority: u8,
    /// Whether child threads inherit this configuration.
    pub inherit: bool,
    /// Core to pin spawned threads to; `None` means "any core".
    pub pin_to_core: Option<Core>,
    /// Heap capabilities used for the stack allocation (ESP-IDF > v5.2).
    #[cfg(not(any(
        esp_idf_version_major = "4",
        all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
        all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
        all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
    )))]
    pub stack_alloc_caps: EnumSet<MallocCap>,
}

impl ThreadSpawnConfiguration {
    /// Returns the currently active spawn configuration, if one was set.
    pub fn get() -> Option<Self> {
        get_conf()
    }

    /// Makes `self` the spawn configuration for subsequently created threads.
    pub fn set(&self) -> Result<(), EspError> {
        set_conf(self)
    }
}

impl Default for ThreadSpawnConfiguration {
    // Mirrors ESP-IDF's `esp_pthread_get_default_config()`.
    fn default() -> Self {
        get_default_conf()
    }
}
/// Converts the typed spawn configuration into the raw ESP-IDF pthread
/// configuration struct.
impl From<&ThreadSpawnConfiguration> for esp_pthread_cfg_t {
    fn from(conf: &ThreadSpawnConfiguration) -> Self {
        #[allow(clippy::unwrap_or_default)]
        Self {
            // A missing name becomes a null pointer (ESP-IDF then applies
            // its own default).
            thread_name: conf
                .name
                .map(|name| name.as_ptr() as _)
                .unwrap_or(core::ptr::null()),
            stack_size: conf.stack_size as _,
            prio: conf.priority as _,
            inherit_cfg: conf.inherit,
            // `None` maps to the NO_AFFINITY sentinel ("any core").
            pin_to_core: conf.pin_to_core.map(Into::into).unwrap_or(NO_AFFINITY as _),
            // Consistency fix: gate this initializer with the exact same
            // major/minor cfg form used on the `stack_alloc_caps` field
            // declaration, so the field and its initializer can never be
            // enabled/disabled differently.
            #[cfg(not(any(
                esp_idf_version_major = "4",
                all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
                all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
                all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
            )))]
            stack_alloc_caps: conf.stack_alloc_caps.as_u32(),
        }
    }
}
/// Converts a raw ESP-IDF pthread configuration back into the typed wrapper.
impl From<esp_pthread_cfg_t> for ThreadSpawnConfiguration {
    fn from(conf: esp_pthread_cfg_t) -> Self {
        Self {
            name: if conf.thread_name.is_null() {
                None
            } else {
                // SAFETY: a non-null `thread_name` is assumed to point at a
                // valid, NUL-terminated string with 'static lifetime.
                // NOTE(review): true for configs produced by the `From`
                // impl above — confirm for configs originating inside
                // ESP-IDF itself.
                Some(unsafe { CStr::from_ptr(conf.thread_name) })
            },
            stack_size: conf.stack_size as _,
            priority: conf.prio as _,
            inherit: conf.inherit_cfg,
            // The NO_AFFINITY sentinel maps back to `None` ("not pinned").
            pin_to_core: if conf.pin_to_core == NO_AFFINITY as _ {
                None
            } else {
                Some(conf.pin_to_core.into())
            },
            #[cfg(not(any(
                esp_idf_version_major = "4",
                all(esp_idf_version_major = "5", esp_idf_version_minor = "0"),
                all(esp_idf_version_major = "5", esp_idf_version_minor = "1"),
                all(esp_idf_version_major = "5", esp_idf_version_minor = "2"),
            )))]
            stack_alloc_caps: EnumSet::<MallocCap>::from_u32(conf.stack_alloc_caps),
        }
    }
}

// Fetches ESP-IDF's default pthread configuration as a typed wrapper.
fn get_default_conf() -> ThreadSpawnConfiguration {
    unsafe { esp_pthread_get_default_config() }.into()
}

// Reads the currently active pthread spawn configuration.
//
// Returns `None` when no configuration was ever set (`ESP_ERR_NOT_FOUND`).
// NOTE(review): any other non-OK code would be treated as success —
// confirm `esp_pthread_get_cfg` can only fail with `ESP_ERR_NOT_FOUND`.
fn get_conf() -> Option<ThreadSpawnConfiguration> {
    let mut conf: esp_pthread_cfg_t = Default::default();
    let res = unsafe { esp_pthread_get_cfg(&mut conf as _) };
    if res == ESP_ERR_NOT_FOUND {
        None
    } else {
        Some(conf.into())
    }
}
// Applies `conf` as the spawn configuration for subsequently created
// threads.
//
// # Panics
//
// Panics when `priority` is 0 or not strictly below `configMAX_PRIORITIES`.
fn set_conf(conf: &ThreadSpawnConfiguration) -> Result<(), EspError> {
    if conf.priority < 1 || conf.priority as u32 >= configMAX_PRIORITIES {
        // Fix: report the actual configured bound instead of the previously
        // hard-coded "24", which silently went stale if
        // `configMAX_PRIORITIES` differed.
        panic!(
            "Thread priority {} has to be in the range [1 - {})",
            conf.priority, configMAX_PRIORITIES
        );
    }
    esp!(unsafe { esp_pthread_set_cfg(&conf.into()) })?;
    Ok(())
}
}
/// A FreeRTOS-backed critical section built on a recursive mutex, created
/// lazily on first [`CriticalSection::enter`].
///
/// Field 0 holds the mutex handle once created; field 1 flags whether
/// initialization has completed.
pub struct CriticalSection(Cell<Option<NonNull<QueueDefinition>>>, AtomicBool);

// FreeRTOS queue type used to create a recursive mutex.
// NOTE(review): presumably `queueQUEUE_TYPE_RECURSIVE_MUTEX` — confirm the
// value against the FreeRTOS headers in use.
const QUEUE_TYPE_RECURSIVE_MUTEX: u8 = 4;
// Lazily creates the backing recursive mutex (first use only) and takes it.
//
// First-use initialization is double-checked: a fast-path read of the
// `initialized` flag, then a re-check with interrupts disabled
// (`interrupt::free`) so two contexts racing on first use cannot both
// create a mutex.
#[inline(always)]
#[link_section = ".iram1.cs_enter"]
fn enter(cs: &CriticalSection) {
    if !cs.1.load(Ordering::SeqCst) {
        interrupt::free(|| {
            if !cs.1.load(Ordering::SeqCst) {
                let ptr = unsafe { xQueueCreateMutex(QUEUE_TYPE_RECURSIVE_MUTEX) };
                cs.0.set(NonNull::new(ptr));
                // Publish the flag only after the handle is stored.
                cs.1.store(true, Ordering::SeqCst);
            }
        });
    }
    // Take the recursive mutex with `crate::delay::BLOCK` (presumably an
    // infinite timeout — confirm); under that assumption the take cannot
    // fail, hence `unreachable!` below.
    let res =
        unsafe { xQueueTakeMutexRecursive(cs.0.get().unwrap().as_ptr(), crate::delay::BLOCK) } != 0;
    if !res {
        unreachable!();
    }
}
// Releases the critical section's recursive mutex.
//
// Panics when called without a prior `enter()` (i.e. before the mutex was
// ever created).
#[inline(always)]
#[link_section = ".iram1.cs_exit"]
fn exit(cs: &CriticalSection) {
    if !cs.1.load(Ordering::SeqCst) {
        panic!("Called exit() without matching enter()");
    }

    let mutex = cs.0.get().unwrap().as_ptr();
    let gave = unsafe { xQueueGiveMutexRecursive(mutex) };
    // Giving a mutex we hold must succeed.
    if gave == 0 {
        unreachable!();
    }
}
impl CriticalSection {
    /// Creates a new, uninitialized critical section (cheap and `const`).
    ///
    /// The underlying FreeRTOS mutex is created lazily on first `enter()`.
    #[inline(always)]
    pub const fn new() -> Self {
        Self(Cell::new(None), AtomicBool::new(false))
    }

    /// Locks the critical section, returning a guard that unlocks on drop.
    #[inline(always)]
    pub fn enter(&self) -> CriticalSectionGuard<'_> {
        enter(self);
        CriticalSectionGuard(self)
    }
}

impl Drop for CriticalSection {
    fn drop(&mut self) {
        // Delete the mutex only if it was ever created.
        if self.1.load(Ordering::SeqCst) {
            unsafe {
                vQueueDelete(self.0.get().unwrap().as_ptr());
            }
        }
    }
}

impl Default for CriticalSection {
    #[inline(always)]
    fn default() -> Self {
        Self::new()
    }
}

// SAFETY: the raw mutex handle is only handed to the FreeRTOS queue/mutex
// API, and the `Cell` is only written inside the interrupt-free init path
// in `enter()`. NOTE(review): confirm no other unsynchronized access to
// the `Cell` exists.
unsafe impl Send for CriticalSection {}
unsafe impl Sync for CriticalSection {}

/// RAII guard returned by [`CriticalSection::enter`]; releases the lock
/// when dropped.
pub struct CriticalSectionGuard<'a>(&'a CriticalSection);

impl Drop for CriticalSectionGuard<'_> {
    // Releases the critical section.
    #[inline(always)]
    fn drop(&mut self) {
        exit(self.0);
    }
}
pub mod watchdog {
use core::{
marker::PhantomData,
sync::atomic::{AtomicUsize, Ordering},
};
use esp_idf_sys::*;
/// Convenience alias for the task watchdog configuration.
pub type TWDTConfig = config::Config;

pub mod config {
    #[cfg(not(esp_idf_version_major = "4"))]
    use esp_idf_sys::*;

    /// Task Watchdog Timer (TWDT) configuration.
    #[derive(Clone)]
    pub struct Config {
        /// Watchdog timeout.
        pub duration: core::time::Duration,
        /// Whether the watchdog panics (rather than merely reporting) when
        /// it triggers.
        pub panic_on_trigger: bool,
        /// Which cores' idle tasks the watchdog should supervise.
        pub subscribed_idle_tasks: enumset::EnumSet<crate::cpu::Core>,
    }

    impl Config {
        /// Builds a configuration mirroring the project's Kconfig defaults
        /// (`CONFIG_ESP_TASK_WDT_*`), falling back to a 5 s timeout when
        /// the TWDT Kconfig options are absent.
        pub fn new() -> Self {
            #[cfg(esp_idf_esp_task_wdt)]
            let duration = core::time::Duration::from_secs(
                esp_idf_sys::CONFIG_ESP_TASK_WDT_TIMEOUT_S as u64,
            );
            #[cfg(not(esp_idf_esp_task_wdt))]
            let duration = core::time::Duration::from_secs(5);
            Self {
                duration,
                panic_on_trigger: cfg!(esp_idf_esp_task_wdt_panic),
                subscribed_idle_tasks: {
                    let mut subscribed_idle_tasks = enumset::EnumSet::empty();
                    if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu0) {
                        subscribed_idle_tasks |= crate::cpu::Core::Core0;
                    }
                    // A second core only exists on the dual-core chips.
                    #[cfg(any(esp32, esp32s3))]
                    if cfg!(esp_idf_esp_task_wdt_check_idle_task_cpu1) {
                        subscribed_idle_tasks |= crate::cpu::Core::Core1;
                    }
                    subscribed_idle_tasks
                },
            }
        }
    }

    impl Default for Config {
        fn default() -> Self {
            Self::new()
        }
    }

    // Converts the typed config into the raw ESP-IDF v5+ TWDT config.
    #[cfg(not(esp_idf_version_major = "4"))]
    impl From<&Config> for esp_task_wdt_config_t {
        fn from(config: &Config) -> Self {
            esp_task_wdt_config_t {
                timeout_ms: config.duration.as_millis() as u32,
                trigger_panic: config.panic_on_trigger,
                idle_core_mask: config.subscribed_idle_tasks.as_u32(),
            }
        }
    }
}
/// Driver for the Task Watchdog Timer (TWDT).
///
/// Reference-counted: clones share the one global watchdog, and the last
/// clone to drop deinitializes it — unless ESP-IDF itself initialized it.
pub struct TWDTDriver<'d> {
    // Whether the TWDT was already initialized by ESP-IDF at startup; if
    // so, this driver must not deinitialize it on drop.
    init_by_idf: bool,
    _marker: PhantomData<&'d mut ()>,
}

// Number of live `TWDTDriver` instances (clones included).
static TWDT_DRIVER_REF_COUNT: AtomicUsize = AtomicUsize::new(0);

impl<'d> TWDTDriver<'d> {
    /// Takes the TWDT peripheral and (re)configures the task watchdog.
    ///
    /// On ESP-IDF v5+ the watchdog is initialized — or reconfigured when
    /// ESP-IDF already initialized it. On v4 it is initialized and the
    /// configured idle tasks are subscribed.
    ///
    /// NOTE(review): the refcount is incremented before the fallible init
    /// calls, so an early error return leaves it incremented — confirm
    /// whether that is intended.
    pub fn new(_twdt: TWDT<'d>, config: &config::Config) -> Result<Self, EspError> {
        TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
        let init_by_idf = Self::watchdog_is_init_by_idf();
        #[cfg(not(esp_idf_version_major = "4"))]
        if !init_by_idf {
            esp!(unsafe { esp_task_wdt_init(&config.into() as *const esp_task_wdt_config_t) })?;
        } else {
            esp!(unsafe {
                esp_task_wdt_reconfigure(&config.into() as *const esp_task_wdt_config_t)
            })?;
        }
        #[cfg(esp_idf_version_major = "4")]
        esp!(unsafe {
            esp_task_wdt_init(config.duration.as_secs() as u32, config.panic_on_trigger)
        })?;
        // v4: `ESP_ERR_INVALID_ARG` from subscription is swallowed; any
        // other error is propagated.
        #[cfg(esp_idf_version_major = "4")]
        if let Err(e) = Self::subscribe_idle_tasks(config.subscribed_idle_tasks) {
            if e.code() != ESP_ERR_INVALID_ARG {
                return Err(e);
            }
        }
        Ok(Self {
            init_by_idf,
            _marker: Default::default(),
        })
    }

    /// Subscribes the *current* task to the watchdog; the returned
    /// subscription feeds it and unsubscribes the task on drop.
    pub fn watch_current_task(&mut self) -> Result<WatchdogSubscription<'_>, EspError> {
        esp!(unsafe { esp_task_wdt_add(core::ptr::null_mut()) })?;
        Ok(WatchdogSubscription::new())
    }

    // v4 only: subscribe the idle task of each given core to the TWDT.
    #[cfg(esp_idf_version_major = "4")]
    fn subscribe_idle_tasks(cores: enumset::EnumSet<crate::cpu::Core>) -> Result<(), EspError> {
        for core in cores {
            let task = super::get_idle_task(core);
            esp!(unsafe { esp_task_wdt_add(task) })?;
        }
        Ok(())
    }

    // v4 only: unsubscribe every core's idle task from the TWDT.
    #[cfg(esp_idf_version_major = "4")]
    fn unsubscribe_idle_tasks() -> Result<(), EspError> {
        for core in enumset::EnumSet::<crate::cpu::Core>::all() {
            let task = super::get_idle_task(core);
            esp!(unsafe { esp_task_wdt_delete(task) })?;
        }
        Ok(())
    }

    // Detects whether ESP-IDF initialized the TWDT at startup: known at
    // compile time from Kconfig on ESP-IDF > v5.0, probed at runtime via
    // `esp_task_wdt_status` on v4/v5.0.
    fn watchdog_is_init_by_idf() -> bool {
        if cfg!(not(any(
            esp_idf_version_major = "4",
            esp_idf_version = "5.0"
        ))) {
            cfg!(esp_idf_esp_task_wdt_init)
        } else {
            !matches!(
                unsafe { esp_task_wdt_status(core::ptr::null_mut()) },
                ESP_ERR_INVALID_STATE
            )
        }
    }

    // Tears the watchdog down unless ESP-IDF owns it.
    //
    // NOTE(review): the deinit error is `unwrap`ed rather than propagated,
    // so this only ever returns `Ok` or panics — confirm intent.
    fn deinit(&self) -> Result<(), EspError> {
        if !self.init_by_idf {
            #[cfg(esp_idf_version_major = "4")]
            if let Err(e) = Self::unsubscribe_idle_tasks() {
                if e.code() != ESP_ERR_INVALID_ARG {
                    return Err(e);
                }
            }
            esp!(unsafe { esp_task_wdt_deinit() }).unwrap();
        }
        Ok(())
    }
}

impl Clone for TWDTDriver<'_> {
    // Shares the same global watchdog, bumping the refcount.
    fn clone(&self) -> Self {
        TWDT_DRIVER_REF_COUNT.fetch_add(1, Ordering::SeqCst);
        Self {
            init_by_idf: self.init_by_idf,
            _marker: Default::default(),
        }
    }
}
impl Drop for TWDTDriver<'_> {
    // Drops one reference; the last one tears the watchdog down.
    fn drop(&mut self) {
        // `fetch_sub` returns the *previous* value: 1 means this was the
        // last live instance.
        let prev = TWDT_DRIVER_REF_COUNT.fetch_sub(1, Ordering::SeqCst);
        if prev == 1 {
            self.deinit().unwrap();
        } else if prev < 1 {
            // Refcount underflow: more drops than constructions/clones.
            unreachable!();
        }
    }
}
// SAFETY: the driver holds only a `bool` plus marker state; shared watchdog
// state lives behind the global atomic refcount and the ESP-IDF TWDT API.
// NOTE(review): confirm the TWDT API is callable from any task.
unsafe impl Send for TWDTDriver<'_> {}

/// RAII subscription of the *current task* to the task watchdog.
///
/// While alive, the task should call [`Self::feed`] periodically; dropping
/// the subscription unsubscribes the task.
pub struct WatchdogSubscription<'s>(PhantomData<&'s mut ()>);

impl WatchdogSubscription<'_> {
    fn new() -> Self {
        Self(Default::default())
    }

    /// Feeds (resets) the watchdog for the current task.
    pub fn feed(&mut self) -> Result<(), EspError> {
        esp!(unsafe { esp_task_wdt_reset() })
    }
}

impl embedded_hal_0_2::watchdog::Watchdog for WatchdogSubscription<'_> {
    // embedded-hal 0.2 adapter; panics if the underlying feed fails.
    fn feed(&mut self) {
        Self::feed(self).unwrap()
    }
}

impl Drop for WatchdogSubscription<'_> {
    // Unsubscribes the current task (null handle = "current task");
    // panics if the unsubscription fails.
    fn drop(&mut self) {
        esp!(unsafe { esp_task_wdt_delete(core::ptr::null_mut()) }).unwrap();
    }
}

crate::impl_peripheral!(TWDT);
}
#[cfg(feature = "critical-section")]
pub mod critical_section {
    // Single process-wide critical section backing the `critical-section`
    // crate's acquire/release pair.
    static CS: super::CriticalSection = super::CriticalSection::new();

    /// `critical-section` implementation backed by [`super::CriticalSection`]
    /// (a FreeRTOS recursive mutex); registered via `set_impl!` below.
    pub struct EspCriticalSection {}

    unsafe impl critical_section::Impl for EspCriticalSection {
        unsafe fn acquire() {
            super::enter(&CS);
        }

        unsafe fn release(_token: ()) {
            super::exit(&CS);
        }
    }

    pub type LinkWorkaround = [*mut (); 2];

    // Keeps the `_critical_section_1_0_*` symbols referenced so the linker
    // does not discard the registered implementation.
    static mut __INTERNAL_REFERENCE: LinkWorkaround = [
        _critical_section_1_0_acquire as *mut _,
        _critical_section_1_0_release as *mut _,
    ];

    /// Returns the internal symbol references; calling this from the binary
    /// prevents the critical-section impl symbols from being stripped.
    pub fn link() -> LinkWorkaround {
        unsafe { __INTERNAL_REFERENCE }
    }

    critical_section::set_impl!(EspCriticalSection);
}
pub mod embassy_sync {
    use embassy_sync::blocking_mutex::raw::RawMutex;

    /// An embassy-sync `RawMutex` backed by [`super::CriticalSection`]
    /// (a FreeRTOS recursive mutex).
    pub struct EspRawMutex(super::CriticalSection);

    // SAFETY: `CriticalSection` itself is declared `Send`/`Sync`; this
    // wrapper adds no further state.
    unsafe impl Send for EspRawMutex {}
    unsafe impl Sync for EspRawMutex {}

    impl Default for EspRawMutex {
        fn default() -> Self {
            Self::new()
        }
    }

    impl EspRawMutex {
        /// Creates a new, lazily-initialized mutex (const-constructible).
        pub const fn new() -> Self {
            Self(super::CriticalSection::new())
        }
    }

    unsafe impl RawMutex for EspRawMutex {
        #[allow(clippy::declare_interior_mutable_const)]
        const INIT: Self = Self::new();

        // Runs `f` with the critical section held; the guard releases it
        // on scope exit (including on panic unwind).
        fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
            let _guard = self.0.enter();
            f()
        }
    }
}
#[cfg(all(feature = "alloc", target_has_atomic = "ptr"))]
pub mod notification {
use core::marker::PhantomData;
use core::num::NonZeroU32;
use core::sync::atomic::{AtomicPtr, Ordering};
extern crate alloc;
use alloc::sync::Arc;
use alloc::task::Wake;
use esp_idf_sys::TickType_t;
use crate::task;
// ESP-IDF v4 bindings expose task handles as opaque `c_void` pointers...
#[cfg(esp_idf_version_major = "4")]
type Task = core::ffi::c_void;
// ...while v5+ exposes the actual task control block type.
#[cfg(not(esp_idf_version_major = "4"))]
type Task = esp_idf_sys::tskTaskControlBlock;

/// A "wait for notification" primitive bound to the FreeRTOS task that
/// created it.
///
/// The `PhantomData<*const ()>` makes this type `!Send`/`!Sync`: only the
/// owning task may wait. The shareable side is the [`Notifier`] handle.
pub struct Notification(Arc<Notifier>, PhantomData<*const ()>);
impl Notification {
    /// Creates a notification bound to the currently running task.
    ///
    /// Panics when called from an ISR, where `task::current()` is `None`.
    pub fn new() -> Self {
        let owner = task::current().unwrap();
        Self(Arc::new(Notifier(AtomicPtr::new(owner))), PhantomData)
    }

    /// Returns a cloneable handle that can notify (wake) this task.
    pub fn notifier(&self) -> Arc<Notifier> {
        Arc::clone(&self.0)
    }

    /// Blocks until at least one notification bit has been delivered.
    pub fn wait_any(&self) {
        while task::wait_notification(crate::delay::BLOCK).is_none() {}
    }

    /// Waits up to `timeout` ticks; returns the delivered bits, if any.
    pub fn wait(&self, timeout: TickType_t) -> Option<NonZeroU32> {
        task::wait_notification(timeout)
    }
}
impl Default for Notification {
    fn default() -> Self {
        Self::new()
    }
}

/// Shareable handle that delivers notifications to the task behind a
/// [`Notification`].
pub struct Notifier(AtomicPtr<Task>);

impl Notifier {
    /// Posts `notification` bits to the owning task without yielding.
    ///
    /// Returns `(notified, higher_prio_task_woken)` as per [`task::notify`];
    /// `(false, false)` if no task pointer is stored.
    ///
    /// # Safety
    ///
    /// The stored task handle must still refer to a live task.
    pub unsafe fn notify(&self, notification: NonZeroU32) -> (bool, bool) {
        let freertos_task = self.0.load(Ordering::SeqCst);
        if !freertos_task.is_null() {
            return unsafe { task::notify(freertos_task, notification) };
        }
        (false, false)
    }

    /// Posts `notification` bits and yields if that woke a higher-priority
    /// task; returns whether the notification was delivered.
    ///
    /// # Safety
    ///
    /// The stored task handle must still refer to a live task.
    pub unsafe fn notify_and_yield(&self, notification: NonZeroU32) -> bool {
        let freertos_task = self.0.load(Ordering::SeqCst);
        if !freertos_task.is_null() {
            unsafe { task::notify_and_yield(freertos_task, notification) }
        } else {
            false
        }
    }
}

impl Wake for Notifier {
    // Waker integration: waking notifies the owning task with bit 0 set.
    fn wake(self: Arc<Self>) {
        // SAFETY: NOTE(review): assumes the owning task outlives every
        // waker derived from this notifier — confirm at the call sites
        // (e.g. `block_on`, where the task drives the future itself).
        unsafe {
            self.notify_and_yield(NonZeroU32::new(1).unwrap());
        }
    }
}
}
pub mod queue {
use core::{
marker::PhantomData,
mem::{size_of, MaybeUninit},
};
use esp_idf_sys::{EspError, TickType_t, ESP_FAIL};
use crate::sys;
/// A typed wrapper around a FreeRTOS queue carrying items of type `T`.
///
/// Items are copied by value into and out of the queue, hence the
/// `T: Copy` bound on the inherent methods.
pub struct Queue<T> {
    // Raw FreeRTOS queue handle.
    ptr: sys::QueueHandle_t,
    // `true` when this wrapper created the queue and must delete it on drop.
    is_owned: bool,
    _marker: PhantomData<T>,
}

// SAFETY: the handle is only ever used through the FreeRTOS queue API,
// which copies items by value. NOTE(review): confirm whether `Sync` on `T`
// is strictly required given items are transferred by copy.
unsafe impl<T> Send for Queue<T> where T: Send + Sync {}
unsafe impl<T> Sync for Queue<T> where T: Send + Sync {}

impl<T> Queue<T>
where
    T: Copy,
{
    /// Allocates a new owned queue with room for `size` items.
    ///
    /// NOTE(review): the handle returned by `xQueueGenericCreate` is not
    /// null-checked here, so an allocation failure would only surface on
    /// first use — confirm intended.
    pub fn new(size: usize) -> Self {
        Queue {
            ptr: unsafe { sys::xQueueGenericCreate(size as u32, size_of::<T>() as u32, 0) },
            is_owned: true,
            _marker: PhantomData,
        }
    }

    /// Wraps an existing queue handle without taking ownership; the handle
    /// is not deleted when this wrapper is dropped.
    ///
    /// # Safety
    ///
    /// `ptr` must be a valid FreeRTOS queue handle whose item size equals
    /// `size_of::<T>()`.
    pub unsafe fn new_borrowed(ptr: sys::QueueHandle_t) -> Self {
        assert!(!ptr.is_null());
        Queue {
            ptr,
            is_owned: false,
            _marker: PhantomData,
        }
    }

    /// Returns the underlying raw queue handle.
    #[inline]
    pub fn as_raw(&self) -> sys::QueueHandle_t {
        self.ptr
    }

    /// Copies `item` to the back of the queue, waiting up to `timeout`
    /// ticks for space (the timeout is ignored in ISR context).
    ///
    /// On success returns whether a higher-priority task was woken (only
    /// ever `true` in ISR context); on failure returns `ESP_FAIL`.
    #[inline]
    pub fn send_back(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
        self.send_generic(item, timeout, 0)
    }

    /// Copies `item` to the front of the queue (jumping the line); see
    /// [`Self::send_back`] for the return-value semantics.
    #[inline]
    pub fn send_front(&self, item: T, timeout: TickType_t) -> Result<bool, EspError> {
        self.send_generic(item, timeout, 1)
    }

    // Shared send path; `copy_position` 0 = back, 1 = front (as used by the
    // two callers above). Dispatches to the `FromISR` variant — which
    // cannot block, so `timeout` is unused — when called from an interrupt.
    #[inline]
    fn send_generic(
        &self,
        item: T,
        timeout: TickType_t,
        copy_position: i32,
    ) -> Result<bool, EspError> {
        let mut hp_task_awoken: i32 = false as i32;
        let success = unsafe {
            if crate::interrupt::active() {
                sys::xQueueGenericSendFromISR(
                    self.ptr,
                    &item as *const T as *const _,
                    &mut hp_task_awoken,
                    copy_position,
                )
            } else {
                sys::xQueueGenericSend(
                    self.ptr,
                    &item as *const T as *const _,
                    timeout,
                    copy_position,
                )
            }
        };
        let success = success == 1;
        let hp_task_awoken = hp_task_awoken == 1;
        match success {
            true => Ok(hp_task_awoken),
            false => Err(EspError::from_infallible::<ESP_FAIL>()),
        }
    }

    /// Removes and returns the front item, waiting up to `timeout` ticks
    /// (ignored in ISR context).
    ///
    /// Returns the item and whether a higher-priority task was woken, or
    /// `None` if the queue stayed empty.
    #[inline]
    pub fn recv_front(&self, timeout: TickType_t) -> Option<(T, bool)> {
        let mut buf = MaybeUninit::uninit();
        let mut hp_task_awoken = false as i32;
        unsafe {
            let success = if crate::interrupt::active() {
                sys::xQueueReceiveFromISR(
                    self.ptr,
                    buf.as_mut_ptr() as *mut _,
                    &mut hp_task_awoken,
                )
            } else {
                sys::xQueueReceive(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
            };
            if success == 1 {
                // SAFETY: on success the queue wrote a complete `T` into `buf`.
                Some((buf.assume_init(), hp_task_awoken == 1))
            } else {
                None
            }
        }
    }

    /// Returns a copy of the front item without removing it, waiting up to
    /// `timeout` ticks (ignored in ISR context).
    #[inline]
    pub fn peek_front(&self, timeout: TickType_t) -> Option<T> {
        let mut buf = MaybeUninit::uninit();
        unsafe {
            let success = if crate::interrupt::active() {
                sys::xQueuePeekFromISR(self.ptr, buf.as_mut_ptr() as *mut _)
            } else {
                sys::xQueuePeek(self.ptr, buf.as_mut_ptr() as *mut _, timeout)
            };
            if success == 1 {
                // SAFETY: on success the queue wrote a complete `T` into `buf`.
                Some(buf.assume_init())
            } else {
                None
            }
        }
    }
}

impl<T> Drop for Queue<T> {
    fn drop(&mut self) {
        // Only owned queues are deleted; borrowed handles are left alone.
        if self.is_owned {
            unsafe { sys::vQueueDelete(self.ptr) }
        }
    }
}
}
pub mod asynch {
use core::future::Future;
use core::num::NonZeroU32;
use core::sync::atomic::{AtomicU32, Ordering};
use core::task::{Context, Poll, Waker};
use atomic_waker::AtomicWaker;
/// A lock-free async notification: up to 32 pending event bits plus a
/// single registered waker.
pub struct Notification {
    // Waker of the (single) task currently awaiting this notification.
    waker: AtomicWaker,
    // Pending notification bits not yet consumed by `poll_wait`.
    notified: AtomicU32,
}

impl Default for Notification {
    fn default() -> Self {
        Self::new()
    }
}

impl Notification {
    /// Creates an empty notification: no pending bits, no registered waker.
    pub const fn new() -> Self {
        Self {
            waker: AtomicWaker::new(),
            notified: AtomicU32::new(0),
        }
    }

    /// Notifies with just the least-significant bit set.
    pub fn notify_lsb(&self) -> bool {
        self.notify(NonZeroU32::new(1).unwrap())
    }

    /// ORs `bits` into the pending set and wakes the registered waker.
    ///
    /// Returns `true` if a waker was registered (and has been woken).
    pub fn notify(&self, bits: NonZeroU32) -> bool {
        if let Some(waker) = self.notify_waker(bits) {
            waker.wake();
            true
        } else {
            false
        }
    }

    /// ORs `bits` into the pending set and *returns* the registered waker
    /// (if any) instead of waking it, letting the caller decide when and
    /// where to wake.
    ///
    /// The bits are published before the waker is taken, so a concurrent
    /// `poll_wait` either observes the new bits or leaves its waker here.
    pub fn notify_waker(&self, bits: NonZeroU32) -> Option<Waker> {
        self.notified.fetch_or(bits.into(), Ordering::SeqCst);
        self.waker.take()
    }

    /// Drops any registered waker and clears all pending bits.
    pub fn reset(&self) {
        self.waker.take();
        self.notified.store(0, Ordering::SeqCst);
    }

    /// Returns a future resolving to the pending bits once any are set.
    #[allow(unused)]
    pub fn wait(&self) -> impl Future<Output = NonZeroU32> + '_ {
        core::future::poll_fn(move |cx| self.poll_wait(cx))
    }

    /// Polls for pending bits, consuming them.
    ///
    /// The waker is registered *before* the bits are swapped out, so a
    /// notification arriving in between still wakes this task instead of
    /// being lost.
    pub fn poll_wait(&self, cx: &Context<'_>) -> Poll<NonZeroU32> {
        self.waker.register(cx.waker());
        let bits = self.notified.swap(0, Ordering::SeqCst);
        if let Some(bits) = NonZeroU32::new(bits) {
            Poll::Ready(bits)
        } else {
            Poll::Pending
        }
    }
}

impl Drop for Notification {
    fn drop(&mut self) {
        // Eagerly release the waker and pending bits.
        self.reset();
    }
}
}