//! Task structure with atomically accessed fields and exported field offsets,
//! for consumers that need the raw layout (e.g. assembly or FFI).
#![no_std]

use core::{
    mem::offset_of,
    sync::atomic::{AtomicU32, AtomicUsize, Ordering},
};

/// Architecture-neutral base of a task control block; `#[repr(C)]` keeps the
/// field offsets exported below stable.
#[derive(Default)]
#[repr(C)]
pub struct TaskBase {
    /// Saved stack pointer for this task.
    stack: AtomicUsize,
    /// Address-space limit for the thread.
    thread_addr_limit: AtomicUsize,
    /// Per-thread flag bits (see `set_flags_bit`).
    thread_flags: AtomicU32,
    /// Preemption count (incremented to disable preemption).
    thread_preempt: AtomicU32,
}

impl TaskBase {
    /// Creates a zeroed `TaskBase`; `const` so it can initialize statics.
    pub const fn new() -> Self {
        Self {
            stack: AtomicUsize::new(0),
            thread_addr_limit: AtomicUsize::new(0),
            thread_flags: AtomicU32::new(0),
            thread_preempt: AtomicU32::new(0),
        }
    }

    /// Stores the task's stack pointer.
    #[inline(always)]
    pub fn set_stack(&self, stack_base: usize) {
        self.stack.store(stack_base, Ordering::Relaxed);
    }

    /// Loads the task's stack pointer.
    #[inline(always)]
    pub fn stack(&self) -> usize {
        self.stack.load(Ordering::Relaxed)
    }

    /// Overwrites the preemption count.
    #[inline(always)]
    pub fn set_preempt(&self, val: u32) {
        self.thread_preempt.store(val, Ordering::Relaxed);
    }

    /// Reads the preemption count.
    #[inline(always)]
    pub fn preempt(&self) -> u32 {
        self.thread_preempt.load(Ordering::Relaxed)
    }

    /// Atomically adds `val` to the preemption count.
    #[inline(always)]
    pub fn preempt_add(&self, val: u32) {
        self.thread_preempt.fetch_add(val, Ordering::Relaxed);
    }

    /// Atomically subtracts `val` from the preemption count.
    #[inline(always)]
    pub fn preempt_sub(&self, val: u32) {
        self.thread_preempt.fetch_sub(val, Ordering::Relaxed);
    }

    /// Replaces the whole flag word.
    #[inline(always)]
    pub fn set_flags(&self, flags: u32) {
        self.thread_flags.store(flags, Ordering::Relaxed);
    }

    /// Atomically sets flag bit `bit` (a bit index, not a mask).
    #[inline(always)]
    pub fn set_flags_bit(&self, bit: u32) {
        self.thread_flags.fetch_or(1 << bit, Ordering::Relaxed);
    }

    /// Reads the whole flag word.
    #[inline(always)]
    pub fn flags(&self) -> u32 {
        self.thread_flags.load(Ordering::Relaxed)
    }

    /// Returns whether flag bit `bit` is set (a bit index, not a mask).
    #[inline(always)]
    pub fn test_flags_bit(&self, bit: u32) -> bool {
        self.thread_flags.load(Ordering::Relaxed) & (1 << bit) != 0
    }
}
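
// Illustrative sketch (assumption, not part of the original API): the preempt
// counter pairs naturally with an RAII guard, so a scheduler built on
// `TaskBase` could disable preemption for a scope. `PreemptGuard` here is a
// hypothetical helper, shown only to demonstrate the add/sub pairing:
//
//     struct PreemptGuard<'a>(&'a TaskBase);
//     impl<'a> PreemptGuard<'a> {
//         fn new(t: &'a TaskBase) -> Self {
//             t.preempt_add(1); // enter: bump the disable count
//             Self(t)
//         }
//     }
//     impl Drop for PreemptGuard<'_> {
//         fn drop(&mut self) {
//             self.0.preempt_sub(1); // leave: restore the count
//         }
//     }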

/// Byte offset of `TaskBase::stack`.
pub const TSK_STACK: usize = offset_of!(TaskBase, stack);
/// Byte offset of `TaskBase::thread_addr_limit`.
pub const TSK_TI_ADDR_LIMIT: usize = offset_of!(TaskBase, thread_addr_limit);
/// Byte offset of `TaskBase::thread_flags`.
pub const TSK_TI_FLAGS: usize = offset_of!(TaskBase, thread_flags);
/// Byte offset of `TaskBase::thread_preempt`.
pub const TSK_TI_PREEMPT: usize = offset_of!(TaskBase, thread_preempt);
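
// Illustrative note (assumption, not from the original source): constants in
// this style let assembly index into the task struct without hard-coding the
// layout, e.g. as `const` operands to `core::arch::global_asm!` (AArch64
// shown, hypothetical context-switch code):
//
//     core::arch::global_asm!(
//         "ldr w1, [x0, {ti_flags}]", // load thread_flags; task pointer in x0
//         ti_flags = const TSK_TI_FLAGS,
//     );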

#[cfg(test)]
mod test {
    use super::*;

    // The expected values assume 64-bit pointers (an 8-byte `AtomicUsize`),
    // so the layout check is gated on the target's pointer width.
    #[test]
    #[cfg(target_pointer_width = "64")]
    fn test() {
        assert_eq!(TSK_STACK, 0);
        assert_eq!(TSK_TI_ADDR_LIMIT, 8);
        assert_eq!(TSK_TI_FLAGS, 16);
        assert_eq!(TSK_TI_PREEMPT, 20);
    }
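
    // Added sketch (not in the original): exercises the atomic accessors,
    // independent of target layout. Relaxed ordering suffices here because
    // every read observes a write made on the same thread.
    #[test]
    fn accessors_roundtrip() {
        let task = TaskBase::new();

        task.set_stack(0xdead_0000);
        assert_eq!(task.stack(), 0xdead_0000);

        // Flag helpers take a bit index, not a mask.
        task.set_flags(0);
        task.set_flags_bit(3);
        assert!(task.test_flags_bit(3));
        assert!(!task.test_flags_bit(4));
        assert_eq!(task.flags(), 1 << 3);

        // The preempt counter nests like a disable/enable pair.
        task.set_preempt(0);
        task.preempt_add(1);
        task.preempt_add(1);
        task.preempt_sub(1);
        assert_eq!(task.preempt(), 1);
    }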
}