// embedded_threads/lib.rs

1#![no_std]
2#![feature(inline_const)]
3#![feature(naked_functions)]
4
5use core::arch::asm;
6use core::cell::UnsafeCell;
7use core::ptr::write_volatile;
8
9use cortex_m_semihosting::hprintln as println;
10
11pub use riot_rs_runqueue::ThreadId;
12use riot_rs_runqueue::{RunQueue, RunqueueId};
13
14mod arch;
15pub use arch::{interrupt, schedule, CriticalSection, Mutex};
16use threadlist::ThreadList;
17
18pub mod lock;
19mod threadlist;
20
/// global defining the number of possible priority levels
pub const SCHED_PRIO_LEVELS: usize = 8;

/// global defining the number of threads that can be created
pub const THREADS_NUMOF: usize = 8;

/// Global thread registry. Access only through `Threads::get()` /
/// `Threads::get_mut()` while holding a `CriticalSection` token.
pub static THREADS: Mutex<UnsafeCell<Threads>> = Mutex::new(UnsafeCell::new(Threads::new()));
28
/// Bookkeeping for all threads: slots, runqueue, and the current thread.
pub struct Threads {
    /// global thread runqueue
    runqueue: RunQueue<SCHED_PRIO_LEVELS, THREADS_NUMOF>,
    /// fixed-size pool of thread slots, indexed by `ThreadId`
    threads: [Thread; THREADS_NUMOF],
    /// id of the thread currently executing, `None` before threading starts
    current_thread: Option<ThreadId>,
}
35
36impl Threads {
37    const fn new() -> Self {
38        Self {
39            runqueue: RunQueue::new(),
40            threads: [const { Thread::default() }; THREADS_NUMOF],
41            current_thread: None,
42        }
43    }
44
45    /// get the global THREADS list, mutable
46    #[allow(clippy::mut_from_ref)]
47    pub(crate) unsafe fn get_mut(cs: &CriticalSection) -> &mut Threads {
48        &mut *THREADS.borrow(cs).get()
49    }
50
51    pub(crate) fn get(cs: &CriticalSection) -> &Threads {
52        unsafe { &*THREADS.borrow(cs).get() }
53    }
54
55    pub(crate) fn by_pid_unckecked(&mut self, thread_id: ThreadId) -> &mut Thread {
56        &mut self.threads[thread_id as usize]
57    }
58
59    pub(crate) fn current(&mut self) -> Option<&mut Thread> {
60        self.current_thread
61            .map(|tid| &mut self.threads[tid as usize])
62    }
63
64    pub fn current_pid(&self) -> Option<ThreadId> {
65        self.current_thread
66    }
67
68    /// Create a new thread
69    pub fn create(
70        &mut self,
71        func: usize,
72        arg: usize,
73        stack: &mut [u8],
74        prio: u8,
75    ) -> Option<&mut Thread> {
76        if let Some((thread, pid)) = self.get_unused() {
77            thread.sp = Thread::setup_stack(stack, func, arg);
78            thread.prio = prio;
79            thread.pid = pid;
80            thread.state = ThreadState::Paused;
81
82            Some(thread)
83        } else {
84            None
85        }
86    }
87
88    // get an unused ThreadId / Thread slot
89    fn get_unused(&mut self) -> Option<(&mut Thread, ThreadId)> {
90        for i in 0..THREADS_NUMOF {
91            if self.threads[i].state == ThreadState::Invalid {
92                return Some((&mut self.threads[i], i as ThreadId));
93            }
94        }
95        None
96    }
97
98    /// set state of thread
99    ///
100    /// This function handles adding/removing the thread to the Runqueue depending
101    /// on its previous or new state.
102    pub(crate) fn set_state(&mut self, pid: ThreadId, state: ThreadState) {
103        let thread = &mut self.threads[pid as usize];
104        let old_state = thread.state;
105        thread.state = state;
106        if old_state != ThreadState::Running && state == ThreadState::Running {
107            //println!("adding {} to runqueue", thread.pid);
108
109            self.runqueue.add(thread.pid, thread.prio);
110        } else if old_state == ThreadState::Running && state != ThreadState::Running {
111            self.runqueue.del(thread.pid, thread.prio);
112        }
113    }
114
115    fn wait_on(&mut self, thread_id: ThreadId, thread_list: &mut ThreadList, state: ThreadState) {
116        let thread = &mut self.threads[thread_id as usize];
117        // TODO: sort by priority
118        thread.next = thread_list.head;
119        thread_list.head = Some(thread_id);
120        self.set_state(thread_id, state);
121        arch::schedule();
122    }
123
124    fn current_wait_on(&mut self, thread_list: &mut ThreadList, state: ThreadState) {
125        let thread_id = self.current_pid().unwrap();
126        self.wait_on(thread_id, thread_list, state)
127    }
128
129    fn wake_pid(&mut self, thread_id: ThreadId) {
130        let thread = &mut self.threads[thread_id as usize];
131        thread.next = None;
132        self.set_state(thread_id, ThreadState::Running);
133        arch::schedule();
134    }
135}
136
137/// start threading
138///
139/// Supposed to be started early on by OS startup code.
140///
141/// # Safety
142/// This may only be called once.
143pub unsafe fn start_threading() {
144    // faking a critical section to get THREADS
145    let cs = CriticalSection::new();
146    let threads = Threads::get_mut(&cs);
147
148    let next_pid = threads.runqueue.get_next().unwrap();
149    threads.current_thread = Some(next_pid);
150    let next_sp = threads.threads[next_pid as usize].sp;
151    arch::start_threading(next_sp);
152}
153
/// scheduler
///
/// Called from the arch context-switch trampoline with the outgoing
/// thread's stack pointer in `old_sp`. Picks the next runnable thread,
/// saves/loads bookkeeping, and hands three pointers back to the assembly
/// switcher via r0-r2 (see comment at the bottom).
#[no_mangle]
unsafe fn sched(old_sp: usize) {
    let cs = CriticalSection::new();

    let next_pid;

    // Spin until the runqueue yields a runnable thread. While it is empty,
    // briefly re-enable interrupts so pending ISRs can run and make a
    // thread runnable again.
    loop {
        {
            let threads = Threads::get_mut(&cs);
            if let Some(pid) = threads.runqueue.get_next() {
                next_pid = pid;
                break;
            }
        }
        //pm_set_lowest();
        cortex_m::interrupt::enable();
        // pending interrupts would now get to run their ISRs
        cortex_m::interrupt::disable();
    }

    let threads = Threads::get_mut(&cs);
    let current_high_regs;

    if let Some(current_pid) = threads.current_pid() {
        if next_pid == current_pid {
            // Same thread stays scheduled: signal "no switch" to the
            // assembly side by putting 0 in r0.
            asm!("", in("r0") 0);
            return;
        }
        // Save the outgoing thread's stack pointer and switch bookkeeping.
        threads.threads[current_pid as usize].sp = old_sp;
        threads.current_thread = Some(next_pid);
        current_high_regs = threads.threads[current_pid as usize].high_regs.as_ptr();
    } else {
        // First switch ever: there is no outgoing context to save.
        current_high_regs = core::ptr::null();
    }
    let next = &threads.threads[next_pid as usize];

    // PendSV expects these three pointers in r0, r1 and r2:
    // r0= &current.high_regs (null if there is no outgoing thread)
    // r1= &next.high_regs
    // r2= &next.sp
    //
    // write to registers manually, as ABI would return the values via stack
    asm!("", in("r0") current_high_regs, in("r1") next.high_regs.as_ptr(), in("r2")next.sp);
}
202
203//}
204
/// Main struct for holding thread data
#[derive(Debug)]
pub struct Thread {
    /// saved stack pointer (only meaningful while the thread is not running)
    sp: usize,
    /// storage for 8 registers saved across context switches
    /// (presumably the callee-saved r4-r11 — confirm against the arch asm)
    high_regs: [usize; 8],
    /// current lifecycle state; drives runqueue membership (see `set_state`)
    pub(crate) state: ThreadState,
    /// intrusive link used by `ThreadList` wait queues
    pub next: Option<ThreadId>,
    /// scheduling priority (runqueue level)
    pub prio: RunqueueId,
    /// this thread's own id (== its index in `Threads::threads`)
    pub pid: ThreadId,
}
215
/// Possible states of a thread
// `Eq` added alongside `PartialEq`: the equality is total, and clippy's
// `derive_partial_eq_without_eq` flags the omission.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum ThreadState {
    /// Slot unused, or thread has exited.
    Invalid,
    /// Runnable; tracked in the runqueue.
    Running,
    /// Created but not made runnable yet.
    Paused,
    /// Blocked on a lock's wait list.
    LockWait,
}
224
impl Thread {
    /// create a default Thread object
    ///
    /// `const` so it can initialize the global thread array in `Threads::new`.
    pub const fn default() -> Thread {
        Thread {
            sp: 0,
            state: ThreadState::Invalid,
            high_regs: [0; 8],
            next: None,
            prio: 0,
            pid: 0,
        }
    }

    /// Sets up stack for newly created threads.
    ///
    /// After running this, the stack should look as if the thread was
    /// interrupted by an ISR. On the next return, it starts executing
    /// `func`.
    ///
    /// Returns the initial stack pointer value for the thread.
    ///
    /// NOTE(review): the frame sits 36 bytes below the stack top (8 words
    /// written + 4 spare bytes); presumably this keeps SP 8-byte aligned
    /// per AAPCS — confirm callers pass an 8-byte-aligned stack buffer.
    fn setup_stack(stack: &mut [u8], func: usize, arg: usize) -> usize {
        let stack_start = stack.as_ptr() as usize;
        let stack_pos = (stack_start + stack.len() - 36) as *mut usize;

        // Fake the hardware-stacked exception frame: R0-R3, R12, LR, PC, xPSR.
        // LR points at `cleanup` so a returning thread func gets reaped.
        unsafe {
            write_volatile(stack_pos.offset(0), arg); // -> R0
            write_volatile(stack_pos.offset(1), 1); // -> R1
            write_volatile(stack_pos.offset(2), 2); // -> R2
            write_volatile(stack_pos.offset(3), 3); // -> R3
            write_volatile(stack_pos.offset(4), 12); // -> R12
            write_volatile(stack_pos.offset(5), cleanup as usize); // -> LR
            write_volatile(stack_pos.offset(6), func); // -> PC
            write_volatile(stack_pos.offset(7), 0x01000000); // -> APSR (Thumb bit set)
        }

        stack_pos as usize
    }
}
261
/// Conversion of a thread-entry argument into the raw `usize` that is
/// placed in the new thread's R0 slot by `setup_stack`.
pub trait Arguable {
    fn into_arg(self) -> usize;
}
265
/// A `usize` is passed through unchanged.
impl Arguable for usize {
    fn into_arg(self) -> usize {
        self
    }
}
271
/// A reference is passed as its address.
///
/// NOTE(review): the lifetime is erased here — nothing ties the referent's
/// lifetime to the spawned thread's lifetime; verify callers keep the
/// referent alive long enough.
impl<T> Arguable for &T {
    fn into_arg(self) -> usize {
        self as *const T as usize
    }
}
277
278pub fn thread_create<T: Arguable + Send>(func: fn(arg: T), arg: T, stack: &mut [u8], prio: u8) {
279    let arg = arg.into_arg();
280    thread_create_raw(func as usize, arg, stack, prio)
281}
282
283pub fn thread_create_raw(func: usize, arg: usize, stack: &mut [u8], prio: u8) {
284    interrupt::free(|cs| {
285        let threads = unsafe { Threads::get_mut(cs) };
286        let pid = threads.create(func, arg, stack, prio).unwrap().pid;
287        threads.set_state(pid, ThreadState::Running);
288    });
289}
290
291pub fn current_pid() -> Option<ThreadId> {
292    interrupt::free(|cs| unsafe { Threads::get_mut(cs) }.current_pid())
293}
294
/// thread cleanup function
///
/// This gets hooked into a newly created thread stack so it gets called when
/// the thread function returns.
///
/// NOTE(review): the slot is marked `Invalid` (free for reuse) while this
/// function is still executing on the thread's stack; presumably safe
/// because reuse only happens under `interrupt::free` and the final
/// `schedule()` never returns here — confirm against the arch switcher.
fn cleanup() -> ! {
    let pid = interrupt::free(|cs| {
        let threads = unsafe { Threads::get_mut(cs) };
        let thread_id = threads.current_pid().unwrap();
        // Freeing the slot also removes the thread from the runqueue
        // (Running -> Invalid transition in set_state).
        threads.set_state(thread_id, ThreadState::Invalid);
        thread_id
    });

    println!("thread {}: exited", pid);

    arch::schedule();

    // schedule() switches away permanently; this point is never reached.
    unreachable!();
}