fe_rtos 0.1.1

A simple OS for Arm Cortex-M CPUs
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
extern crate alloc;
extern crate crossbeam_queue;

mod schedule;
mod task_state;
mod tick;

use crate::spinlock::Spinlock;
use crate::syscall;
use crate::task::schedule::{RoundRobin, Scheduler};
use crate::task::task_state::{TaskState, TaskStateStruct};
use crate::task::tick::TickCounter;
use alloc::boxed::Box;
use alloc::collections::LinkedList;
use alloc::sync::Arc;
use alloc::vec;
use alloc::vec::Vec;
use core::mem::size_of;
use core::ptr::null_mut;
use core::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use crossbeam_queue::SegQueue;
use fe_osi::semaphore::Semaphore;

/// A task's stack pointer, viewable three ways over the same machine word:
/// as a borrow of a statically allocated stack, as a raw pointer for
/// reading/writing stack frames, or as a plain integer for address math.
#[repr(C)]
union StackPtr {
    //Borrow of the first word of a statically allocated stack
    reference: &'static usize,
    //Raw-pointer view used when building the initial stack frame
    ptr: *const usize,
    //Integer view used for stack-top arithmetic
    num: usize,
}

/// A single schedulable task.
/// NOTE(review): `#[repr(C)]` presumably keeps `sp` at offset 0 for the
/// context-switch assembly — confirm against the asm side before reordering.
#[repr(C)]
pub(crate) struct Task {
    //Stack pointer
    sp: StackPtr,
    //Holds the smart pointer to a dynamically allocated stack if applicable
    //It needs to be a smart type to easily allow for deallocation of the stack
    //if a task is deleted
    dynamic_stack: Vec<usize>,
    //Stores the param of the task if it was created with the task_spawn syscall
    //This will be a raw pointer to a Box that will need to be freed
    task_info: Option<Box<NewTaskInfo>>,
    //Current run state (Runnable / Asleep / Blocking / Zombie)
    state: TaskStateStruct,
    //Unique process id handed out by get_new_pid()
    pub(crate) pid: usize,
}

// SAFETY: Task is shared via Arc between the kernel task and interrupt
// context; accesses are serialized by the scheduler's spinlock/interrupt
// discipline. NOTE(review): relies on single-core scheduling — confirm.
unsafe impl Send for Task {}
unsafe impl Sync for Task {}

/// Entry point and parameter for a task created via the task_spawn syscall.
/// Ownership is handed to the task itself in `new_task_helper` so the kernel
/// can free `param` when the task dies.
pub(crate) struct NewTaskInfo {
    //Task entry point; cast back to fn(&u32) before the call
    pub ep: *const u32,
    //Heap-allocated parameter; freed via Box::from_raw by the kernel task
    pub param: *mut u32,
}

//Initial xPSR for a brand-new stack frame: only the Thumb-state bit set.
const INIT_XPSR: usize = 0x01000000;
/// The default and recommended stack size for a task.
pub const DEFAULT_STACK_SIZE: usize = 1536;

//Statically allocated stack for the kernel housekeeping task.
static mut KERNEL_STACK: [usize; DEFAULT_STACK_SIZE] = [0; DEFAULT_STACK_SIZE];
//Global tick count, advanced by the systick interrupt handler.
static mut TICKS: TickCounter = TickCounter::new();
//Stand-in returned by the accessors below before the scheduler has
//populated CUR_TASK / NEXT_TASK.
static mut PLACEHOLDER_TASK: Task = Task {
    sp: StackPtr { num: 0 },
    dynamic_stack: Vec::new(),
    task_info: None,
    state: TaskStateStruct::new(),
    pid: 0,
};
//True while a producer is mid-push on NEW_TASK_QUEUE; the kernel task
//checks it so it never pops a half-published entry.
static PUSHING_TASK: AtomicBool = AtomicBool::new(false);
static mut SCHEDULER: RoundRobin = RoundRobin::new();
//Fallback task to run when the scheduler has nothing runnable.
static mut KERNEL_TASK: Option<Arc<Task>> = None;
//Task chosen to run after the next context switch.
static mut NEXT_TASK: Option<Arc<Task>> = None;
//Task currently executing; set by the context-switch code via set_cur_task.
static mut CUR_TASK: Option<&mut Task> = None;
lazy_static! {
    //Freshly created tasks waiting to be adopted by the kernel task.
    static ref NEW_TASK_QUEUE: SegQueue<Arc<Task>> = SegQueue::new();
}

//Platform hook that forces a context switch; points at `idle` (which spins)
//until start_scheduler installs the real trigger.
static mut TRIGGER_CONTEXT_SWITCH: fn() = idle;

extern "C" {
    /// Context switch interrupt handler. This should never be called directly.
    pub fn context_switch();
}

//Mutable view of the currently running task; yields the placeholder task
//until the scheduler has installed a real one.
unsafe fn get_cur_task_mut() -> &'static mut Task {
    if let Some(task) = &mut CUR_TASK {
        task
    } else {
        &mut PLACEHOLDER_TASK
    }
}

//Shared view of the currently running task; called from the context-switch
//code, so it must be `extern "C"` and un-mangled.
#[no_mangle]
pub(crate) unsafe extern "C" fn get_cur_task() -> &'static Task {
    if let Some(task) = &CUR_TASK {
        task
    } else {
        &PLACEHOLDER_TASK
    }
}

//Record which task's context is now loaded on the CPU; called from the
//context-switch code.
#[no_mangle]
pub(crate) unsafe extern "C" fn set_cur_task(new_val: &'static mut Task) {
    let _ = CUR_TASK.replace(new_val);
}

//Task the context-switch code should switch to next; the placeholder is
//returned if the scheduler has not picked one yet.
#[no_mangle]
pub(crate) unsafe extern "C" fn get_next_task() -> &'static Task {
    if let Some(task) = &NEXT_TASK {
        task
    } else {
        &PLACEHOLDER_TASK
    }
}

//Picks the task to run next and stores it in NEXT_TASK.
//Only called with the context-switch spinlock held (see do_context_switch).
unsafe fn scheduler() {
    let default_task = match &KERNEL_TASK {
        Some(task) => Arc::clone(&task),
        //The code should never get here
        None => panic!("No valid KERNEL_TASK in scheduler"),
    };

    //Make sure we don't accidentally drop a task:
    //if NEXT_TASK holds the *last* Arc reference (strong count == 1),
    //overwriting it would deallocate that task, so skip rescheduling
    //this round and let it run.
    let count = match &NEXT_TASK {
        Some(task) => Arc::strong_count(&task),
        None => 0xFF,
    };
    if count == 1 {
        return;
    }

    //Find the next task to run if there is one
    match SCHEDULER.next() {
        Some(task) => {
            NEXT_TASK = Some(task);
        }
        None => {
            //Nothing runnable: fall back to the kernel housekeeping task.
            NEXT_TASK = Some(Arc::clone(&default_task));
        }
    }
}

//Runs the scheduler and then asks the platform to perform the actual switch.
//The spinlock serializes NEXT_TASK updates between thread and interrupt
//context; if the lock is already held the request is simply dropped — the
//current holder will trigger a switch anyway.
pub(crate) unsafe fn do_context_switch() {
    static CS_LOCK: Spinlock = Spinlock::new();

    if CS_LOCK.try_take() {
        scheduler();
        CS_LOCK.give();
        TRIGGER_CONTEXT_SWITCH();
    }
}

/// The systick interrupt handler.
///
/// # Safety
/// This function should not be called directly, and should only be called by
/// the system tick interrupt.
pub unsafe extern "C" fn sys_tick() {
    //Advance the global tick count, then give another task a chance to run.
    TICKS.inc();
    do_context_switch();
}

//Puts the currently running thread to sleep for at least the specified number
//of ticks. Returns whether the state transition was accepted.
pub(crate) fn sleep(sleep_ticks: u64) -> bool {
    unsafe {
        //Absolute tick count at which the kernel may wake this task again.
        let wake_at = TICKS.get() + sleep_ticks;
        let did_set = get_cur_task().state.try_set(TaskState::Asleep(wake_at));
        //Trigger a context switch and wait until that happens
        do_context_switch();
        did_set
    }
}

//Has the currently running thread block until the semaphore it's blocking on
//is available. Returns whether the state transition was accepted.
pub(crate) fn block(sem: *const Semaphore) -> bool {
    unsafe {
        //Mark the task as blocking on `sem`; the kernel task wakes it
        //once the semaphore becomes available.
        let marked = get_cur_task().state.try_set(TaskState::Blocking(sem));
        //Yield the CPU while we wait.
        do_context_switch();
        marked
    }
}

//See section 2.5.7.1 and Figure 2-7 in the TM4C123 datasheet for more information
//Writes the initial stack frame for a brand-new task so the first context
//switch into it "returns" straight to `entry_point`: xPSR, PC, LR, then 13
//words of general-purpose registers. Returns the resulting top-of-stack.
unsafe fn set_initial_stack(
    stack_ptr: *const usize,
    entry_point: *const usize,
    param: *mut usize,
) -> *const usize {
    //Full descending stack: the first word goes just below stack_ptr.
    let mut cur_ptr = ((stack_ptr as usize) - size_of::<usize>()) as *mut usize;

    //Set the xPSR (Thumb bit only)
    *cur_ptr = INIT_XPSR;
    cur_ptr = (cur_ptr as usize - size_of::<usize>()) as *mut usize;

    //Set the PC
    *cur_ptr = entry_point as usize;
    cur_ptr = (cur_ptr as usize - size_of::<usize>()) as *mut usize;

    //Set the LR to idle so a task whose entry point returns lands there
    *cur_ptr = idle as usize;
    cur_ptr = (cur_ptr as usize - size_of::<usize>()) as *mut usize;

    //Fill the 13 general-purpose register slots with `param` so that r0
    //(the AAPCS first-argument register) carries the task parameter.
    //NOTE(review): presumably r12, r3-r0 then r11-r4 — confirm against the
    //context-switch assembly.
    for _i in 0..13 {
        *cur_ptr = param as usize;
        cur_ptr = (cur_ptr as usize - size_of::<usize>()) as *mut usize;
    }
    //cur_ptr walked one word past the last write; step back up to the
    //actual top of the frame.
    (cur_ptr as usize + size_of::<usize>()) as *const usize
}

//Hands out monotonically increasing process ids. Starts at 1 so pid 0 can
//identify the placeholder/kernel task.
fn get_new_pid() -> usize {
    static NEXT_PID: AtomicUsize = AtomicUsize::new(1);
    NEXT_PID.fetch_add(1, Ordering::SeqCst)
}

//Creates a task with a heap-allocated stack of `stack_size` words and
//publishes it for the kernel task to adopt.
//
//`entry_point` is the task's first instruction; `param` is passed to it in
//r0 (may be null). Assumes stack_size is large enough to hold the initial
//frame (16 words).
pub(crate) unsafe fn add_task(
    stack_size: usize,
    entry_point: *const usize,
    param: *mut usize,
) -> Arc<Task> {
    let mut new_task = Task {
        sp: StackPtr { num: 0 },
        dynamic_stack: vec![0; stack_size],
        task_info: None,
        state: TaskStateStruct::new(),
        pid: get_new_pid(),
    };

    //Use the address of the vector's first element as the stack base
    new_task.sp.ptr = new_task.dynamic_stack.as_ptr();
    //Arm uses a full descending stack so we have to start from the top
    new_task.sp.num += size_of::<usize>() * stack_size;

    //Write the initial exception-return frame onto the new stack
    new_task.sp.ptr = set_initial_stack(new_task.sp.ptr, entry_point, param);

    let task_ref = Arc::new(new_task);

    //Flag the push in progress so the kernel task never pops a
    //half-published entry.
    PUSHING_TASK.store(true, Ordering::SeqCst);
    NEW_TASK_QUEUE.push(Arc::clone(&task_ref));
    PUSHING_TASK.store(false, Ordering::SeqCst);

    task_ref
}

//Creates a task whose stack is a statically allocated buffer and publishes
//it for the kernel task to adopt.
//
//`stack_ptr` must refer to the first word of a buffer at least `stack_size`
//words long; `entry_point` is the task's first instruction and `param`
//(if any) is passed to it in r0.
pub(crate) unsafe fn add_task_static(
    stack_ptr: &'static usize,
    stack_size: usize,
    entry_point: *const usize,
    param: Option<*mut usize>,
) -> Arc<Task> {
    let mut new_task = Task {
        sp: StackPtr {
            reference: stack_ptr,
        },
        //No heap stack: the caller owns the static buffer.
        dynamic_stack: Vec::new(),
        task_info: None,
        state: TaskStateStruct::new(),
        pid: get_new_pid(),
    };

    //Arm uses a full descending stack so we have to start from the top
    new_task.sp.num += size_of::<usize>() * stack_size;
    //Tasks created without a parameter get a null pointer in r0
    let param_ptr = param.unwrap_or(null_mut());

    //Write the initial exception-return frame onto the stack
    new_task.sp.ptr = set_initial_stack(new_task.sp.ptr, entry_point, param_ptr);

    let task_ref = Arc::new(new_task);

    //Flag the push in progress so the kernel task never pops a
    //half-published entry.
    PUSHING_TASK.store(true, Ordering::SeqCst);
    NEW_TASK_QUEUE.push(Arc::clone(&task_ref));
    PUSHING_TASK.store(false, Ordering::SeqCst);

    task_ref
}

//This function is called by the task spawn syscall.
//It is the real entry point of a spawned task: it unpacks the user entry
//point and parameter, runs the task, and handles cleaning up after a task
//when it returns.
pub(crate) fn new_task_helper(task_info: Box<NewTaskInfo>) -> ! {
    //NOTE(review): assumes the spawn syscall guarantees `param` points to a
    //live value and `ep` is a valid fn(&u32) — confirm at the syscall layer.
    let task_param: &u32 = unsafe { &*task_info.param };
    let task_ep: fn(&u32) = unsafe { core::mem::transmute(task_info.ep) };
    let task = unsafe { get_cur_task_mut() };

    //Hand ownership of the info to the task itself so the kernel can free
    //the parameter when the task is deleted.
    task.task_info = Some(task_info);

    task_ep(task_param);

    //Entry point returned: ask the OS to delete this task.
    fe_osi::exit();

    //Spin in case we are rescheduled once more before deletion completes.
    loop {}
}

//Marks the currently running task for deletion; the kernel task reaps it on
//its next pass. Returns whether the state transition was accepted.
pub(crate) unsafe fn remove_task() -> bool {
    let marked = get_cur_task().state.try_set(TaskState::Zombie);
    //Yield so the kernel can get around to freeing this task.
    do_context_switch();
    marked
}

/// Starts the FeRTOS scheduler to begin executing tasks.
///
/// trigger_context_switch is a pointer to a function that forces a context switch
/// enable_systick is a closure that enables the systick interrupt
/// reload_val is the number of counts on the systick counter in a tick
pub fn start_scheduler<F: FnOnce(usize)>(
    trigger_context_switch: fn(),
    enable_systick: F,
    reload_val: usize,
) {
    //Install the syscall handlers before any task can trap into the kernel.
    syscall::link_syscalls();

    unsafe {
        //Create the kernel housekeeping task on its statically allocated stack.
        let kernel_task_ref = add_task_static(
            &KERNEL_STACK[0],
            DEFAULT_STACK_SIZE,
            kernel as *const usize,
            None,
        );
        //Add something to the scheduler queue so something can run right away
        KERNEL_TASK = Some(Arc::clone(&kernel_task_ref));
        TRIGGER_CONTEXT_SWITCH = trigger_context_switch;
    }

    //Start the tick interrupt; from here on sys_tick drives scheduling.
    enable_systick(reload_val);

    //This context is abandoned once the first context switch happens.
    loop {}
}

//The kernel housekeeping task: adopts newly spawned tasks, wakes sleeping
//and blocked tasks whose conditions are met, and reaps zombie tasks.
fn kernel(_: &mut u32) {
    let mut first_push = true;
    //All tasks the kernel has adopted; holds one Arc per live task.
    let mut task_list: LinkedList<Arc<Task>> = LinkedList::new();

    loop {
        let mut task_num: usize = 0;
        let mut deleted_task_num: usize = 0;
        let mut delete_task = false;

        //Make sure the kernel gets added to the scheduler when it starts
        //(interrupts off so a tick can't preempt the first adoption pass)
        if first_push {
            unsafe {
                super::disable_interrupts();
            }
        }
        //Add all new tasks to the task list; skip the drain entirely while a
        //producer is mid-push (PUSHING_TASK) to avoid a half-published entry
        while !NEW_TASK_QUEUE.is_empty() && !PUSHING_TASK.load(Ordering::SeqCst) {
            match NEW_TASK_QUEUE.pop() {
                Ok(new_task) => {
                    task_list.push_back(Arc::clone(&new_task));
                    unsafe {
                        SCHEDULER.add_task(new_task);
                    }
                }
                Err(_) => {
                    break;
                }
            }
        }
        if first_push {
            first_push = false;
            unsafe {
                super::enable_interrupts();
            }
        }

        //Walk every task and decide whether its state should change
        for task in task_list.iter() {
            let mut new_state = TaskState::Runnable;
            let mut has_new_state = false;
            //We want to default to Runnable because if a task is in a transition state,
            //it should be scheduled so it can finish transitioning.
            let task_state = task.state.try_get().unwrap_or(TaskState::Runnable);

            match task_state {
                TaskState::Runnable => {}
                TaskState::Asleep(wake_up_ticks) => {
                    unsafe {
                        //If this task is asleep, see if we should wake it up
                        if wake_up_ticks < TICKS.get() {
                            //If we wake the task up, we can run it
                            has_new_state = true;
                            new_state = TaskState::Runnable;
                        }
                    }
                }
                TaskState::Blocking(sem) => {
                    let sem_ref: &Semaphore = unsafe { &*sem };
                    //If the semaphore this task is blocking on is available
                    //to be taken, wake up the task so it can attempt to take it
                    if sem_ref.is_available() {
                        has_new_state = true;
                        new_state = TaskState::Runnable;
                    }
                }
                TaskState::Zombie => {
                    //NOTE(review): only the last zombie seen is recorded, so
                    //at most one task is reaped per pass through this loop.
                    delete_task = true;
                    deleted_task_num = task_num;
                }
            }

            if has_new_state {
                //If this fails, the kernel will just take care of it in the next run through
                task.state.try_set(new_state);
            }

            task_num += 1;
        }

        //Delete a task if there's a task to be deleted
        if delete_task {
            //If this task has been removed, remove it from the list
            //and rust will dealloc almost everything
            let removed_task = task_list.remove(deleted_task_num);

            //We still need to manually dealloc the parameter though
            match &removed_task.task_info {
                Some(info) => {
                    if !info.param.is_null() {
                        unsafe {
                            core::mem::drop(Box::from_raw(info.param));
                        }
                    }
                }
                None => (),
            }

            //Clear any remaining leftover dynamically allocated data
            unsafe {
                crate::fe_alloc::clear_deleted_task(removed_task.pid);
            }
        }

        //Going through the loop multiple times without anything else running is
        //useless since their states will not have changed, so do a context switch
        unsafe {
            do_context_switch();
        }
    }
}

//Spin forever. Used as the LR for brand-new tasks (a task whose entry point
//returns lands here) and as the placeholder context-switch trigger before
//start_scheduler installs the real one.
fn idle() {
    loop {}
}