preemptive_threads/thread.rs

pub type ThreadId = usize;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ThreadState {
    Ready,
    Running,
    Blocked,
    Finished,
}

/// Saved CPU state for a thread: the callee-saved registers of the
/// System V AMD64 ABI (rbp, rbx, r12-r15) plus rflags, rip, and the
/// stack pointer. #[repr(C)] keeps the field offsets stable for any
/// context-switch code that accesses them by offset.
#[repr(C)]
pub struct ThreadContext {
    pub rsp: u64,
    pub rbp: u64,
    pub rbx: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,
    pub rflags: u64,
    pub rip: u64,
}
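
// A sketch (not the crate's actual code) of the kind of context-switch
// routine this #[repr(C)] layout is designed around. `switch_context` is a
// hypothetical name; the real switch must also agree with the stack image
// that initialize_stack builds below. Offsets follow the field order above,
// and a real kernel would keep interrupts masked while this runs.
core::arch::global_asm!(
    ".global switch_context",
    "switch_context:",
    // System V AMD64 calling convention:
    // rdi = &mut old ThreadContext, rsi = &new ThreadContext.
    "mov [rdi + 0x08], rbp",
    "mov [rdi + 0x10], rbx",
    "mov [rdi + 0x18], r12",
    "mov [rdi + 0x20], r13",
    "mov [rdi + 0x28], r14",
    "mov [rdi + 0x30], r15",
    "pushfq",
    "pop qword ptr [rdi + 0x38]",   // rflags
    "mov rax, [rsp]",               // our return address...
    "mov [rdi + 0x40], rax",        // ...is where the old thread resumes
    "lea rax, [rsp + 8]",           // rsp as it will be after that 'return'
    "mov [rdi + 0x00], rax",
    // Load the incoming thread's state.
    "mov rsp, [rsi + 0x00]",
    "mov rbp, [rsi + 0x08]",
    "mov rbx, [rsi + 0x10]",
    "mov r12, [rsi + 0x18]",
    "mov r13, [rsi + 0x20]",
    "mov r14, [rsi + 0x28]",
    "mov r15, [rsi + 0x30]",
    "push qword ptr [rsi + 0x38]",
    "popfq",                        // restore rflags
    "jmp qword ptr [rsi + 0x40]",   // resume at the incoming thread's rip
);

extern "C" {
    // Hypothetical declaration matching the sketch above.
    fn switch_context(old: *mut ThreadContext, new: *const ThreadContext);
}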

pub struct Thread {
    pub id: ThreadId,
    pub state: ThreadState,
    pub context: ThreadContext,
    pub stack: &'static mut [u8],
    pub stack_top: *mut u8,
    pub stack_bottom: *mut u8,
    pub entry_point: fn(),
    pub priority: u8,
    /// Magic value written at the lowest stack address; if it is ever
    /// overwritten, the stack has overflowed.
    pub stack_guard: u64,
    /// Threads blocked waiting for this thread to finish (at most four).
    pub join_waiters: [Option<ThreadId>; 4],
}

impl Thread {
    pub const STACK_SIZE: usize = 64 * 1024;

    pub fn new(id: ThreadId, stack: &'static mut [u8], entry_point: fn(), priority: u8) -> Self {
        // The stack grows downward: stack_top points one past the highest
        // byte, stack_bottom at the lowest.
        let stack_top = unsafe { stack.as_mut_ptr().add(stack.len()) };
        let stack_bottom = stack.as_mut_ptr();
        let stack_guard = 0xDEADBEEFCAFEBABE;

        // Plant the guard word at the lowest address so check_stack_overflow
        // can detect the stack growing into it (this assumes the buffer is
        // at least 8-byte aligned).
        unsafe {
            core::ptr::write(stack_bottom as *mut u64, stack_guard);
        }

        let mut thread = Thread {
            id,
            state: ThreadState::Ready,
            context: ThreadContext {
                rsp: 0,
                rbp: 0,
                rbx: 0,
                r12: 0,
                r13: 0,
                r14: 0,
                r15: 0,
                rflags: 0x202, // IF plus the always-one bit 1: interrupts enabled
                rip: 0,
            },
            stack,
            stack_top,
            stack_bottom,
            entry_point,
            priority,
            stack_guard,
            join_waiters: [None; 4],
        };

        thread.initialize_stack();
        thread
    }

    fn initialize_stack(&mut self) {
        unsafe {
            // stack_top is assumed 8-byte aligned so it can carry u64 slots.
            let mut stack_ptr = self.stack_top as *mut u64;

            // Outermost return address: if control ever unwinds this far,
            // thread_wrapper retires the thread instead of returning into
            // garbage.
            stack_ptr = stack_ptr.sub(1);
            *stack_ptr = thread_wrapper as usize as u64;

            // Dummy slot (an initial frame pointer of zero).
            stack_ptr = stack_ptr.sub(1);
            *stack_ptr = 0;

            // The thread's entry function.
            stack_ptr = stack_ptr.sub(1);
            *stack_ptr = self.entry_point as usize as u64;

            // Six zeroed slots for the general-purpose registers that the
            // context-switch code pops on first entry; the slot order must
            // match that assembly exactly.
            for _ in 0..6 {
                stack_ptr = stack_ptr.sub(1);
                *stack_ptr = 0;
            }

            // Initial RFLAGS image: 0x202 = IF plus the always-one bit 1,
            // so the thread starts with interrupts enabled.
            stack_ptr = stack_ptr.sub(1);
            *stack_ptr = 0x202;

            // The new thread begins with rsp at this image and rip at
            // thread_entry.
            self.context.rsp = stack_ptr as u64;
            self.context.rip = thread_entry as usize as u64;
        }
    }
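
    // Initial stack image built above (8-byte slots, higher addresses first):
    //
    //   stack_top - 0x08   thread_wrapper   outermost return address
    //   stack_top - 0x10   0                dummy slot
    //   stack_top - 0x18   entry_point
    //   stack_top - 0x20   0   \
    //   ...                      six zeroed register slots
    //   stack_top - 0x48   0   /
    //   stack_top - 0x50   0x202            initial RFLAGS  <- context.rsp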

    pub fn is_runnable(&self) -> bool {
        self.state == ThreadState::Ready || self.state == ThreadState::Running
    }

    /// Returns true if the guard word at the bottom of the stack has been
    /// overwritten, i.e. the stack overflowed at some point.
    pub fn check_stack_overflow(&self) -> bool {
        unsafe {
            let guard_value = core::ptr::read(self.stack_bottom as *const u64);
            guard_value != self.stack_guard
        }
    }
}
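
// A sketch of constructing a Thread, under assumptions: the 64 KiB stack
// comes from a static buffer here, while the real kernel presumably hands
// stacks out from an allocator. `example_spawn` and `worker` are
// illustrative names, not part of the crate.
fn example_spawn() -> Thread {
    static mut STACK: [u8; Thread::STACK_SIZE] = [0; Thread::STACK_SIZE];

    fn worker() {
        // Thread body would go here.
    }

    // Safety: call this at most once, or two threads would share one stack.
    let stack: &'static mut [u8] = unsafe { &mut *core::ptr::addr_of_mut!(STACK) };
    let thread = Thread::new(1, stack, worker, 0);
    // The guard word was just planted, so no overflow can be reported yet.
    debug_assert!(!thread.check_stack_overflow());
    thread
}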

extern "C" fn thread_entry() {
    // Every new thread starts here (see initialize_stack): look up the
    // current thread in the scheduler and run its entry function.
    unsafe {
        let scheduler = crate::scheduler::SCHEDULER.get();
        if let Some(current_id) = scheduler.get_current_thread() {
            if let Some(thread) = scheduler.get_thread(current_id) {
                (thread.entry_point)();
            }
        }
    }
    // The entry function returned: retire the thread.
    crate::sync::exit_thread();
}

extern "C" fn thread_wrapper() -> ! {
    // Safety net planted as the outermost return address on every thread
    // stack. thread_entry already calls exit_thread, so the second call
    // only runs if control somehow returns here.
    thread_entry();
    crate::sync::exit_thread();
}
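
// What thread_entry and thread_wrapper expect from crate::sync::exit_thread,
// sketched under assumptions: `get_thread_mut` and `unblock` are hypothetical
// scheduler methods, and the real implementation elsewhere in the crate may
// differ. The point is the contract: mark the thread Finished, wake its
// join_waiters, and never return.
fn exit_thread_sketch() -> ! {
    let waiters = unsafe {
        let scheduler = crate::scheduler::SCHEDULER.get();
        match scheduler.get_current_thread() {
            // Hypothetical mutable lookup; this file only shows the
            // immutable get_thread.
            Some(id) => match scheduler.get_thread_mut(id) {
                Some(thread) => {
                    thread.state = ThreadState::Finished;
                    core::mem::take(&mut thread.join_waiters)
                }
                None => [None; 4],
            },
            None => [None; 4],
        }
    };
    unsafe {
        let scheduler = crate::scheduler::SCHEDULER.get();
        for waiter_id in waiters.into_iter().flatten() {
            scheduler.unblock(waiter_id); // hypothetical wake-up call
        }
    }
    // A finished thread must never run again; park until the scheduler
    // switches away for the last time.
    loop {
        core::hint::spin_loop();
    }
}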