#![allow(clippy::type_complexity)]

use alloc::boxed::Box;
use alloc::collections::{BTreeMap, VecDeque};
use alloc::rc::Rc;
use alloc::sync::Arc;
#[cfg(feature = "smp")]
use alloc::vec::Vec;
use core::cell::RefCell;
use core::future::{self, Future};
use core::ptr;
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
use core::sync::atomic::AtomicBool;
use core::sync::atomic::{AtomicI32, AtomicU32, Ordering};
use core::task::Poll::Ready;
use core::task::ready;

use ahash::RandomState;
use crossbeam_utils::Backoff;
use hashbrown::HashMap;
use hermit_sync::*;
#[cfg(target_arch = "riscv64")]
use riscv::register::sstatus;

use crate::arch::core_local::*;
#[cfg(target_arch = "riscv64")]
use crate::arch::switch::switch_to_task;
#[cfg(target_arch = "x86_64")]
use crate::arch::switch::{switch_to_fpu_owner, switch_to_task};
use crate::arch::{get_processor_count, interrupts};
use crate::fd::{FileDescriptor, ObjectInterface};
use crate::kernel::scheduler::TaskStacks;
use crate::scheduler::task::*;
use crate::{arch, io};

pub mod task;

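/// Total number of spawned tasks; the per-core idle tasks are not counted.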
static NO_TASKS: AtomicU32 = AtomicU32::new(0);
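/// Input queues of all per-core schedulers, indexed by core ID. Other cores use
/// these to hand over newly created tasks and wakeup requests.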
#[cfg(feature = "smp")]
static SCHEDULER_INPUTS: SpinMutex<Vec<&InterruptTicketMutex<SchedulerInput>>> =
    SpinMutex::new(Vec::new());
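/// References to the per-core `hlt` flags, consumed by `take_core_hlt_state` to
/// check whether a core was halted.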
#[cfg(all(target_arch = "x86_64", feature = "smp"))]
static CORE_HLT_STATE: SpinMutex<Vec<&AtomicBool>> = SpinMutex::new(Vec::new());
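/// Map from task ID to the queue of tasks that are waiting (via `join`) for that task to finish.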
static WAITING_TASKS: InterruptTicketMutex<BTreeMap<TaskId, VecDeque<TaskHandle>>> =
    InterruptTicketMutex::new(BTreeMap::new());
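/// Map from task ID to its handle.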
static TASKS: InterruptTicketMutex<BTreeMap<TaskId, TaskHandle>> =
    InterruptTicketMutex::new(BTreeMap::new());

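/// Unique identifier of a CPU core.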
pub type CoreId = u32;

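/// Per-core scheduler inbox that other cores fill with newly spawned tasks and with
/// blocked tasks that should be woken up on this core.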
#[cfg(feature = "smp")]
pub(crate) struct SchedulerInput {
    new_tasks: VecDeque<NewTask>,
    wakeup_tasks: VecDeque<TaskHandle>,
}

#[cfg(feature = "smp")]
impl SchedulerInput {
    pub fn new() -> Self {
        Self {
            new_tasks: VecDeque::new(),
            wakeup_tasks: VecDeque::new(),
        }
    }
}

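/// Scheduler state owned by a single core: the currently running task, the idle task,
/// the ready queue, locally finished tasks awaiting cleanup, and blocked tasks.
///
/// The struct is aligned to a cache line (128 bytes on x86_64 and aarch64, 64 bytes
/// elsewhere), which avoids false sharing between cores.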
#[cfg_attr(any(target_arch = "x86_64", target_arch = "aarch64"), repr(align(128)))]
#[cfg_attr(
    not(any(target_arch = "x86_64", target_arch = "aarch64")),
    repr(align(64))
)]
pub(crate) struct PerCoreScheduler {
    #[cfg(feature = "smp")]
    core_id: CoreId,
    current_task: Rc<RefCell<Task>>,
    idle_task: Rc<RefCell<Task>>,
    #[cfg(target_arch = "x86_64")]
    fpu_owner: Rc<RefCell<Task>>,
    ready_queue: PriorityTaskQueue,
    finished_tasks: VecDeque<Rc<RefCell<Task>>>,
    blocked_tasks: BlockedTaskQueue,
}

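/// Scheduler operations implemented on `&mut PerCoreScheduler` that may switch away
/// from the current task, such as rescheduling or terminating it.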
pub(crate) trait PerCoreSchedulerExt {
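    /// Triggers the scheduler to switch to the next ready task.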
    fn reschedule(self);

    #[cfg(any(feature = "tcp", feature = "udp"))]
    fn add_network_timer(self, wakeup_time: Option<u64>);

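    /// Terminates the current task with the given exit code and switches to the next
    /// task; never returns.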
    fn exit(self, exit_code: i32) -> !;
}

impl PerCoreSchedulerExt for &mut PerCoreScheduler {
    #[cfg(target_arch = "x86_64")]
    fn reschedule(self) {
        without_interrupts(|| {
            if let Some(last_stack_pointer) = self.scheduler() {
                let (new_stack_pointer, is_idle) = {
                    let borrowed = self.current_task.borrow();
                    (
                        borrowed.last_stack_pointer,
                        borrowed.status == TaskStatus::Idle,
                    )
                };

                if is_idle || Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
                    unsafe {
                        switch_to_fpu_owner(
                            last_stack_pointer,
                            new_stack_pointer.as_u64() as usize,
                        );
                    }
                } else {
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_u64() as usize);
                    }
                }
            }
        });
    }

    #[cfg(target_arch = "aarch64")]
    fn reschedule(self) {
        use core::arch::asm;

        use arm_gic::gicv3::{GicV3, IntId, SgiTarget};

        use crate::interrupts::SGI_RESCHED;

        unsafe {
            asm!("dsb nsh", "isb", options(nostack, nomem, preserves_flags));
        }

        let reschedid = IntId::sgi(SGI_RESCHED.into());
        GicV3::send_sgi(reschedid, SgiTarget::List {
            affinity3: 0,
            affinity2: 0,
            affinity1: 0,
            target_list: 0b1,
        });

        interrupts::enable();
    }

    #[cfg(target_arch = "riscv64")]
    fn reschedule(self) {
        without_interrupts(|| self.scheduler());
    }

    #[cfg(any(feature = "tcp", feature = "udp"))]
    fn add_network_timer(self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks.add_network_timer(wakeup_time);
        });
    }

    fn exit(self, exit_code: i32) -> ! {
        without_interrupts(|| {
            let mut current_task_borrowed = self.current_task.borrow_mut();
            assert_ne!(
                current_task_borrowed.status,
                TaskStatus::Idle,
                "Trying to terminate the idle task"
            );

            debug!(
                "Finishing task {} with exit code {}",
                current_task_borrowed.id, exit_code
            );
            current_task_borrowed.status = TaskStatus::Finished;
            NO_TASKS.fetch_sub(1, Ordering::SeqCst);

            let current_id = current_task_borrowed.id;
            drop(current_task_borrowed);

            if let Some(mut queue) = WAITING_TASKS.lock().remove(&current_id) {
                while let Some(task) = queue.pop_front() {
                    self.custom_wakeup(task);
                }
            }
        });

        self.reschedule();
        unreachable!()
    }
}

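/// Everything needed to create a new task, possibly on another core.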
struct NewTask {
    tid: TaskId,
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    core_id: CoreId,
    stacks: TaskStacks,
    object_map:
        Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>>,
}

impl From<NewTask> for Task {
    fn from(value: NewTask) -> Self {
        let NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map,
        } = value;
        let mut task = Self::new(tid, core_id, TaskStatus::Ready, prio, stacks, object_map);
        task.create_stack_frame(func, arg);
        task
    }
}

impl PerCoreScheduler {
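    /// Spawns a new task with the given entry point, argument, priority, target core,
    /// and stack size, and returns its task ID. If the task is assigned to another
    /// core, that core is woken up.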
    pub unsafe fn spawn(
        func: unsafe extern "C" fn(usize),
        arg: usize,
        prio: Priority,
        core_id: CoreId,
        stack_size: usize,
    ) -> TaskId {
        let tid = get_tid();
        let stacks = TaskStacks::new(stack_size);
        let new_task = NewTask {
            tid,
            func,
            arg,
            prio,
            core_id,
            stacks,
            object_map: core_scheduler().get_current_task_object_map(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);

            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                input_locked.new_tasks.push_back(new_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let task = Rc::new(RefCell::new(Task::from(new_task)));
                core_scheduler().ready_queue.push(task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id)
            }
        };

        debug!(
            "Creating task {} with priority {} on core {}",
            tid, prio, core_id
        );

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

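    /// Backend of [`Self::clone`]: creates a task that inherits the current task's
    /// priority and object map and is assigned to a core in round-robin fashion.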
    #[cfg(feature = "newlib")]
    fn clone_impl(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        static NEXT_CORE_ID: AtomicU32 = AtomicU32::new(1);

        let core_id: CoreId = {
            let id = NEXT_CORE_ID.fetch_add(1, Ordering::SeqCst);

            if id == arch::get_processor_count() {
                NEXT_CORE_ID.store(0, Ordering::SeqCst);
                0
            } else {
                id
            }
        };

        let current_task_borrowed = self.current_task.borrow();

        let tid = get_tid();
        let clone_task = NewTask {
            tid,
            func,
            arg,
            prio: current_task_borrowed.prio,
            core_id,
            stacks: TaskStacks::new(current_task_borrowed.stacks.get_user_stack_size()),
            object_map: current_task_borrowed.object_map.clone(),
        };

        let wakeup = {
            #[cfg(feature = "smp")]
            let mut input_locked = get_scheduler_input(core_id).lock();
            WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
            TASKS.lock().insert(
                tid,
                TaskHandle::new(
                    tid,
                    current_task_borrowed.prio,
                    #[cfg(feature = "smp")]
                    core_id,
                ),
            );
            NO_TASKS.fetch_add(1, Ordering::SeqCst);
            #[cfg(feature = "smp")]
            if core_id == core_scheduler().core_id {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                input_locked.new_tasks.push_back(clone_task);
                true
            }
            #[cfg(not(feature = "smp"))]
            if core_id == 0 {
                let clone_task = Rc::new(RefCell::new(Task::from(clone_task)));
                core_scheduler().ready_queue.push(clone_task);
                false
            } else {
                panic!("Invalid core_id {}!", core_id);
            }
        };

        if wakeup {
            arch::wakeup_core(core_id);
        }

        tid
    }

    #[cfg(feature = "newlib")]
    pub fn clone(&self, func: extern "C" fn(usize), arg: usize) -> TaskId {
        without_interrupts(|| self.clone_impl(func, arg))
    }

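    /// Returns `true` if the ready queue contains a task with a higher priority than
    /// the currently running task.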
    #[inline]
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn is_scheduling(&self) -> bool {
        self.current_task.borrow().prio < self.ready_queue.get_highest_priority()
    }

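    /// Moves all blocked tasks whose wakeup time has passed back into the ready queue.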
    #[inline]
    pub fn handle_waiting_tasks(&mut self) {
        without_interrupts(|| {
            crate::executor::run();
            for task in self.blocked_tasks.handle_waiting_tasks() {
                self.ready_queue.push(task);
            }
        });
    }

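    /// Wakes up the given blocked task and makes it ready again.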
    #[cfg(not(feature = "smp"))]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        without_interrupts(|| {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        });
    }

    #[cfg(feature = "smp")]
    pub fn custom_wakeup(&mut self, task: TaskHandle) {
        if task.get_core_id() == self.core_id {
            without_interrupts(|| {
                let task = self.blocked_tasks.custom_wakeup(task);
                self.ready_queue.push(task);
            });
        } else {
            get_scheduler_input(task.get_core_id())
                .lock()
                .wakeup_tasks
                .push_back(task);
            arch::wakeup_core(task.get_core_id());
        }
    }

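    /// Blocks the current task, optionally until the given wakeup time has passed.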
    #[inline]
    pub fn block_current_task(&mut self, wakeup_time: Option<u64>) {
        without_interrupts(|| {
            self.blocked_tasks
                .add(self.current_task.clone(), wakeup_time);
        });
    }

    #[inline]
    pub fn get_current_task_handle(&self) -> TaskHandle {
        without_interrupts(|| {
            let current_task_borrowed = self.current_task.borrow();

            TaskHandle::new(
                current_task_borrowed.id,
                current_task_borrowed.prio,
                #[cfg(feature = "smp")]
                current_task_borrowed.core_id,
            )
        })
    }

    #[inline]
    pub fn get_current_task_id(&self) -> TaskId {
        without_interrupts(|| self.current_task.borrow().id)
    }

    #[inline]
    pub fn get_current_task_object_map(
        &self,
    ) -> Arc<async_lock::RwLock<HashMap<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>>> {
        without_interrupts(|| self.current_task.borrow().object_map.clone())
    }

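    /// Returns the object referenced by file descriptor `fd` of the current task, or
    /// `EBADF` if there is no such object.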
    #[inline]
    pub async fn get_object(&self, fd: FileDescriptor) -> io::Result<Arc<dyn ObjectInterface>> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.read());

                let guard = ready!(pinned_obj.as_mut().poll(cx));
                Ready(guard.get(&fd).cloned().ok_or(io::Error::EBADF))
            })
        })
        .await
    }

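    /// Replaces the current task's object map with a fresh one that only keeps the
    /// objects registered for file descriptors 0 through 2.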
    #[allow(dead_code)]
    pub async fn recreate_objmap(&self) -> io::Result<()> {
        let mut map = HashMap::<FileDescriptor, Arc<dyn ObjectInterface>, RandomState>::with_hasher(
            RandomState::with_seeds(0, 0, 0, 0),
        );

        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.read());

                let guard = ready!(pinned_obj.as_mut().poll(cx));
                for i in 0..3 {
                    if let Some(obj) = guard.get(&i) {
                        map.insert(i, obj.clone());
                    }
                }

                Ready(io::Result::Ok(()))
            })
        })
        .await?;

        without_interrupts(|| {
            self.current_task.borrow_mut().object_map = Arc::new(async_lock::RwLock::new(map));
        });

        Ok(())
    }

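    /// Inserts `obj` at the lowest free file descriptor of the current task and returns
    /// that descriptor.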
    pub async fn insert_object(&self, obj: Arc<dyn ObjectInterface>) -> io::Result<FileDescriptor> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());

                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                let new_fd = || -> io::Result<FileDescriptor> {
                    let mut fd: FileDescriptor = 0;
                    loop {
                        if !guard.contains_key(&fd) {
                            break Ok(fd);
                        } else if fd == FileDescriptor::MAX {
                            break Err(io::Error::EOVERFLOW);
                        }

                        fd = fd.saturating_add(1);
                    }
                };

                let fd = new_fd()?;
                let _ = guard.insert(fd, obj.clone());
                Ready(Ok(fd))
            })
        })
        .await
    }

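    /// Duplicates the object behind `fd` under the lowest free file descriptor and
    /// returns the new descriptor.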
    pub async fn dup_object(&self, fd: FileDescriptor) -> io::Result<FileDescriptor> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());

                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                let obj = (*(guard.get(&fd).ok_or(io::Error::EINVAL)?)).clone();

                let new_fd = || -> io::Result<FileDescriptor> {
                    let mut fd: FileDescriptor = 0;
                    loop {
                        if !guard.contains_key(&fd) {
                            break Ok(fd);
                        } else if fd == FileDescriptor::MAX {
                            break Err(io::Error::EOVERFLOW);
                        }

                        fd = fd.saturating_add(1);
                    }
                };

                let fd = new_fd()?;
                if guard.try_insert(fd, obj).is_err() {
                    Ready(Err(io::Error::EMFILE))
                } else {
                    Ready(Ok(fd))
                }
            })
        })
        .await
    }

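    /// Removes the object belonging to `fd` from the current task's object map and
    /// returns it, or `EBADF` if the descriptor is not in use.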
    pub async fn remove_object(&self, fd: FileDescriptor) -> io::Result<Arc<dyn ObjectInterface>> {
        future::poll_fn(|cx| {
            without_interrupts(|| {
                let borrowed = self.current_task.borrow();
                let mut pinned_obj = core::pin::pin!(borrowed.object_map.write());
                let mut guard = ready!(pinned_obj.as_mut().poll(cx));
                Ready(guard.remove(&fd).ok_or(io::Error::EBADF))
            })
        })
        .await
    }

    #[inline]
    pub fn get_current_task_prio(&self) -> Priority {
        without_interrupts(|| self.current_task.borrow().prio)
    }

    #[allow(dead_code)]
    #[inline]
    pub fn get_priority_bitmap(&self) -> &u64 {
        self.ready_queue.get_priority_bitmap()
    }

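    /// Points the TSS and the core-local kernel stack pointer at the current task's
    /// kernel and interrupt stacks (x86_64).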
    #[cfg(target_arch = "x86_64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();
        let tss = unsafe { &mut *CoreLocal::get().tss.get() };

        let rsp = current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.privilege_stack_table[0] = rsp.into();
        CoreLocal::get().kernel_stack.set(rsp.as_mut_ptr());
        let ist_start = current_task_borrowed.stacks.get_interrupt_stack()
            + current_task_borrowed.stacks.get_interrupt_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64;
        tss.interrupt_stack_table[0] = ist_start.into();
    }

    pub fn set_current_task_priority(&mut self, prio: Priority) {
        without_interrupts(|| {
            trace!("Change priority of the current task");
            self.current_task.borrow_mut().prio = prio;
        });
    }

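    /// Changes the priority of the task with the given ID. Changing the priority of a
    /// task that runs on another core is not supported yet and only emits a warning.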
    pub fn set_priority(&mut self, id: TaskId, prio: Priority) -> Result<(), ()> {
        trace!("Change priority of task {} to priority {}", id, prio);

        without_interrupts(|| {
            let task = get_task_handle(id).ok_or(())?;
            #[cfg(feature = "smp")]
            let other_core = task.get_core_id() != self.core_id;
            #[cfg(not(feature = "smp"))]
            let other_core = false;

            if other_core {
                warn!("Have to change the priority on another core");
            } else if self.current_task.borrow().id == task.get_id() {
                self.current_task.borrow_mut().prio = prio;
            } else {
                self.ready_queue
                    .set_priority(task, prio)
                    .expect("Did not find a valid task in the ready queue");
            }

            Ok(())
        })
    }

    #[cfg(target_arch = "riscv64")]
    pub fn set_current_kernel_stack(&self) {
        let current_task_borrowed = self.current_task.borrow();

        let stack = (current_task_borrowed.stacks.get_kernel_stack()
            + current_task_borrowed.stacks.get_kernel_stack_size() as u64
            - TaskStacks::MARKER_SIZE as u64)
            .as_u64();
        CoreLocal::get().kernel_stack.set(stack);
    }

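    /// Performs a lazy FPU switch: if the current task does not own the FPU state, the
    /// owner's state is saved, the current task's state is restored, and the current
    /// task becomes the new owner.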
    #[cfg(target_arch = "x86_64")]
    pub fn fpu_switch(&mut self) {
        if !Rc::ptr_eq(&self.current_task, &self.fpu_owner) {
            debug!(
                "Switching FPU owner from task {} to {}",
                self.fpu_owner.borrow().id,
                self.current_task.borrow().id
            );

            self.fpu_owner.borrow_mut().last_fpu_state.save();
            self.current_task.borrow().last_fpu_state.restore();
            self.fpu_owner = self.current_task.clone();
        }
    }

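    /// Drops all tasks that have finished on this core, releasing their resources.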
    fn cleanup_tasks(&mut self) {
        while let Some(finished_task) = self.finished_tasks.pop_front() {
            debug!("Cleaning up task {}", finished_task.borrow().id);
        }
    }

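    /// Drains this core's `SchedulerInput`: wakes up tasks that other cores asked to be
    /// woken and enqueues newly spawned tasks into the ready queue.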
    #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
    pub fn check_input(&mut self) {
        let mut input_locked = CoreLocal::get().scheduler_input.lock();

        while let Some(task) = input_locked.wakeup_tasks.pop_front() {
            let task = self.blocked_tasks.custom_wakeup(task);
            self.ready_queue.push(task);
        }

        while let Some(new_task) = input_locked.new_tasks.pop_front() {
            let task = Rc::new(RefCell::new(Task::from(new_task)));
            self.ready_queue.push(task.clone());
        }
    }

    pub fn run() -> ! {
        let backoff = Backoff::new();

        loop {
            let core_scheduler = core_scheduler();
            interrupts::disable();

            crate::executor::run();

            #[cfg(all(any(target_arch = "x86_64", target_arch = "riscv64"), feature = "smp"))]
            core_scheduler.check_input();
            core_scheduler.cleanup_tasks();

            if core_scheduler.ready_queue.is_empty() {
                if backoff.is_completed() {
                    interrupts::enable_and_wait();
                    backoff.reset();
                } else {
                    interrupts::enable();
                    backoff.snooze();
                }
            } else {
                interrupts::enable();
                core_scheduler.reschedule();
                backoff.reset();
            }
        }
    }

    #[inline]
    #[cfg(target_arch = "aarch64")]
    pub fn get_last_stack_pointer(&self) -> memory_addresses::VirtAddr {
        self.current_task.borrow().last_stack_pointer
    }

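    /// Picks the next task to run. Returns a pointer to the stack-pointer field of the
    /// previously running task if a context switch is required, `None` otherwise.
    /// On RISC-V the context switch is performed directly within this function.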
    pub fn scheduler(&mut self) -> Option<*mut usize> {
        crate::executor::run();

        self.cleanup_tasks();

        let (id, last_stack_pointer, prio, status) = {
            let mut borrowed = self.current_task.borrow_mut();
            (
                borrowed.id,
                ptr::from_mut(&mut borrowed.last_stack_pointer).cast::<usize>(),
                borrowed.prio,
                borrowed.status,
            )
        };

        let mut new_task = None;

        if status == TaskStatus::Running {
            if let Some(task) = self.ready_queue.pop_with_prio(prio) {
                new_task = Some(task);
            }
        } else {
            if status == TaskStatus::Finished {
                self.current_task.borrow_mut().status = TaskStatus::Invalid;
                self.finished_tasks.push_back(self.current_task.clone());
            }

            if let Some(task) = self.ready_queue.pop() {
                debug!("Task is available.");
                new_task = Some(task);
            } else if status != TaskStatus::Idle {
                debug!("Only Idle Task is available.");
                new_task = Some(self.idle_task.clone());
            }
        }

        if let Some(task) = new_task {
            if status == TaskStatus::Running {
                self.current_task.borrow_mut().status = TaskStatus::Ready;
                self.ready_queue.push(self.current_task.clone());
            }

            let (new_id, new_stack_pointer) = {
                let mut borrowed = task.borrow_mut();
                if borrowed.status != TaskStatus::Idle {
                    borrowed.status = TaskStatus::Running;
                }

                (borrowed.id, borrowed.last_stack_pointer)
            };

            if id != new_id {
                debug!(
                    "Switching task from {} to {} (stack {:#X} => {:p})",
                    id,
                    new_id,
                    unsafe { *last_stack_pointer },
                    new_stack_pointer
                );
                #[cfg(not(target_arch = "riscv64"))]
                {
                    self.current_task = task;
                }

                #[cfg(not(target_arch = "riscv64"))]
                return Some(last_stack_pointer);

                #[cfg(target_arch = "riscv64")]
                {
                    if sstatus::read().fs() == sstatus::FS::Dirty {
                        self.current_task.borrow_mut().last_fpu_state.save();
                    }
                    task.borrow().last_fpu_state.restore();
                    self.current_task = task;
                    unsafe {
                        switch_to_task(last_stack_pointer, new_stack_pointer.as_usize());
                    }
                }
            }
        }

        None
    }
}

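/// Returns a task ID that is not yet in use.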
fn get_tid() -> TaskId {
    static TID_COUNTER: AtomicI32 = AtomicI32::new(0);
    let guard = TASKS.lock();

    loop {
        let id = TaskId::from(TID_COUNTER.fetch_add(1, Ordering::SeqCst));
        if !guard.contains_key(&id) {
            return id;
        }
    }
}

#[inline]
pub(crate) fn abort() -> ! {
    core_scheduler().exit(-1)
}

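/// Creates the idle task and the per-core scheduler for the calling core and registers
/// them in the global tables.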
pub(crate) fn add_current_core() {
    let core_id = core_id();
    let tid = get_tid();
    let idle_task = Rc::new(RefCell::new(Task::new_idle(tid, core_id)));

    WAITING_TASKS.lock().insert(tid, VecDeque::with_capacity(1));
    TASKS.lock().insert(
        tid,
        TaskHandle::new(
            tid,
            IDLE_PRIO,
            #[cfg(feature = "smp")]
            core_id,
        ),
    );
    debug!(
        "Initializing scheduler for core {} with idle task {}",
        core_id, tid
    );
    let boxed_scheduler = Box::new(PerCoreScheduler {
        #[cfg(feature = "smp")]
        core_id,
        current_task: idle_task.clone(),
        #[cfg(target_arch = "x86_64")]
        fpu_owner: idle_task.clone(),
        idle_task,
        ready_queue: PriorityTaskQueue::new(),
        finished_tasks: VecDeque::new(),
        blocked_tasks: BlockedTaskQueue::new(),
    });

    let scheduler = Box::into_raw(boxed_scheduler);
    set_core_scheduler(scheduler);
    #[cfg(feature = "smp")]
    {
        SCHEDULER_INPUTS.lock().insert(
            core_id.try_into().unwrap(),
            &CoreLocal::get().scheduler_input,
        );
        #[cfg(target_arch = "x86_64")]
        CORE_HLT_STATE
            .lock()
            .insert(core_id.try_into().unwrap(), &CoreLocal::get().hlt);
    }
}

#[inline]
#[cfg(all(target_arch = "x86_64", feature = "smp", not(feature = "idle-poll")))]
pub(crate) fn take_core_hlt_state(core_id: CoreId) -> bool {
    CORE_HLT_STATE.lock()[usize::try_from(core_id).unwrap()].swap(false, Ordering::Acquire)
}

#[inline]
#[cfg(feature = "smp")]
fn get_scheduler_input(core_id: CoreId) -> &'static InterruptTicketMutex<SchedulerInput> {
    SCHEDULER_INPUTS.lock()[usize::try_from(core_id).unwrap()]
}

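/// Spawns a new task with the given priority and stack size. A negative `selector`
/// distributes the task across cores round-robin; a non-negative `selector` picks that
/// core directly.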
pub unsafe fn spawn(
    func: unsafe extern "C" fn(usize),
    arg: usize,
    prio: Priority,
    stack_size: usize,
    selector: isize,
) -> TaskId {
    static CORE_COUNTER: AtomicU32 = AtomicU32::new(1);

    let core_id = if selector < 0 {
        CORE_COUNTER.fetch_add(1, Ordering::SeqCst) % get_processor_count()
    } else {
        selector as u32
    };

    unsafe { PerCoreScheduler::spawn(func, arg, prio, core_id, stack_size) }
}

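/// Blocks the calling task until the task with the given ID has finished.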
#[allow(clippy::result_unit_err)]
pub fn join(id: TaskId) -> Result<(), ()> {
    let core_scheduler = core_scheduler();

    debug!(
        "Task {} is waiting for task {}",
        core_scheduler.get_current_task_id(),
        id
    );

    loop {
        let mut waiting_tasks_guard = WAITING_TASKS.lock();

        if let Some(queue) = waiting_tasks_guard.get_mut(&id) {
            queue.push_back(core_scheduler.get_current_task_handle());
            core_scheduler.block_current_task(None);

            drop(waiting_tasks_guard);
            core_scheduler.reschedule();
        } else {
            return Ok(());
        }
    }
}

pub fn shutdown(arg: i32) -> ! {
    crate::syscalls::shutdown(arg)
}

fn get_task_handle(id: TaskId) -> Option<TaskHandle> {
    TASKS.lock().get(&id).copied()
}

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) static BOOT_ROOT_PAGE_TABLE: OnceCell<usize> = OnceCell::new();

#[cfg(all(target_arch = "x86_64", feature = "common-os"))]
pub(crate) fn get_root_page_table() -> usize {
    let current_task_borrowed = core_scheduler().current_task.borrow_mut();
    current_task_borrowed.root_page_table
983}