// hopter/task/task_struct.rs
1use super::{
2 priority::TaskPriority,
3 segmented_stack::{self, StackCtrlBlock},
4 trampoline, TaskBuildError,
5};
6use crate::{
7 config,
8 interrupt::{svc, trap_frame::TrapFrame},
9 schedule::scheduler::TaskQuota,
10 sync::{AtomicCell, Spin},
11 unrecoverable::{self, Lethal},
12};
13use alloc::{boxed::Box, sync::Arc};
14use core::{
15 alloc::Layout,
16 num::NonZeroUsize,
17 sync::atomic::{AtomicBool, AtomicPtr, AtomicU32, AtomicU8, Ordering},
18};
19use intrusive_collections::{intrusive_adapter, LinkedListAtomicLink};
20use static_assertions::const_assert;
21
22#[cfg(feature = "unwind")]
23use alloc::sync::Weak;
24#[cfg(feature = "unwind")]
25use core::any::Any;
26
#[repr(u8)]
#[derive(PartialEq, Clone, Copy)]
/// All possible states of a task.
///
/// `repr(u8)` keeps the discriminant a single byte, so the state can be kept
/// in a lock-free `AtomicCell<TaskState>` (verified by the `const_assert!`
/// next to the [`Task`] struct).
pub(crate) enum TaskState {
    /// The task is under initialization. Not ready to run.
    Initializing,
    /// The task is waiting for an event, e.g., a semaphore notification
    /// or a timer expiration.
    Blocked,
    /// The task is ready to run.
    Ready,
    /// The task is running on the CPU.
    Running,
    /// The task is under destruction.
    Destructing,
}
43
#[repr(C)]
#[derive(Default)]
/// Callee-saved general purpose registers on Cortex-M.
///
/// `repr(C)` fixes the field order and layout, since the context switch
/// assembly sequence accesses these fields through a raw pointer
/// (see [`Task::lock_ctxt`]).
struct CalleeSavedGPRegs {
    r4: u32,
    r5: u32,
    r6: u32,
    r7: u32,
    r8: u32,
    r9: u32,
    r10: u32,
    r11: u32,
}
57
#[cfg(armv7em)]
#[repr(C)]
#[derive(Default)]
/// Callee-saved floating point registers on Cortex-M.
///
/// Present only on ARMv7E-M targets (which may have an FPU). `repr(C)` fixes
/// the layout for the context switch assembly sequence.
struct CalleeSavedFPRegs {
    s16: u32,
    s17: u32,
    s18: u32,
    s19: u32,
    s20: u32,
    s21: u32,
    s22: u32,
    s23: u32,
    s24: u32,
    s25: u32,
    s26: u32,
    s27: u32,
    s28: u32,
    s29: u32,
    s30: u32,
    s31: u32,
}
80
/// The task local storage to support the segmented stack and deferred panic.
///
/// `repr(C)` fixes the field offsets, as the fields are presumably read at
/// fixed offsets by compiler-generated prologue/epilogue code — TODO confirm.
#[repr(C)]
#[derive(Default)]
pub(crate) struct TaskLocalStorage {
    /// The boundary address of the top stacklet.
    pub(crate) stklet_bound: u32,
    /// The number of drop functions that are currently present in the function
    /// call stack. The modified compiler toolchain generates a prologue for
    /// each drop function that increments the counter as well as an epilogue
    /// that decrements it. Note that drop functions may nest so the value can
    /// be greater than 1.
    pub(crate) nested_drop_cnt: u32,
    /// A boolean flag indicating if a panic call is pending. We cannot inject
    /// a panic to a task if the task is running a drop handler function, in
    /// which case we just set the panic pending flag. The modified compiler
    /// toolchain generates an epilogue that checks this flag if the
    /// [`nested_drop_cnt`](Self::nested_drop_cnt) is zero and diverts to panic
    /// if the flag is set to true. See [`crate::unwind::forced`] for details.
    pub(crate) unwind_pending: u32,
}
101
#[repr(C)]
#[derive(Default)]
/// The context of a task managed by the kernel. The struct does not store
/// caller-saved registers because these registers are pushed to the user stack
/// before context switch.
///
/// `repr(C)` fixes the field layout because the context switch assembly
/// sequence accesses this struct through the raw pointer returned by
/// [`Task::lock_ctxt`].
pub(crate) struct TaskCtxt {
    /// Preserved task local storage fields.
    tls: TaskLocalStorage,
    /// The stack pointer value.
    sp: u32,
    /// Preserved callee-saved general purpose registers.
    gp_regs: CalleeSavedGPRegs,
    /// Preserved callee-saved floating point registers. Present only on
    /// ARMv7E-M targets.
    #[cfg(armv7em)]
    fp_regs: CalleeSavedFPRegs,
}
118
/// Representing the configuration of a task's stack.
///
/// Cloneable so that the configuration can be reused when a panicked task is
/// restarted (see `Task::restart_from`).
#[derive(Clone)]
pub(crate) enum StackConfig {
    /// Allocate the whole stack as a single contiguous chunk ahead of time.
    Static {
        /// The size of the stack.
        limit: NonZeroUsize,
    },
    /// Allocate the stack on demand in small chunks called stacklets.
    Dynamic {
        /// The size of the first stacklet. `None` requests the default size.
        initial: Option<NonZeroUsize>,
        /// The maximum size of all stacklets, excluding stacklet overhead.
        /// `None` means no limit is enforced.
        limit: Option<NonZeroUsize>,
    },
}
135
/// The struct representing a task.
pub(crate) struct Task {
    /// When dropped it will decrement the number of existing tasks by 1.
    _quota: TaskQuota,
    /// The task context preserved in the kernel. When a task is scheduled to
    /// run on the CPU, the spin lock will be acquired during the running
    /// period. Accidental attempt to modify the context of a running task
    /// will result in a deadlock, which can help us track down the bug
    /// faster. The spin lock will be released when the running task is
    /// switched out during context switch.
    ///
    /// Note that this field remains being locked when the task makes an SVC.
    /// The SVC handlers should instead use
    /// [`TaskSVCCtxt`](crate::interrupt::svc_handler::TaskSVCCtxt)
    /// to read or modify the task's context.
    ctxt: Spin<TaskCtxt>,
    /// The initial stacklet of a task. Semantically, this pointer owns the
    /// piece of memory it points to. The drop handler must free the memory
    /// to avoid memory leak.
    initial_stklet: AtomicPtr<u8>,
    /// Configuration for the function call stack. Preserved so it can be
    /// reused when restarting a panicked task.
    stack_config: StackConfig,
    /// A numerical task ID that does not have functional purpose. It is
    /// only for diagnostic purpose.
    id: AtomicU8,
    /// Whether the task is the idle task.
    is_idle: bool,
    /// See [`TaskState`].
    state: AtomicCell<TaskState>,

    /*** Fields for unwinding. ***/
    /// Set only when the task is unwinding.
    #[cfg(feature = "unwind")]
    is_unwinding: AtomicBool,
    /// Set when a panicked task has been restarted with a new concurrent task
    /// context.
    #[cfg(feature = "unwind")]
    has_restarted: AtomicBool,
    /// Set when the task is allowed to be restarted concurrently, i.e., it
    /// was built via [`Task::build_restartable`] rather than
    /// [`Task::build_restartable_no_concur`]. Propagated to restarted
    /// instances.
    #[cfg(feature = "unwind")]
    concurrent_restartable: AtomicBool,

    /*** Fields present only for restartable tasks. ***/
    /// An `Arc` pointing to the bundled struct containing the task entry
    /// closure. The type of the closure is erased using `Arc<dyn Any>`, so
    /// that all task structs will have an identical type `Task`, rather than
    /// `Task<F>` with different `F`.
    #[cfg(feature = "unwind")]
    entry_closure: Option<Arc<dyn Any + Send + Sync + 'static>>,
    /// A function that can cast the `entry_closure` field from an
    /// `Arc<dyn Any>` to `*const u8`. The resulting raw pointer is used in
    /// the task entry trampoline function.
    #[cfg(feature = "unwind")]
    downcast_func: Option<fn(&(dyn Any + Send + Sync + 'static)) -> *const u8>,
    /// Set when the task is a restarted instance of another panicked task.
    /// Kept as a `Weak` reference so this does not keep the panicked task
    /// struct alive.
    #[cfg(feature = "unwind")]
    restarted_from: Option<Weak<Task>>,
    /// The entry trampoline function the restarted task should run after
    /// being created.
    #[cfg(feature = "unwind")]
    restart_entry_trampoline: Option<extern "C" fn(*const u8)>,

    /*** Fields for segmented stack control. ***/
    /// The recorded information used to control segmented stack growth and
    /// alleviate the hot-split problem. This field can be accessed
    /// concurrently from both the task context during unwinding and SVC
    /// context when allocating additional stacklet for the unwinder. Thus,
    /// the field must not use a spin lock or it may deadlock. All fields
    /// inside [`StackCtrlBlock`] are atomic integers that allow read/write
    /// access through shared references. `None` for tasks with a static
    /// stack.
    scb: Option<Box<StackCtrlBlock>>,

    /*** Fields for priority scheduling and sleeping. ***/
    /// See [`TaskPriority`].
    priority: AtomicCell<TaskPriority>,
    /// The tick number when a sleeping task should be woken up. This field is
    /// meaningful only when the task is sleeping.
    wake_at_tick: AtomicU32,

    /*** Fields for task linked list. ***/
    /// The link field for this struct to form an intrusive linked list.
    /// Invariant: a task struct can be inside at most one intrusive linked
    /// list. Trying to push a task which is already in a linked list into
    /// another linked list will result in a panic.
    pub(super) linked_list_link: LinkedListAtomicLink,
}
221
// Make sure the `AtomicCell`s used in `Task`'s fields are lock-free to prevent
// deadlocks. `AtomicCell` silently falls back to a lock-based implementation
// for types that do not fit a hardware atomic, so we assert at compile time
// that this never happens for the types below.
const_assert!(AtomicCell::<TaskState>::is_lock_free());
const_assert!(AtomicCell::<TaskPriority>::is_lock_free());
226
/// Task struct builder functions.
impl Task {
    /// Build a new task struct. Return `Ok(Task)` if successful, otherwise
    /// `Err(TaskBuildError)`. When the built task panics during its
    /// execution, the task's stack will be unwound, and then the task will be
    /// *terminated*.
    ///
    /// - `quota`: Consumed and stored in the task struct; when the task is
    ///   dropped it decrements the number of existing tasks.
    /// - `id`: A numerical task ID that does not have functional purpose. It
    ///   is only for diagnostic purpose.
    /// - `entry_closure`: The entry closure for the new task, i.e., the code
    ///   where the new task starts to execute.
    /// - `stack_config`: The configuration of the function call stack.
    /// - `priority`: The priority of the task. Smaller numerical values
    ///   represent higher priority.
    pub(crate) fn build<F>(
        quota: TaskQuota,
        id: u8,
        entry_closure: F,
        stack_config: StackConfig,
        priority: u8,
    ) -> Result<Self, TaskBuildError>
    where
        F: FnOnce() + Send + 'static,
    {
        let mut task = Self::new(quota, false);
        task.initialize(id, entry_closure, stack_config, priority)?;
        Ok(task)
    }

    /// Build a new restartable task struct with concurrent restart. Return
    /// `Ok(Task)` if successful, otherwise `Err(TaskBuildError)`. When the
    /// built task panics during its execution, the task's stack will be
    /// unwound, and then the task will be *restarted*. The restarted task
    /// will start its execution again from the same entry closure.
    ///
    /// - `quota`: Consumed and stored in the task struct; when the task is
    ///   dropped it decrements the number of existing tasks.
    /// - `id`: A numerical task ID that does not have functional purpose. It
    ///   is only for diagnostic purpose.
    /// - `entry_closure`: The entry closure for the new task, i.e., the code
    ///   where the new task starts to execute. Must be `Clone` so that it can
    ///   be invoked again upon restart.
    /// - `stack_config`: The configuration of the function call stack.
    /// - `priority`: The priority of the task. Smaller numerical values
    ///   represent higher priority.
    #[cfg(feature = "unwind")]
    pub(crate) fn build_restartable<F>(
        quota: TaskQuota,
        id: u8,
        entry_closure: F,
        stack_config: StackConfig,
        priority: u8,
    ) -> Result<Self, TaskBuildError>
    where
        F: FnOnce() + Send + Sync + Clone + 'static,
    {
        let mut task = Self::new(quota, false);
        task.initialize_restartable(id, entry_closure, stack_config, priority, true)?;
        Ok(task)
    }

    /// Build a new restartable task struct without concurrent restart. Return
    /// `Ok(Task)` if successful, otherwise `Err(TaskBuildError)`. When the
    /// built task panics during its execution, the task's stack will be
    /// unwound, and then the task will be *restarted*. The restarted task
    /// will start its execution again from the same entry closure.
    ///
    /// - `quota`: Consumed and stored in the task struct; when the task is
    ///   dropped it decrements the number of existing tasks.
    /// - `id`: A numerical task ID that does not have functional purpose. It
    ///   is only for diagnostic purpose.
    /// - `entry_closure`: The entry closure for the new task, i.e., the code
    ///   where the new task starts to execute. Must be `Clone` so that it can
    ///   be invoked again upon restart.
    /// - `stack_config`: The configuration of the function call stack.
    /// - `priority`: The priority of the task. Smaller numerical values
    ///   represent higher priority.
    #[cfg(feature = "unwind")]
    pub(crate) fn build_restartable_no_concur<F>(
        quota: TaskQuota,
        id: u8,
        entry_closure: F,
        stack_config: StackConfig,
        priority: u8,
    ) -> Result<Self, TaskBuildError>
    where
        F: FnOnce() + Send + Sync + Clone + 'static,
    {
        let mut task = Self::new(quota, false);
        task.initialize_restartable(id, entry_closure, stack_config, priority, false)?;
        Ok(task)
    }

    /// Build a new task struct as the restarted instance of a previously
    /// panicked task. The new task will start its execution from the same
    /// closure as the panicked task, with the same ID, stack configuration,
    /// and intrinsic priority.
    #[cfg(feature = "unwind")]
    pub(crate) fn build_restarted(quota: TaskQuota, prev_task: Arc<Task>) -> Self {
        let mut new_task = Self::new(quota, false);
        new_task.restart_from(prev_task.clone());
        new_task
    }

    /// Build the task struct of the idle task.
    pub(crate) fn build_idle(quota: TaskQuota) -> Self {
        // Make sure the idle task is built only once. It is an unrecoverable
        // error to attempt to build it twice.
        static IDLE_CREATED: AtomicBool = AtomicBool::new(false);
        let created = IDLE_CREATED.swap(true, Ordering::SeqCst);
        unrecoverable::die_if(|| created);

        let mut idle_task = Self::new(quota, true);

        let stack_config = StackConfig::Dynamic {
            initial: None,
            limit: None,
        };

        // Create the idle task. The closure passed to `.initialize()` is
        // actually not used. The `idle()` function is invoked through the
        // assembly sequence when starting the scheduler.
        idle_task
            .initialize(
                config::IDLE_TASK_ID,
                || unrecoverable::die(),
                stack_config,
                config::IDLE_TASK_PRIORITY,
            )
            .unwrap_or_die();

        // We are about to transmute the current thread as the idle task.
        // Mark the idle task as `Running`.
        idle_task.set_state(TaskState::Running);

        idle_task
    }

    /// Create a new task struct, with all the fields set to their default
    /// values. The returned struct is in the `Initializing` state and must be
    /// initialized with one of the `initialize*` methods before being run.
    fn new(quota: TaskQuota, is_idle: bool) -> Self {
        Self {
            _quota: quota,
            ctxt: Spin::new(TaskCtxt::default()),
            id: AtomicU8::new(0),
            is_idle,
            state: AtomicCell::new(TaskState::Initializing),
            initial_stklet: AtomicPtr::new(core::ptr::null_mut()),
            #[cfg(feature = "unwind")]
            is_unwinding: AtomicBool::new(false),
            #[cfg(feature = "unwind")]
            has_restarted: AtomicBool::new(false),
            #[cfg(feature = "unwind")]
            concurrent_restartable: AtomicBool::new(false),
            #[cfg(feature = "unwind")]
            entry_closure: None,
            #[cfg(feature = "unwind")]
            downcast_func: None,
            #[cfg(feature = "unwind")]
            restart_entry_trampoline: None,
            #[cfg(feature = "unwind")]
            restarted_from: None,
            stack_config: StackConfig::Dynamic {
                initial: None,
                limit: None,
            },
            scb: None,
            // Default to the lowest priority level until `initialize_common`
            // sets the requested priority.
            priority: AtomicCell::new(TaskPriority::new_intrinsic(
                config::TASK_PRIORITY_LEVELS - 1,
            )),
            linked_list_link: LinkedListAtomicLink::new(),
            wake_at_tick: AtomicU32::new(u32::MAX),
        }
    }

    /// The common part of initializing a task struct: allocate the initial
    /// stacklet, build the initial trap frame (for non-idle tasks), and set
    /// the ID, state, and priority.
    ///
    /// - `id`: A numerical task ID that does not have functional purpose. It
    ///   is only for diagnostic purpose.
    /// - `entry_closure_ptr`: Raw pointer to the entry closure.
    /// - `entry_trampoline`: The address of the trampoline function of the new
    ///   task.
    /// - `stack_config`: The configuration of the function call stack.
    /// - `priority`: The priority of the task. Smaller numerical values
    ///   represent higher priority.
    fn initialize_common(
        &mut self,
        id: u8,
        entry_closure_ptr: usize,
        entry_trampoline: usize,
        stack_config: StackConfig,
        priority: u8,
    ) -> Result<(), TaskBuildError> {
        // Check priority number validity.
        if priority >= config::TASK_PRIORITY_LEVELS {
            return Err(TaskBuildError::PriorityNotAllowed);
        }

        let stack_alloc_size;

        match stack_config {
            // For static stack just allocate the whole stack ahead of time.
            StackConfig::Static { limit } => {
                stack_alloc_size = limit.get();
            }
            // For dynamic stack, just allocate the initial stacklet. Also
            // create a stack control block.
            StackConfig::Dynamic { initial, .. } => {
                self.scb.replace(Box::new(StackCtrlBlock::default()));
                stack_alloc_size = initial.map(|size| size.get()).unwrap_or(0);
            }
        }

        // Allocate the initial stacklet. `stklet_begin` points to the
        // beginning of the allocated memory chunk, and can be used to call
        // `alloc::alloc::dealloc()` to free the memory. `stklet_end` points to
        // the ending of the memory chunk. The allocated memory chunk is *not*
        // zero-initialized.
        let (stklet_begin, stklet_end) = segmented_stack::alloc_initial_stacklet(stack_alloc_size);

        // Store stacklet to the task struct. The `Drop` implementation frees
        // it.
        self.initial_stklet.store(stklet_begin, Ordering::SeqCst);

        // Preserve the stack configuration to be used for task restart.
        self.stack_config = stack_config;

        // Let the stack pointer point to the bottom of the initial stacklet.
        // The stack grows downwards, so the bottom is the high-address end.
        let mut sp = stklet_end;

        // Normal tasks (not idle) are started by an exception return. The
        // initial state is indicated by the trap frame stored on the task's
        // stack. In the following we will build the initial trap frame which
        // is placed at the bottom of the initial stacklet.
        //
        // However, the idle task cannot be started by an exception return,
        // because after boot and during initialization, the CPU runs in thread
        // mode with MSP. It will trigger a HardFault if we try to perform an
        // exception return in thread mode. Thus, we will manually switch the
        // stack pointer to PSP and jump to the idle function with the assembly
        // code in `start_scheduler()`. We need not put a trap frame in idle
        // task's initial stacklet.
        if !self.is_idle {
            // Move the stack pointer to make space for the trap frame.
            // Safety: The size of the initial stacklet is guaranteed to be
            // larger than the size of a trap frame. Thus, the pointer offset
            // must be within bounds.
            unsafe {
                sp = sp.sub(core::mem::size_of::<TrapFrame>());
            }

            // Initialize the trap frame to its default values.
            //
            // Safety: The stack memory is just allocated, so the current code
            // has exclusive access to the memory.
            let tf_ptr = sp as *mut TrapFrame;
            unsafe {
                tf_ptr.write(TrapFrame::default());
            };

            {
                // Safety: The stack memory is just allocated, so the current code
                // has exclusive access to the memory. The memory is initialized
                // above.
                let tf = unsafe { &mut *tf_ptr };

                // The only bit we need to set in the program state register (PSR)
                // is the Thumb bit (the 24th). Cortex-M4F always run in Thumb state,
                // so we must always set the Thumb bit.
                tf.gp_regs.xpsr = 0x01000000;

                // This is the actual return address of the exception handler. We set
                // it to the entry function of the new task. The least significant
                // bit is set to 1 to indicate Thumb state.
                tf.gp_regs.pc = entry_trampoline as u32 | 1;

                // Let the trampoline function return to task destroy SVC, so
                // that the task is cleaned up when its entry function returns.
                tf.gp_regs.lr = svc::svc_destroy_current_task as u32 | 1;

                // Pass the closure pointer as the trampoline function's first
                // argument (`r0` per the AAPCS calling convention).
                tf.gp_regs.r0 = entry_closure_ptr as u32;
            }
        }

        // Set relevant information in the task context.
        let ctxt = self.ctxt.get_mut();
        ctxt.sp = sp as u32;
        ctxt.tls.stklet_bound = segmented_stack::stklet_ptr_to_bound(stklet_begin) as u32;

        // Set other task information.
        self.id.store(id, Ordering::SeqCst);
        self.state.store(TaskState::Ready);
        self.priority.store(TaskPriority::new_intrinsic(priority));

        Ok(())
    }

    /// Initialize the task struct for a non-restartable task.
    ///
    /// - `id`: A numerical task ID that does not have functional purpose. It
    ///   is only for diagnostic purpose.
    /// - `entry_closure`: The entry closure for the new task, i.e., the code
    ///   where the new task starts to execute.
    /// - `stack_config`: The configuration of the function call stack.
    /// - `priority`: The priority of the task. Smaller numerical values
    ///   represent higher priority.
    fn initialize<F>(
        &mut self,
        id: u8,
        entry_closure: F,
        stack_config: StackConfig,
        priority: u8,
    ) -> Result<(), TaskBuildError>
    where
        F: FnOnce() + Send + 'static,
    {
        // Bundle the entry closure, and put it onto the heap.
        let boxed_closure = Box::new(entry_closure);

        // Get the raw pointer to the bundle. Ownership is transferred to the
        // trampoline function, which receives the pointer via `r0`.
        let closure_ptr = Box::into_raw(boxed_closure) as usize;

        // Get the function address of the entry trampoline.
        let entry_trampoline = trampoline::task_entry::<F> as usize;

        // Perform other common initialization.
        self.initialize_common(id, closure_ptr, entry_trampoline, stack_config, priority)
    }

    /// Initialize the task struct for a restartable task.
    ///
    /// - `id`: A numerical task ID that does not have functional purpose. It
    ///   is only for diagnostic purpose.
    /// - `entry_closure`: The entry closure for the new task, i.e., the code
    ///   where the new task starts to execute.
    /// - `stack_config`: The configuration of the function call stack.
    /// - `priority`: The priority of the task. Smaller numerical values
    ///   represent higher priority.
    /// - `allow_concurrent_restart`: Whether a new instance may be created
    ///   to restart this task concurrently.
    #[cfg(feature = "unwind")]
    fn initialize_restartable<F>(
        &mut self,
        id: u8,
        entry_closure: F,
        stack_config: StackConfig,
        priority: u8,
        allow_concurrent_restart: bool,
    ) -> Result<(), TaskBuildError>
    where
        F: FnOnce() + Send + Sync + Clone + 'static,
    {
        // Bundle the entry closure, and put it onto the heap. Unlike the
        // non-restartable case, the closure is kept in an `Arc` so it can be
        // reused upon restart.
        let arc_closure = Arc::new(entry_closure);

        // Store the bundle to the task struct, so that we can use it again
        // during task restart.
        self.entry_closure = Some(arc_closure);

        // Use downcast function to get the raw pointer to the bundle.
        let downcast_func = trampoline::downcast_to_ptr::<F>;
        let closure_ptr =
            downcast_func(self.entry_closure.as_ref().unwrap_or_die().as_ref()) as usize;

        // Get the function address of the entry trampoline.
        let entry_trampoline = trampoline::restartable_task_entry::<F>;

        // Store the downcast function to the task struct, so that we can call
        // it again during task restart.
        self.downcast_func = Some(downcast_func);

        // Store the trampoline to the task struct, so that we can call it again
        // during task restart.
        self.restart_entry_trampoline = Some(entry_trampoline);

        self.concurrent_restartable
            .store(allow_concurrent_restart, Ordering::SeqCst);

        // Perform other common initialization.
        self.initialize_common(
            id,
            closure_ptr,
            entry_trampoline as usize,
            stack_config,
            priority,
        )
    }

    /// Initialize the task struct to create a restarted instance of a panicked
    /// task.
    ///
    /// - `prev_task`: The panicked task.
    #[cfg(feature = "unwind")]
    fn restart_from(&mut self, prev_task: Arc<Task>) {
        // The task ID is kept the same as the panicked task.
        let id = prev_task.id.load(Ordering::SeqCst);

        // Clone restart relevant fields from the panicked task struct.
        self.downcast_func = prev_task.downcast_func.clone();
        self.entry_closure = prev_task.entry_closure.clone();
        self.restart_entry_trampoline = prev_task.restart_entry_trampoline.clone();

        // Unwrap the downcast function and the entry closure. Get the raw
        // pointer to the closure using the downcast function.
        let downcast_func = self.downcast_func.unwrap_or_die();
        let entry_closure = self.entry_closure.as_ref().unwrap_or_die();
        let closure_ptr = downcast_func(entry_closure.as_ref()) as usize;

        // Unwrap the entry trampoline function. Get its address.
        let entry_trampoline = self.restart_entry_trampoline.unwrap_or_die() as usize;

        // Get the intrinsic priority of the panicked task. The new task will
        // have the same priority.
        let priority = prev_task.priority.load().intrinsic_priority();

        // Record that this new task is a restarted instance from the panicked
        // task. A `Weak` reference avoids keeping the panicked task alive.
        self.restarted_from = Some(Arc::downgrade(&prev_task));

        // Inherit the concurrent restart policy from the panicked task.
        self.concurrent_restartable.store(
            prev_task.concurrent_restartable.load(Ordering::SeqCst),
            Ordering::SeqCst,
        );

        // Record that the panicked task has been restarted. This will prevent
        // other restart attempt.
        prev_task.has_restarted.store(true, Ordering::SeqCst);

        // Perform other common initialization.
        self.initialize_common(
            id,
            closure_ptr,
            entry_trampoline,
            prev_task.stack_config.clone(),
            priority,
        )
        .unwrap_or_die()
    }
}
653}
654
/// Field getters and Setters.
impl Task {
    /// Return the preserved stack pointer value. Accesses the context via
    /// `get_mut`, which relies on exclusive access (`&mut self`) instead of
    /// acquiring the context spin lock.
    pub(crate) fn get_sp(&mut self) -> u32 {
        self.ctxt.get_mut().sp
    }

    /// Return the preserved stacklet boundary address. Requires exclusive
    /// access, like [`get_sp`](Self::get_sp).
    pub(crate) fn get_stk_bound(&mut self) -> u32 {
        self.ctxt.get_mut().tls.stklet_bound
    }

    /// Return the current [`TaskState`].
    pub(crate) fn get_state(&self) -> TaskState {
        self.state.load()
    }

    /// Set the current [`TaskState`].
    pub(crate) fn set_state(&self, state: TaskState) {
        self.state.store(state);
    }

    /// Return the diagnostic task ID.
    pub(crate) fn get_id(&self) -> u8 {
        self.id.load(Ordering::SeqCst)
    }

    /// Return whether this task is the idle task.
    pub(crate) fn is_idle(&self) -> bool {
        self.is_idle
    }

    /// Set or clear the flag indicating that the task is unwinding.
    #[cfg(feature = "unwind")]
    pub(crate) fn set_unwind_flag(&self, val: bool) {
        self.is_unwinding.store(val, Ordering::SeqCst);
    }

    /// Return whether the task is currently unwinding.
    #[cfg(feature = "unwind")]
    pub(crate) fn is_unwinding(&self) -> bool {
        self.is_unwinding.load(Ordering::SeqCst)
    }

    /// Return the tick number at which a sleeping task should be woken up.
    /// Meaningful only while the task is sleeping.
    pub(crate) fn get_wake_tick(&self) -> u32 {
        self.wake_at_tick.load(Ordering::SeqCst)
    }

    /// If this task is a restarted instance of a panicked task, return a
    /// `Weak` reference to the original panicked task.
    #[cfg(feature = "unwind")]
    pub(crate) fn get_restart_origin_task(&self) -> Option<&Weak<Task>> {
        self.restarted_from.as_ref()
    }

    /// Set the tick number at which the sleeping task should be woken up.
    pub(crate) fn set_wake_tick(&self, tick: u32) {
        self.wake_at_tick.store(tick, Ordering::SeqCst);
    }

    /// Return whether this (panicked) task has already been restarted with a
    /// new task instance.
    #[cfg(feature = "unwind")]
    pub(crate) fn has_restarted(&self) -> bool {
        self.has_restarted.load(Ordering::SeqCst)
    }

    /// Return whether the task is restartable. A task is restartable exactly
    /// when it was built with an `Arc`-bundled entry closure.
    #[cfg(feature = "unwind")]
    pub(crate) fn is_restartable(&self) -> bool {
        self.entry_closure.is_some()
    }

    /// Return whether the task may be restarted concurrently.
    #[cfg(feature = "unwind")]
    pub(crate) fn allow_concurrent_restart(&self) -> bool {
        self.concurrent_restartable.load(Ordering::SeqCst)
    }

    /// Lock the task context and return the mutable raw pointer to the
    /// context. The pointer is used by the context switch assembly sequence
    /// in [`context_switch`](crate::interrupt::context_switch).
    /// See [`Task`] for the invariants of the context lock.
    pub(crate) fn lock_ctxt(&self) -> *mut TaskCtxt {
        let mut locked_ctxt = self.ctxt.lock_now_or_die();
        let ptr = &mut *locked_ctxt as *mut _;
        // Leak the guard so the lock stays held while the task runs; it is
        // released later via `force_unlock_ctxt` during context switch.
        core::mem::forget(locked_ctxt);
        ptr
    }

    /// Force unlock the task context. This method should be called only when
    /// context switching a task out of the CPU. See [`Task`] for the
    /// invariants of the context lock and also [`lock_ctxt`](Self::lock_ctxt).
    ///
    /// Safety: When calling this method the context lock must have been
    /// acquired when the task was being context switched on to the CPU.
    pub(crate) unsafe fn force_unlock_ctxt(&self) {
        self.ctxt.force_unlock()
    }

    /// Run the provided closure with [`StackCtrlBlock`] if the task
    /// has it and wrap the return value with `Some(_)`. Otherwise if the task
    /// has no [`StackCtrlBlock`], return `None`.
    pub(crate) fn with_stack_ctrl_block<F, R>(&self, op: F) -> Option<R>
    where
        F: FnOnce(&StackCtrlBlock) -> R,
    {
        self.scb.as_ref().map(|scb| op(&*scb))
    }

    /// Return the maximum stack size in bytes, or `None` if the task has a
    /// dynamic stack with no configured limit.
    pub(super) fn get_stack_limit(&self) -> Option<usize> {
        match self.stack_config {
            StackConfig::Static { limit } => Some(limit.get()),
            StackConfig::Dynamic { limit, .. } => limit.map(|size| size.get()),
        }
    }
}
757
758/// Priority related.
759impl Task {
760 /// Get the priority of this task.
761 pub(crate) fn get_priority(&self) -> TaskPriority {
762 self.priority.load()
763 }
764
765 pub(crate) fn change_intrinsic_priority(&self, prio: u8) {
766 let new_prio = TaskPriority::change_intrinsic(&self.priority.load(), prio);
767 self.priority.store(new_prio);
768 }
769
770 /// If the other given task has higher priority, inherit it. Otherwise,
771 /// keep the current priority.
772 ///
773 /// Note: even if the task inherits priority from the given task, its
774 /// intrinsic priority will still be kept and can be restored at any time.
775 pub(crate) fn ceil_priority_from(&self, other: &Self) {
776 let self_prio = self.priority.load();
777 let other_prio = other.priority.load();
778 if let Ok(inherited_prio) = TaskPriority::try_inherit_from(&self_prio, &other_prio) {
779 self.priority.store(inherited_prio);
780 }
781 }
782
783 /// Set the priority of the task to its intrinsic value, i.e. the one given
784 /// at task creation time.
785 pub(crate) fn restore_intrinsic_priority(&self) {
786 let intrinsic_prio = TaskPriority::restore_intrinsic(&self.priority.load());
787 self.priority.store(intrinsic_prio);
788 }
789
790 /// Return true if and only if this task has higher priority than the other
791 /// task.
792 pub(crate) fn should_preempt(&self, other: &Self) -> bool {
793 if config::ALLOW_TASK_PREEMPTION {
794 self.priority.load() < other.priority.load()
795 } else {
796 false
797 }
798 }
799}
800
// Create the adapter for the intrusive linked list of task structs. The
// adapter ties `Arc<Task>` elements to their embedded `linked_list_link`
// field, so a linked list can hold tasks without extra allocation per node.
intrusive_adapter!(
    pub(crate) TaskListAdapter
    = Arc<Task>: Task { linked_list_link: LinkedListAtomicLink }
);
806
impl Drop for Task {
    /// When dropping a task struct, we should free the initial stacklet.
    fn drop(&mut self) {
        let stklet_ptr = self.initial_stklet.load(Ordering::SeqCst);

        // The pointer is null if the task was never initialized.
        if !stklet_ptr.is_null() {
            // Safety: Semantically, `initial_stklet` owns the memory it points
            // to. The memory was dynamically allocated. We must free it to
            // avoid memory leaks.
            unsafe {
                // Layout is not used in the current dealloc implementation.
                //
                // NOTE(review): `Layout::new::<u8>()` does not match the
                // layout the stacklet was allocated with. This is only sound
                // as long as the registered global allocator ignores the
                // layout argument; under the `GlobalAlloc` contract a layout
                // mismatch is undefined behavior. TODO: have
                // `segmented_stack` expose the allocation layout and pass it
                // here.
                alloc::alloc::dealloc(stklet_ptr, Layout::new::<u8>());
            }
        }
    }
}
823
824impl PartialEq for Task {
825 /// Different tasks are never considered equal. A task is only equal to
826 /// itself. Thus the function returns `true` only when the two references
827 /// point to the same address.
828 fn eq(&self, other: &Self) -> bool {
829 self as *const _ as usize == other as *const _ as usize
830 }
831}