use alloc::{format, string::String, sync::Arc, vec::Vec};
use core::sync::atomic::{AtomicUsize, Ordering};
use bsp_define::smp::NR_CPUS;
use task_define::TaskBase;
use crate::{
processor::{nr_cpus, this_processor_id},
sched::task::{
pid::{Pid, pid_init},
stack::THREAD_STACK_SIZE,
},
space::{
addr::{Vaddr, init_stack_base},
kalloc::{GfpFlags, kfree, kmalloc},
mm::MmStruct,
},
};
mod pid;
pub mod stack;
/// A kernel task (thread of execution).
///
/// `repr(C)` fixes the declared field order, keeping `task_base` at offset 0
/// so the raw `*const Task` stashed in `sp_el0` by `set_current_fast` shares
/// its address with the embedded `TaskBase`.
#[repr(C)]
#[allow(clippy::struct_field_names)]
pub struct Task {
    // Shared scheduling state (preempt counter, stack base) — must stay first.
    task_base: TaskBase,
    // Human-readable name, e.g. "root_server" or "idle/<n>".
    name: String,
    // CPU this task was created for; accessed with Relaxed ordering only.
    cpu: AtomicUsize,
    // Address space handle; `None` for kernel-only tasks (idle, dummy).
    mm: Option<Arc<MmStruct>>,
    // Process identifier allocated by the `pid` module.
    pid: Pid,
}
impl Task {
    /// Const constructor producing an empty placeholder task (used for the
    /// statically-allocated `DUMMY_TASK`): empty name, CPU 0, no address
    /// space, default PID, no stack registered.
    const fn new() -> Self {
        Self {
            task_base: TaskBase::new(),
            name: String::new(),
            cpu: AtomicUsize::new(0),
            mm: None,
            pid: Pid::new(),
        }
    }

    /// Builds a task around an already-provided stack base address,
    /// allocating a fresh PID via `Pid::create` and recording `stack`
    /// in the shared `TaskBase`.
    fn init(name: &str, cpu: usize, mm: Option<Arc<MmStruct>>, stack: usize) -> Self {
        let task = Self {
            task_base: TaskBase::new(),
            name: name.into(),
            cpu: AtomicUsize::new(cpu),
            mm,
            pid: Pid::create(),
        };
        task.task_base.set_stack(stack);
        task
    }

    /// Creates a task together with a freshly `kmalloc`'d kernel stack of
    /// `THREAD_STACK_SIZE` bytes. Panics if the allocation fails.
    pub fn create(name: &str, cpu: usize, mm: Option<Arc<MmStruct>>) -> Self {
        let stack = kmalloc(THREAD_STACK_SIZE, GfpFlags::Clean)
            .unwrap()
            .to_value();
        Self::init(name, cpu, mm, stack)
    }

    /// Task name as given at creation time.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// CPU this task is assigned to.
    pub fn cpu(&self) -> usize {
        self.cpu.load(Ordering::Relaxed)
    }

    /// A clone of the task's address-space handle, if any
    /// (kernel-only tasks such as idle threads return `None`).
    pub fn mm(&self) -> Option<Arc<MmStruct>> {
        self.mm.clone()
    }

    /// Overwrites the preemption counter.
    #[inline(always)]
    pub fn set_preempt(&self, val: u32) {
        self.task_base.set_preempt(val);
    }

    /// Current preemption counter.
    #[inline(always)]
    pub fn preempt(&self) -> u32 {
        self.task_base.preempt()
    }

    /// Raises the preemption counter by `val`.
    #[inline(always)]
    pub fn preempt_add(&self, val: u32) {
        self.task_base.preempt_add(val);
    }

    /// Lowers the preemption counter by `val`.
    #[inline(always)]
    pub fn preempt_sub(&self, val: u32) {
        self.task_base.preempt_sub(val);
    }

    /// Convenience wrapper: one more level of preemption disable.
    #[inline(always)]
    pub fn preempt_inc(&self) {
        self.task_base.preempt_add(1);
    }

    /// Convenience wrapper: release one level of preemption disable.
    #[inline(always)]
    pub fn preempt_dec(&self) {
        self.task_base.preempt_sub(1);
    }

    /// Numeric PID value.
    #[inline(always)]
    pub fn pid(&self) -> usize {
        self.pid.to_value()
    }
}
impl Drop for Task {
    /// Releases the task's kernel stack when the last owner drops it.
    fn drop(&mut self) {
        // Free the stack base recorded via `TaskBase::set_stack` at creation.
        // NOTE(review): the root task's stack comes from `init_stack_base()`
        // rather than `kmalloc` (see `root_task_init`); if a root `Task` is
        // ever dropped this would `kfree` memory that was never kmalloc'd —
        // confirm root/entry tasks are kept alive forever by the per-CPU
        // `Arc` table.
        let stack_base = self.task_base.stack();
        kfree(Vaddr::from(stack_base));
    }
}
/// Returns a strong reference to this CPU's entry task.
#[inline(always)]
pub fn current() -> Arc<Task> {
    let cpu = this_processor_id();
    // SAFETY: the slot for this CPU is filled during `task_init` and is
    // presumably not rewritten concurrently — each CPU reads its own entry.
    let entry = unsafe { PERCPU_ENTRY_TASK[cpu].as_ref().unwrap() };
    Arc::clone(entry)
}
/// Lock-free access to the current task as a plain reference.
///
/// On aarch64 this kernel stashes a raw `*const Task` in `sp_el0`
/// (see `set_current_fast`), so reading that system register yields the
/// current task without touching the per-CPU tables.
///
/// NOTE(review): the returned `&'static Task` is only sound while the pointee
/// stays alive — confirm entry tasks are never freed while current.
#[inline(always)]
pub fn current_fast() -> &'static Task {
    cfg_if::cfg_if! {
        if #[cfg(aarch64_seminix)] {
            // SAFETY: `sp_el0` was loaded with a valid `Task` address by
            // `set_current_fast` (at latest in `early_task_init`) before any
            // caller can reach this point.
            unsafe {
                let task: usize;
                core::arch::asm!(
                    "mrs {0}, sp_el0",
                    out(reg) task
                );
                &*(task as *const Task)
            }
        } else {
            unreachable!()
        }
    }
}
/// Returns `(task_ptr, stack_base)` for the entry task registered on `cpu`,
/// as raw addresses suitable for low-level CPU bring-up code.
#[inline]
pub(crate) fn get_idle_task_info(cpu: usize) -> (usize, usize) {
    // SAFETY: `PERCPU_ENTRY_TASK_RAW[cpu]` is populated by
    // `switch_entry_task`; the stored address points at a `Task` kept alive
    // by the owning `Arc` in `PERCPU_ENTRY_TASK`.
    unsafe {
        let raw = PERCPU_ENTRY_TASK_RAW[cpu];
        let stack = (*(raw as *const Task)).task_base.stack();
        (raw, stack)
    }
}
/// Installs `ts` (a raw `Task` address) as the fast current-task pointer by
/// writing it to `sp_el0`; it is read back by `current_fast`.
#[inline(always)]
#[allow(unused_variables)]
fn set_current_fast(ts: usize) {
    cfg_if::cfg_if! {
        if #[cfg(aarch64_seminix)] {
            // SAFETY: this kernel dedicates `sp_el0` to the current-task
            // pointer; the value is only interpreted by `current_fast`.
            unsafe {
                core::arch::asm!(
                    "msr sp_el0, {0}",
                    in(reg) ts
                );
            }
        } else {
            unreachable!()
        }
    }
}
// Statically-allocated placeholder task installed before the allocator and
// real tasks exist, so `current_fast()` always dereferences a valid `Task`.
static DUMMY_TASK: Task = Task::new();
/// Early boot hook: points the fast current-task register at `DUMMY_TASK`
/// until `task_init` swaps in the real root task.
pub(crate) fn early_task_init() {
    set_current_fast(&raw const DUMMY_TASK as usize);
}
fn root_task_init() -> usize {
let tsk = Arc::new(Task::init(
"root_server",
0,
Some(Arc::new(MmStruct::create().unwrap())),
init_stack_base().to_value(),
));
let raw_tsk = tsk.as_ref() as *const Task as usize;
switch_entry_task(0, tsk);
raw_tsk
}
/// Boot-time task-subsystem initialisation; must run after `early_task_init`.
#[allow(static_mut_refs)]
pub(crate) fn task_init() {
    // Grow the per-CPU entry-task table to one empty slot per online CPU,
    // so `switch_entry_task` can index it below.
    // SAFETY: presumably runs single-threaded during boot — confirm no other
    // CPU touches these statics yet.
    unsafe {
        for _ in 0..nr_cpus() {
            PERCPU_ENTRY_TASK.push(None);
        }
    }
    pid_init();
    // CPU 0 gets the root server task; every other CPU gets an idle task.
    let root_tsk = root_task_init();
    for idx in 1..nr_cpus() {
        let idle = idle_create(idx);
        switch_entry_task(idx, idle);
    }
    // Swap the fast "current task" pointer from DUMMY_TASK (installed by
    // early_task_init) to the root task, asserting that the preempt count
    // carried across the switch is unchanged.
    let preempt = current_fast().preempt();
    set_current_fast(root_tsk);
    assert_eq!(preempt, current_fast().preempt());
    // Activate the root task's address space.
    current().mm().unwrap().switch_mm();
}
/// Allocates an idle task named `idle/<cpu>` for the given CPU; idle tasks
/// carry no address space.
pub(crate) fn idle_create(cpu: usize) -> Arc<Task> {
    let name = format!("idle/{cpu}");
    Arc::new(Task::create(name.as_str(), cpu, None))
}
/// Records `tsk` as the entry task for `cpu`, both as an owning `Arc`
/// (which keeps the task alive) and as a raw address in the `no_mangle`
/// mirror table read by `get_idle_task_info`.
#[inline(always)]
pub(crate) fn switch_entry_task(cpu: usize, tsk: Arc<Task>) {
    let raw_tsk = Arc::as_ptr(&tsk) as usize;
    // SAFETY: presumably called only from single-threaded boot paths
    // (`task_init` / `root_task_init`) — confirm there are no concurrent
    // readers of these statics at that point.
    unsafe {
        PERCPU_ENTRY_TASK[cpu] = Some(tsk);
        PERCPU_ENTRY_TASK_RAW[cpu] = raw_tsk;
    }
}
// Per-CPU entry (bootstrap/idle) tasks; the `Arc`s here keep them alive.
// Sized at runtime by `task_init` to `nr_cpus()` slots.
static mut PERCPU_ENTRY_TASK: Vec<Option<Arc<Task>>> = Vec::new();
// Raw-address mirror of the table above, statically sized by `NR_CPUS` and
// exported with an unmangled symbol name — presumably consumed by assembly
// or early boot code; verify against the linker/asm side.
#[unsafe(no_mangle)]
static mut PERCPU_ENTRY_TASK_RAW: [usize; NR_CPUS] = [0; NR_CPUS];