// sparreal_kernel/globals/percpu.rs
use core::{
2 alloc::Layout,
3 fmt::Display,
4 ops::Range,
5 sync::atomic::{AtomicBool, Ordering},
6};
7
8use alloc::{alloc::alloc, collections::btree_map::BTreeMap};
9use log::debug;
10
11use crate::{
12 irq,
13 mem::{PhysAddr, mmu::LINER_OFFSET, region::boot_regions},
14 platform::{self, CPUHardId, CPUId, cpu_hard_id, cpu_list, kstack_size, mmu::page_size},
15 time::TimerData,
16};
17
18use super::once::OnceStatic;
19
// Set to true once `setup_percpu` has populated all per-CPU tables; gates
// every accessor below so they never read half-initialized maps.
static IS_INITED: AtomicBool = AtomicBool::new(false);
// Hardware CPU id -> logical (sequential) CPU id. Filled by `add_cpu`.
static HARD_TO_SOFT: OnceStatic<BTreeMap<CPUHardId, CPUId>> = OnceStatic::new(BTreeMap::new());
// Logical CPU id -> hardware CPU id (inverse of `HARD_TO_SOFT`).
static SOFT_TO_HARD: OnceStatic<BTreeMap<CPUId, CPUHardId>> = OnceStatic::new(BTreeMap::new());
// Per-CPU state (irq chips, timer, kernel stack range), keyed by hardware id.
static PER_CPU: OnceStatic<BTreeMap<CPUHardId, PerCPU>> = OnceStatic::new(BTreeMap::new());
24
25impl From<CPUHardId> for CPUId {
26 fn from(value: CPUHardId) -> Self {
27 unsafe { *(*HARD_TO_SOFT.get()).get(&value).unwrap() }
28 }
29}
30
31impl From<CPUId> for CPUHardId {
32 fn from(value: CPUId) -> Self {
33 unsafe { *(*SOFT_TO_HARD.get()).get(&value).unwrap() }
34 }
35}
36
/// State owned by a single CPU, stored in the global `PER_CPU` table.
pub struct PerCPU {
    // Interrupt-chip state local to this CPU.
    pub irq_chips: irq::CpuIrqChips,
    // Per-CPU timer bookkeeping.
    pub timer: TimerData,
    // Physical address range of this CPU's kernel stack
    // (bottom..bottom + kstack_size(), see `add_cpu`).
    pub stack: Range<PhysAddr>,
}
42
43pub unsafe fn setup_percpu() {
48 let mut idx = 0;
49 let cpu0 = cpu_hard_id();
50 add_cpu(cpu0, idx);
51 idx += 1;
52
53 let cpus = cpu_list();
54 for cpu in cpus {
55 if cpu.cpu_id == cpu0 {
56 continue;
57 }
58 add_cpu(cpu.cpu_id, idx);
59 idx += 1;
60 }
61 IS_INITED.store(true, Ordering::SeqCst);
62}
63
64fn add_cpu(cpu: CPUHardId, idx: usize) {
65 unsafe {
66 let id = CPUId::from(idx);
67
68 let stack_bottom = if idx == 0 {
69 let region = platform::boot_regions()
70 .into_iter()
71 .find(|o| o.name().contains("stack0"))
72 .expect("stack region not found!");
73
74 region.range.start
75 } else {
76 let stack =
77 alloc::alloc::alloc(Layout::from_size_align(kstack_size(), page_size()).unwrap());
78 PhysAddr::from(stack as usize - LINER_OFFSET)
79 };
80
81 (*PER_CPU.get()).insert(
82 cpu,
83 PerCPU {
84 irq_chips: Default::default(),
85 timer: Default::default(),
86 stack: stack_bottom..stack_bottom + kstack_size(),
87 },
88 );
89 (*HARD_TO_SOFT.get()).insert(cpu, id);
90 (*SOFT_TO_HARD.get()).insert(id, cpu);
91 }
92}
93
94pub fn cpu_global() -> &'static PerCPU {
95 cpu_global_meybeuninit().expect("CPU global is not init!")
96}
97
98pub unsafe fn cpu_global_mut() -> &'static mut PerCPU {
99 unsafe { cpu_global_mut_meybeunint().expect("CPU global is not init!") }
100}
101
102pub fn cpu_global_mut_meybeunint() -> Option<&'static mut PerCPU> {
103 if !IS_INITED.load(Ordering::SeqCst) {
104 return None;
105 }
106 let cpu = cpu_hard_id();
107 unsafe { (*PER_CPU.get()).get_mut(&cpu) }
108}
109
110pub fn cpu_global_meybeuninit() -> Option<&'static PerCPU> {
111 if !cpu_inited() {
112 return None;
113 }
114 let cpu = cpu_hard_id();
115 unsafe { (*PER_CPU.get()).get(&cpu) }
116}
117
/// Whether `setup_percpu` has completed and the per-CPU tables are usable.
pub fn cpu_inited() -> bool {
    IS_INITED.load(Ordering::SeqCst)
}