1use core::{arch::naked_asm, fmt};
2
3use memory_addr::VirtAddr;
4
/// Saved registers when a trap (interrupt or exception) occurs.
///
/// NOTE(review): the field order presumably mirrors the push order of the
/// trap-entry assembly stub (not visible in this file) — confirm against
/// that stub before reordering any field.
#[allow(missing_docs)]
#[repr(C)]
#[derive(Debug, Default, Clone, Copy)]
pub struct TrapFrame {
    // General-purpose registers, saved by software on trap entry.
    pub rax: u64,
    pub rcx: u64,
    pub rdx: u64,
    pub rbx: u64,
    pub rbp: u64,
    pub rsi: u64,
    pub rdi: u64,
    pub r8: u64,
    pub r9: u64,
    pub r10: u64,
    pub r11: u64,
    pub r12: u64,
    pub r13: u64,
    pub r14: u64,
    pub r15: u64,

    // Interrupt vector number and error code (presumably a dummy value for
    // vectors that do not push one — confirm in the entry stub).
    pub vector: u64,
    pub error_code: u64,

    // Interrupt stack frame as pushed by the CPU on a trap:
    // RIP, CS, RFLAGS, RSP, SS.
    pub rip: u64,
    pub cs: u64,
    pub rflags: u64,
    pub rsp: u64,
    pub ss: u64,
}
37
38impl TrapFrame {
39 pub const fn arg0(&self) -> usize {
41 self.rdi as _
42 }
43
44 pub const fn set_arg0(&mut self, rdi: usize) {
46 self.rdi = rdi as _;
47 }
48
49 pub const fn arg1(&self) -> usize {
51 self.rsi as _
52 }
53
54 pub const fn set_arg1(&mut self, rsi: usize) {
56 self.rsi = rsi as _;
57 }
58
59 pub const fn arg2(&self) -> usize {
61 self.rdx as _
62 }
63
64 pub const fn set_arg2(&mut self, rdx: usize) {
66 self.rdx = rdx as _;
67 }
68
69 pub const fn arg3(&self) -> usize {
71 self.r10 as _
72 }
73
74 pub const fn set_arg3(&mut self, r10: usize) {
76 self.r10 = r10 as _;
77 }
78
79 pub const fn arg4(&self) -> usize {
81 self.r8 as _
82 }
83
84 pub const fn set_arg4(&mut self, r8: usize) {
86 self.r8 = r8 as _;
87 }
88
89 pub const fn arg5(&self) -> usize {
91 self.r9 as _
92 }
93
94 pub const fn set_arg5(&mut self, r9: usize) {
96 self.r9 = r9 as _;
97 }
98
99 pub const fn ip(&self) -> usize {
101 self.rip as _
102 }
103
104 pub const fn set_ip(&mut self, rip: usize) {
106 self.rip = rip as _;
107 }
108
109 pub const fn sp(&self) -> usize {
111 self.rsp as _
112 }
113
114 pub const fn set_sp(&mut self, rsp: usize) {
116 self.rsp = rsp as _;
117 }
118
119 pub const fn sysno(&self) -> usize {
121 self.rax as usize
122 }
123
124 pub const fn set_sysno(&mut self, rax: usize) {
126 self.rax = rax as _;
127 }
128
129 pub const fn retval(&self) -> usize {
131 self.rax as _
132 }
133
134 pub const fn set_retval(&mut self, rax: usize) {
136 self.rax = rax as _;
137 }
138
139 pub fn backtrace(&self) -> axbacktrace::Backtrace {
141 axbacktrace::Backtrace::capture_trap(self.rbp as _, self.rip as _, 0)
142 }
143}
144
/// Callee-saved registers stored on the kernel stack by [`context_switch`].
///
/// The field order must stay in sync with the pop sequence in
/// [`context_switch`]: after RSP is loaded, it pops `r15`, `r14`, `r13`,
/// `r12`, `rbx`, `rbp` (in ascending address order), and finally `ret`
/// consumes `rip` as the return address.
#[repr(C)]
#[derive(Debug, Default)]
struct ContextSwitchFrame {
    r15: u64,
    r14: u64,
    r13: u64,
    r12: u64,
    rbx: u64,
    rbp: u64,
    // Return address: for a fresh task, this is the entry point written by
    // `TaskContext::init`.
    rip: u64,
}
156
/// Memory layout of the 512-byte `FXSAVE`/`FXRSTOR` area.
///
/// Used by [`ExtendedState`] to save and restore the x87 FPU and SSE state.
/// The layout follows the FXSAVE format from the Intel SDM; it must be
/// 16-byte aligned and exactly 512 bytes (asserted below).
#[allow(missing_docs)]
#[repr(C, align(16))]
#[derive(Debug)]
pub struct FxsaveArea {
    pub fcw: u16,        // x87 control word
    pub fsw: u16,        // x87 status word
    pub ftw: u16,        // x87 tag word
    pub fop: u16,        // last x87 opcode
    pub fip: u64,        // last x87 instruction pointer
    pub fdp: u64,        // last x87 data pointer
    pub mxcsr: u32,      // SSE control/status register
    pub mxcsr_mask: u32, // writable bits of MXCSR
    pub st: [u64; 16],   // ST0-ST7 / MM0-MM7 register slots, 16 bytes each
    pub xmm: [u64; 32],  // XMM0-XMM15 registers, 16 bytes each
    _padding: [u64; 12], // pads the structure out to 512 bytes
}

// The FXSAVE area is architecturally defined to be exactly 512 bytes.
static_assertions::const_assert_eq!(core::mem::size_of::<FxsaveArea>(), 512);
179
/// Extended processor state (x87 FPU and SSE), saved and restored with
/// `FXSAVE64`/`FXRSTOR64`.
pub struct ExtendedState {
    /// The raw 512-byte FXSAVE area.
    pub fxsave_area: FxsaveArea,
}
185
#[cfg(feature = "fp-simd")]
impl ExtendedState {
    /// Saves the CPU's current FP/SIMD state into `self.fxsave_area`.
    #[inline]
    pub fn save(&mut self) {
        // SAFETY: `FxsaveArea` is `#[repr(C, align(16))]` and exactly 512
        // bytes (statically asserted above), satisfying the operand
        // requirements of FXSAVE64.
        unsafe { core::arch::x86_64::_fxsave64(&mut self.fxsave_area as *mut _ as *mut u8) }
    }

    /// Restores the CPU's FP/SIMD state from `self.fxsave_area`.
    #[inline]
    pub fn restore(&self) {
        // SAFETY: `FxsaveArea` is properly sized and aligned (see `save`);
        // its contents come from a prior FXSAVE or from `default()`.
        unsafe { core::arch::x86_64::_fxrstor64(&self.fxsave_area as *const _ as *const u8) }
    }

    /// Returns a zero-initialized state with the architectural reset values
    /// for the control registers.
    ///
    /// This is a `const` stand-in for the `Default` trait, whose method
    /// cannot be called in const contexts.
    pub const fn default() -> Self {
        // SAFETY: every field of `FxsaveArea` is a plain integer or an array
        // of integers, for which the all-zero bit pattern is a valid value.
        let mut area: FxsaveArea = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
        area.fcw = 0x37f;    // x87 default control word (all exceptions masked)
        area.ftw = 0xffff;   // all x87 registers tagged empty
        area.mxcsr = 0x1f80; // MXCSR reset value (all SSE exceptions masked)
        Self { fxsave_area: area }
    }
}
209
210impl fmt::Debug for ExtendedState {
211 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
212 f.debug_struct("ExtendedState")
213 .field("fxsave_area", &self.fxsave_area)
214 .finish()
215 }
216}
217
/// Saved hardware state of a task, used across context switches.
#[derive(Debug)]
pub struct TaskContext {
    /// The top of the task's kernel stack.
    pub kstack_top: VirtAddr,
    /// The task's saved kernel stack pointer; while the task is switched
    /// out, it points at a [`ContextSwitchFrame`] on the kernel stack.
    pub rsp: u64,
    /// Thread pointer (thread-local storage base; presumably the FS segment
    /// base — confirm in `crate::asm`'s thread-pointer helpers).
    pub fs_base: usize,
    /// Saved FP/SIMD state.
    #[cfg(feature = "fp-simd")]
    pub ext_state: ExtendedState,
    /// Physical address of the task's page-table root, written to the
    /// user page table register on switch when it differs.
    #[cfg(feature = "uspace")]
    pub cr3: memory_addr::PhysAddr,
}
252
impl TaskContext {
    /// Creates a dummy context for a new task.
    ///
    /// The returned context is not runnable until [`init`][Self::init] has
    /// set up the stack and entry point. With `uspace` enabled, `cr3`
    /// defaults to the kernel page table.
    pub fn new() -> Self {
        Self {
            kstack_top: va!(0),
            rsp: 0,
            fs_base: 0,
            #[cfg(feature = "uspace")]
            cr3: crate::asm::read_kernel_page_table(),
            #[cfg(feature = "fp-simd")]
            ext_state: ExtendedState::default(),
        }
    }

    /// Initializes the context so the task starts at `entry` when first
    /// switched to.
    ///
    /// - `entry`: address the task begins executing at.
    /// - `kstack_top`: top of the task's kernel stack.
    /// - `tls_area`: base address of the thread-local storage area.
    pub fn init(&mut self, entry: usize, kstack_top: VirtAddr, tls_area: VirtAddr) {
        unsafe {
            // Skip one u64 slot below the stack top, then place a
            // ContextSwitchFrame beneath it. NOTE(review): presumably this
            // keeps RSP ≡ 8 (mod 16) at `entry` — the state right after a
            // `call`, as the x86_64 SysV ABI expects — assuming `kstack_top`
            // is 16-byte aligned; confirm at the stack allocation site.
            let frame_ptr = (kstack_top.as_mut_ptr() as *mut u64).sub(1);
            let frame_ptr = (frame_ptr as *mut ContextSwitchFrame).sub(1);
            core::ptr::write(
                frame_ptr,
                ContextSwitchFrame {
                    // `ret` in `context_switch` jumps here on first switch-in.
                    rip: entry as _,
                    ..Default::default()
                },
            );
            self.rsp = frame_ptr as u64;
        }
        self.kstack_top = kstack_top;
        self.fs_base = tls_area.as_usize();
    }

    /// Sets the page-table root; takes effect the next time this context is
    /// switched to via [`switch_to`][Self::switch_to].
    #[cfg(feature = "uspace")]
    pub fn set_page_table_root(&mut self, cr3: memory_addr::PhysAddr) {
        self.cr3 = cr3;
    }

    /// Switches from `self` (the currently running task) to `next_ctx`.
    ///
    /// Saves the FP/SIMD state and thread pointer of the current task and
    /// restores those of the next task, switches the page table if it
    /// differs, then swaps stacks via [`context_switch`]. Returns only when
    /// this context is eventually switched back to.
    pub fn switch_to(&mut self, next_ctx: &Self) {
        #[cfg(feature = "fp-simd")]
        {
            self.ext_state.save();
            next_ctx.ext_state.restore();
        }
        #[cfg(feature = "tls")]
        unsafe {
            // SAFETY: delegates to the arch helpers; `next_ctx.fs_base` was
            // set from a valid TLS area in `init` (or saved here previously).
            self.fs_base = crate::asm::read_thread_pointer();
            crate::asm::write_thread_pointer(next_ctx.fs_base);
        }
        #[cfg(feature = "uspace")]
        unsafe {
            // Skip the page-table reload (and the TLB flush a CR3 write
            // implies) when both tasks share the same address space.
            if next_ctx.cr3 != self.cr3 {
                crate::asm::write_user_page_table(next_ctx.cr3);
            }
        }
        // SAFETY: `next_ctx.rsp` points at a valid context-switch frame, set
        // up by `init` or saved by a previous call to `context_switch`.
        unsafe { context_switch(&mut self.rsp, &next_ctx.rsp) }
    }
}
329
/// Low-level stack switch between two tasks.
///
/// Pushes the callee-saved registers (rbp, rbx, r12-r15) onto the current
/// stack and stores the resulting RSP into `*_current_stack`; then loads
/// RSP from `*_next_stack`, pops the callee-saved registers in reverse
/// order, and `ret`s to the return address found on the new stack — either
/// the `rip` written by [`TaskContext::init`] for a fresh task, or the
/// caller of a previous `context_switch` otherwise. Caller-saved registers
/// need no explicit handling: the `extern "C"` ABI already lets the
/// compiler assume this call clobbers them.
///
/// # Safety
///
/// `_next_stack` must hold a stack pointer previously produced by this
/// function or set up by [`TaskContext::init`], and `_current_stack` must
/// be writable; otherwise the pops and `ret` dereference garbage.
#[unsafe(naked)]
unsafe extern "C" fn context_switch(_current_stack: &mut u64, _next_stack: &u64) {
    naked_asm!(
        "
        .code64
        push rbp
        push rbx
        push r12
        push r13
        push r14
        push r15
        mov [rdi], rsp

        mov rsp, [rsi]
        pop r15
        pop r14
        pop r13
        pop r12
        pop rbx
        pop rbp
        ret",
    )
}