1#![cfg_attr(not(test), no_std)]
2#![feature(step_trait)]
3#![deny(unsafe_op_in_unsafe_fn)]
4
5use crate::legacy_memory_region::{LegacyFrameAllocator, LegacyMemoryRegion};
6use bootloader_api::{
7 BootInfo, BootloaderConfig,
8 config::Mapping,
9 info::{FrameBuffer, FrameBufferInfo, MemoryRegion, TlsTemplate},
10};
11use bootloader_boot_config::{BootConfig, LevelFilter};
12use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
13use level_4_entries::UsedLevel4Entries;
14use usize_conversions::FromUsize;
15use x86_64::{
16 PhysAddr, VirtAddr,
17 structures::paging::{
18 FrameAllocator, Mapper, OffsetPageTable, Page, PageSize, PageTableFlags, PageTableIndex,
19 PhysFrame, Size2MiB, Size4KiB, page_table::PageTableLevel,
20 },
21};
22use xmas_elf::ElfFile;
23
24mod entropy;
26pub mod framebuffer;
28mod gdt;
29pub mod legacy_memory_region;
31pub mod level_4_entries;
33pub mod load_kernel;
35pub mod logger;
37pub mod serial;
39
/// Size of a single 4KiB page in bytes.
const PAGE_SIZE: u64 = 4096;
41
42pub fn init_logger(
44 framebuffer: &'static mut [u8],
45 info: FrameBufferInfo,
46 log_level: LevelFilter,
47 frame_buffer_logger_status: bool,
48 serial_logger_status: bool,
49) {
50 let logger = logger::LOGGER.get_or_init(move || {
51 logger::LockedLogger::new(
52 framebuffer,
53 info,
54 frame_buffer_logger_status,
55 serial_logger_status,
56 )
57 });
58 log::set_logger(logger).expect("logger already set");
59 log::set_max_level(convert_level(log_level));
60 log::info!("Framebuffer info: {:?}", info);
61}
62
63fn convert_level(level: LevelFilter) -> log::LevelFilter {
64 match level {
65 LevelFilter::Off => log::LevelFilter::Off,
66 LevelFilter::Error => log::LevelFilter::Error,
67 LevelFilter::Warn => log::LevelFilter::Warn,
68 LevelFilter::Info => log::LevelFilter::Info,
69 LevelFilter::Debug => log::LevelFilter::Debug,
70 LevelFilter::Trace => log::LevelFilter::Trace,
71 }
72}
73
/// Firmware-provided information about the system, gathered by the
/// stage before the kernel is loaded.
#[derive(Debug, Copy, Clone)]
pub struct SystemInfo {
    /// Information about the (optional) framebuffer for screen output.
    pub framebuffer: Option<RawFrameBufferInfo>,
    /// Physical address of the ACPI RSDP table, if one was found.
    pub rsdp_addr: Option<PhysAddr>,
    /// Physical start address of the ramdisk, if one was loaded.
    pub ramdisk_addr: Option<u64>,
    /// Length of the ramdisk in bytes (0 if there is no ramdisk).
    pub ramdisk_len: u64,
}
84
/// The physical address of the framebuffer together with its layout
/// information (resolution, pixel format, byte length, …).
#[derive(Debug, Copy, Clone)]
pub struct RawFrameBufferInfo {
    /// Start address of the framebuffer in physical memory.
    pub addr: PhysAddr,
    /// Layout and format information for the framebuffer.
    pub info: FrameBufferInfo,
}
93
/// A kernel image that was loaded into memory, together with the
/// bootloader configuration embedded in its `.bootloader-config` section.
pub struct Kernel<'a> {
    /// The parsed ELF representation of the kernel image.
    pub elf: ElfFile<'a>,
    /// Configuration deserialized from the kernel's `.bootloader-config` section.
    pub config: BootloaderConfig,
    /// Start of the raw kernel bytes in memory.
    pub start_address: *const u8,
    /// Length of the raw kernel bytes.
    pub len: usize,
}
100
101impl<'a> Kernel<'a> {
102 pub fn parse(kernel_slice: &'a [u8]) -> Self {
103 let kernel_elf = ElfFile::new(kernel_slice).unwrap();
104 let config = {
105 let section = kernel_elf
106 .find_section_by_name(".bootloader-config")
107 .expect("bootloader config section not found; kernel must be compiled against bootloader_api");
108 let raw = section.raw_data(&kernel_elf);
109 BootloaderConfig::deserialize(raw)
110 .expect("kernel was compiled with incompatible bootloader_api version")
111 };
112 Kernel {
113 elf: kernel_elf,
114 config,
115 start_address: kernel_slice.as_ptr(),
116 len: kernel_slice.len(),
117 }
118 }
119}
120
/// Loads the kernel into memory, sets up all required mappings and the boot
/// info, and then switches to the kernel, never returning.
///
/// This is the top-level driver: it runs [`set_up_mappings`],
/// [`create_boot_info`], and [`switch_to_kernel`] in order, threading the
/// frame allocator and page tables through the steps.
pub fn load_and_switch_to_kernel<I, D>(
    kernel: Kernel,
    boot_config: BootConfig,
    mut frame_allocator: LegacyFrameAllocator<I, D>,
    mut page_tables: PageTables,
    system_info: SystemInfo,
) -> !
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    // Copy the config out before `kernel` is moved into `set_up_mappings`.
    let config = kernel.config;
    let mut mappings = set_up_mappings(
        kernel,
        &mut frame_allocator,
        &mut page_tables,
        system_info.framebuffer.as_ref(),
        &config,
        &system_info,
    );
    // Consumes the frame allocator: the final memory map is built here.
    let boot_info = create_boot_info(
        &config,
        &boot_config,
        frame_allocator,
        &mut page_tables,
        &mut mappings,
        system_info,
    );
    switch_to_kernel(page_tables, mappings, boot_info);
}
156
/// Sets up the kernel address space: loads the kernel ELF, maps a guarded
/// kernel stack, identity-maps the context-switch trampoline and the GDT,
/// and — depending on `config` — maps the framebuffer, the ramdisk, all of
/// physical memory, and a recursive level-4 page-table entry.
///
/// Returns a [`Mappings`] struct describing every mapping that was created,
/// which later feeds into [`create_boot_info`] and [`switch_to_kernel`].
pub fn set_up_mappings<I, D>(
    kernel: Kernel,
    frame_allocator: &mut LegacyFrameAllocator<I, D>,
    page_tables: &mut PageTables,
    framebuffer: Option<&RawFrameBufferInfo>,
    config: &BootloaderConfig,
    system_info: &SystemInfo,
) -> Mappings
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    let kernel_page_table = &mut page_tables.kernel;

    // Record which level-4 entries are already claimed (by fixed-address
    // mappings, the kernel image, etc.) so dynamic mappings can avoid them.
    let mut used_entries = UsedLevel4Entries::new(
        frame_allocator.max_phys_addr(),
        frame_allocator.len(),
        framebuffer,
        config,
        &kernel.elf,
    )
    .expect("Failed to mark level 4 entries as used");

    // Enable the no-execute bit so NO_EXECUTE page flags take effect.
    enable_nxe_bit();
    // Honor read-only pages even in ring 0 (needed for W^X kernel segments).
    enable_write_protect_bit();

    let config = kernel.config;
    let kernel_slice_start = PhysAddr::new(kernel.start_address as _);
    let kernel_slice_len = u64::try_from(kernel.len).unwrap();

    // Map the kernel ELF segments into the kernel page table; this consumes
    // `kernel` and yields its load offset, entry point, and TLS template.
    let (kernel_image_offset, entry_point, tls_template) = load_kernel::load_kernel(
        kernel,
        kernel_page_table,
        frame_allocator,
        &mut used_entries,
    )
    .expect("no entry point");
    log::info!("Entry point at: {:#x}", entry_point.as_u64());
    // Reserve space for the kernel stack plus one extra page below it that
    // stays unmapped, so stack overflows page-fault instead of corrupting
    // adjacent memory.
    let stack_start = {
        let guard_page = mapping_addr_page_aligned(
            config.mappings.kernel_stack,
            // one additional page reserved for the (unmapped) guard page
            Size4KiB::SIZE + config.kernel_stack_size,
            &mut used_entries,
            "kernel stack start",
        );
        // The first usable stack page lies directly above the guard page.
        guard_page + 1
    };
    let stack_end_addr = stack_start.start_address() + config.kernel_stack_size;

    // Back every stack page with a freshly allocated frame; the guard page
    // is intentionally left unmapped.
    let stack_end = Page::containing_address(stack_end_addr - 1u64);
    for page in Page::range_inclusive(stack_start, stack_end) {
        let frame = frame_allocator
            .allocate_frame()
            .expect("frame allocation failed when mapping a kernel stack");
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
        match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
            Ok(tlb) => tlb.flush(),
            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
        }
    }

    // Identity-map the `context_switch` trampoline so it keeps executing
    // after CR3 is switched to the kernel page table. Two frames are mapped
    // in case the function body straddles a frame boundary.
    let context_switch_function = PhysAddr::new(context_switch as *const () as u64);
    let context_switch_function_start_frame: PhysFrame =
        PhysFrame::containing_address(context_switch_function);
    for frame in PhysFrame::range_inclusive(
        context_switch_function_start_frame,
        context_switch_function_start_frame + 1,
    ) {
        let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
        match unsafe {
            kernel_page_table.map_to_with_table_flags(
                page,
                frame,
                PageTableFlags::PRESENT,
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
                frame_allocator,
            )
        } {
            Ok(tlb) => tlb.flush(),
            Err(err) => panic!("failed to identity map frame {:?}: {:?}", frame, err),
        }
    }

    // Allocate, load, and identity-map a fresh GDT so a valid descriptor
    // table remains accessible after the page-table switch.
    let gdt_frame = frame_allocator
        .allocate_frame()
        .expect("failed to allocate GDT frame");
    gdt::create_and_load(gdt_frame);
    let gdt_page = Page::containing_address(VirtAddr::new(gdt_frame.start_address().as_u64()));
    match unsafe {
        kernel_page_table.map_to_with_table_flags(
            gdt_page,
            gdt_frame,
            PageTableFlags::PRESENT,
            PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
            frame_allocator,
        )
    } {
        Ok(tlb) => tlb.flush(),
        Err(err) => panic!("failed to identity map frame {:?}: {:?}", gdt_frame, err),
    }

    // Map the framebuffer (if present) into the kernel address space so the
    // kernel can keep drawing to the screen.
    let framebuffer_virt_addr = if let Some(framebuffer) = framebuffer {
        log::info!("Map framebuffer");

        let framebuffer_start_frame: PhysFrame = PhysFrame::containing_address(framebuffer.addr);
        let framebuffer_end_frame = PhysFrame::containing_address(
            framebuffer.addr + framebuffer.info.byte_len as u64 - 1u64,
        );
        let start_page = mapping_addr_page_aligned(
            config.mappings.framebuffer,
            u64::from_usize(framebuffer.info.byte_len),
            &mut used_entries,
            "framebuffer",
        );
        for (i, frame) in
            PhysFrame::range_inclusive(framebuffer_start_frame, framebuffer_end_frame).enumerate()
        {
            let page = start_page + u64::from_usize(i);
            let flags =
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
            match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
                Ok(tlb) => tlb.flush(),
                Err(err) => panic!(
                    "failed to map page {:?} to frame {:?}: {:?}",
                    page, frame, err
                ),
            }
        }
        let framebuffer_virt_addr = start_page.start_address();
        Some(framebuffer_virt_addr)
    } else {
        None
    };
    // Map the ramdisk (if present) into the kernel address space.
    let ramdisk_slice_len = system_info.ramdisk_len;
    let ramdisk_slice_phys_start = system_info.ramdisk_addr.map(PhysAddr::new);
    let ramdisk_slice_start = if let Some(physical_address) = ramdisk_slice_phys_start {
        let start_page = mapping_addr_page_aligned(
            config.mappings.ramdisk_memory,
            system_info.ramdisk_len,
            &mut used_entries,
            "ramdisk start",
        );
        let ramdisk_physical_start_page: PhysFrame<Size4KiB> =
            PhysFrame::containing_address(physical_address);
        // Number of *additional* pages beyond the first (inclusive range below).
        let ramdisk_page_count = (system_info.ramdisk_len - 1) / Size4KiB::SIZE;
        let ramdisk_physical_end_page = ramdisk_physical_start_page + ramdisk_page_count;

        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
        for (i, frame) in
            PhysFrame::range_inclusive(ramdisk_physical_start_page, ramdisk_physical_end_page)
                .enumerate()
        {
            let page = start_page + i as u64;
            // `ignore()` is fine here: the mapping is not used before the
            // CR3 switch, which flushes the whole TLB anyway.
            match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
                Ok(tlb) => tlb.ignore(),
                Err(err) => panic!(
                    "Failed to map page {:?} to frame {:?}: {:?}",
                    page, frame, err
                ),
            };
        }
        Some(start_page.start_address())
    } else {
        None
    };

    // Optionally map the complete physical memory at a fixed offset using
    // 2MiB huge pages, so the kernel can access arbitrary physical frames.
    let physical_memory_offset = if let Some(mapping) = config.mappings.physical_memory {
        log::info!("Map physical memory");

        let start_frame = PhysFrame::containing_address(PhysAddr::new(0));
        let max_phys = frame_allocator.max_phys_addr();
        let end_frame: PhysFrame<Size2MiB> = PhysFrame::containing_address(max_phys - 1u64);

        let size = max_phys.as_u64();
        let alignment = Size2MiB::SIZE;
        let offset = mapping_addr(mapping, size, alignment, &mut used_entries)
            .expect("start address for physical memory mapping must be 2MiB-page-aligned");

        for frame in PhysFrame::range_inclusive(start_frame, end_frame) {
            let page = Page::containing_address(offset + frame.start_address().as_u64());
            let flags =
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
            // `ignore()`: TLB is flushed wholesale by the later CR3 switch.
            match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
                Ok(tlb) => tlb.ignore(),
                Err(err) => panic!(
                    "failed to map page {:?} to frame {:?}: {:?}",
                    page, frame, err
                ),
            };
        }

        Some(offset)
    } else {
        None
    };

    // Optionally install a recursive level-4 entry so the kernel can access
    // and modify its own page tables through the recursive mapping trick.
    let recursive_index = if let Some(mapping) = config.mappings.page_table_recursive {
        log::info!("Map page table recursively");
        let index = match mapping {
            Mapping::Dynamic => used_entries.get_free_entries(1),
            Mapping::FixedAddress(offset) => {
                let offset = VirtAddr::new(offset);
                let table_level = PageTableLevel::Four;
                // A level-4 entry covers a fixed 512GiB-aligned region, so a
                // fixed address must sit exactly on such a boundary.
                if !offset.is_aligned(table_level.entry_address_space_alignment()) {
                    panic!(
                        "Offset for recursive mapping must be properly aligned (must be \
                        a multiple of {:#x})",
                        table_level.entry_address_space_alignment()
                    );
                }

                offset.p4_index()
            }
        };

        let entry = &mut kernel_page_table.level_4_table_mut()[index];
        if !entry.is_unused() {
            panic!(
                "Could not set up recursive mapping: index {} already in use",
                u16::from(index)
            );
        }
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
        // Point the entry back at the level-4 table itself.
        entry.set_frame(page_tables.kernel_level_4_frame, flags);

        Some(index)
    } else {
        None
    };

    Mappings {
        framebuffer: framebuffer_virt_addr,
        entry_point,
        stack_bottom: stack_start.start_address(),
        // The ABI requires a 16-byte-aligned stack pointer at entry.
        stack_top: stack_end_addr.align_down(16u8),
        used_entries,
        physical_memory_offset,
        recursive_index,
        tls_template,

        kernel_slice_start,
        kernel_slice_len,
        kernel_image_offset,

        ramdisk_slice_phys_start,
        ramdisk_slice_start,
        ramdisk_slice_len,
    }
}
437
/// Describes all memory mappings created by [`set_up_mappings`].
pub struct Mappings {
    /// The kernel's entry point address (virtual, in the kernel address space).
    pub entry_point: VirtAddr,
    /// Lowest address of the kernel stack (directly above the guard page).
    pub stack_bottom: VirtAddr,
    /// Initial stack pointer for the kernel, aligned down to 16 bytes.
    pub stack_top: VirtAddr,
    /// Tracks which level-4 page-table entries are already in use.
    pub used_entries: UsedLevel4Entries,
    /// Virtual start address of the framebuffer mapping, if one was created.
    pub framebuffer: Option<VirtAddr>,
    /// Virtual offset at which all physical memory is mapped, if configured.
    pub physical_memory_offset: Option<VirtAddr>,
    /// Level-4 index of the recursive page-table mapping, if configured.
    pub recursive_index: Option<PageTableIndex>,
    /// Thread-local-storage template of the kernel ELF, if it has one.
    pub tls_template: Option<TlsTemplate>,

    /// Physical start address of the raw kernel image in memory.
    pub kernel_slice_start: PhysAddr,
    /// Length of the raw kernel image in bytes.
    pub kernel_slice_len: u64,
    /// Virtual address at which the kernel image was loaded.
    pub kernel_image_offset: VirtAddr,
    /// Physical start address of the ramdisk, if one exists.
    pub ramdisk_slice_phys_start: Option<PhysAddr>,
    /// Virtual start address of the ramdisk mapping, if one was created.
    pub ramdisk_slice_start: Option<VirtAddr>,
    /// Length of the ramdisk in bytes (0 if there is no ramdisk).
    pub ramdisk_slice_len: u64,
}
467
/// Allocates and initializes the [`BootInfo`] struct together with the
/// memory-region map that is passed to the kernel.
///
/// The boot info and the memory map are placed contiguously in fresh frames
/// that are mapped into *both* the kernel and the bootloader address spaces
/// (the latter so this function can write to them before the CR3 switch).
/// Consumes the frame allocator, since constructing the final memory map
/// freezes the set of usable regions.
pub fn create_boot_info<I, D>(
    config: &BootloaderConfig,
    boot_config: &BootConfig,
    mut frame_allocator: LegacyFrameAllocator<I, D>,
    page_tables: &mut PageTables,
    mappings: &mut Mappings,
    system_info: SystemInfo,
) -> &'static mut BootInfo
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    log::info!("Allocate bootinfo");

    let (boot_info, memory_regions) = {
        // Compute a combined layout: the BootInfo header followed by the
        // worst-case number of MemoryRegion entries.
        let boot_info_layout = Layout::new::<BootInfo>();
        let regions = frame_allocator.memory_map_max_region_count();
        let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
        let (combined, memory_regions_offset) =
            boot_info_layout.extend(memory_regions_layout).unwrap();

        let boot_info_addr = mapping_addr(
            config.mappings.boot_info,
            u64::from_usize(combined.size()),
            u64::from_usize(combined.align()),
            &mut mappings.used_entries,
        )
        .expect("boot info addr is not properly aligned");

        let memory_map_regions_addr = boot_info_addr + memory_regions_offset as u64;
        let memory_map_regions_end = boot_info_addr + combined.size() as u64;

        // Back the whole area with fresh frames, mapped at the same virtual
        // address in both page tables.
        let start_page = Page::containing_address(boot_info_addr);
        let end_page = Page::containing_address(memory_map_regions_end - 1u64);
        for page in Page::range_inclusive(start_page, end_page) {
            let flags =
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
            let frame = frame_allocator
                .allocate_frame()
                .expect("frame allocation for boot info failed");
            match unsafe {
                page_tables
                    .kernel
                    .map_to(page, frame, flags, &mut frame_allocator)
            } {
                Ok(tlb) => tlb.flush(),
                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
            }
            // Also map into the (currently active) bootloader page table so
            // the writes below are possible before switching to the kernel.
            match unsafe {
                page_tables
                    .bootloader
                    .map_to(page, frame, flags, &mut frame_allocator)
            } {
                Ok(tlb) => tlb.flush(),
                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
            }
        }

        // SAFETY(review): the addresses were just mapped writable above and
        // are treated as uninitialized until written below.
        let boot_info: &'static mut MaybeUninit<BootInfo> =
            unsafe { &mut *boot_info_addr.as_mut_ptr() };
        let memory_regions: &'static mut [MaybeUninit<MemoryRegion>] =
            unsafe { slice::from_raw_parts_mut(memory_map_regions_addr.as_mut_ptr(), regions) };
        (boot_info, memory_regions)
    };

    log::info!("Create Memory Map");

    // Build the final memory map, carving out the kernel image and ramdisk.
    let memory_regions = frame_allocator.construct_memory_map(
        memory_regions,
        mappings.kernel_slice_start,
        mappings.kernel_slice_len,
        mappings.ramdisk_slice_phys_start,
        mappings.ramdisk_slice_len,
    );

    log::info!("Create bootinfo");

    // Initialize the boot info in place and hand back the initialized reference.
    let boot_info = boot_info.write({
        let mut info = BootInfo::new(memory_regions.into());
        info.framebuffer = mappings
            .framebuffer
            .map(|addr| unsafe {
                FrameBuffer::new(
                    addr.as_u64(),
                    system_info
                        .framebuffer
                        .expect(
                            "there shouldn't be a mapping for the framebuffer if there is \
                            no framebuffer",
                        )
                        .info,
                )
            })
            .into();
        info.physical_memory_offset = mappings.physical_memory_offset.map(VirtAddr::as_u64).into();
        info.recursive_index = mappings.recursive_index.map(Into::into).into();
        info.rsdp_addr = system_info.rsdp_addr.map(|addr| addr.as_u64()).into();
        info.tls_template = mappings.tls_template.into();
        info.ramdisk_addr = mappings
            .ramdisk_slice_start
            .map(|addr| addr.as_u64())
            .into();
        info.ramdisk_len = mappings.ramdisk_slice_len;
        info.kernel_addr = mappings.kernel_slice_start.as_u64();
        info.kernel_len = mappings.kernel_slice_len as _;
        info.kernel_image_offset = mappings.kernel_image_offset.as_u64();
        info.kernel_stack_bottom = mappings.stack_bottom.as_u64();
        info.kernel_stack_len = config.kernel_stack_size;
        info._test_sentinel = boot_config._test_sentinel;
        info
    });

    boot_info
}
592
593pub fn switch_to_kernel(
595 page_tables: PageTables,
596 mappings: Mappings,
597 boot_info: &'static mut BootInfo,
598) -> ! {
599 let PageTables {
600 kernel_level_4_frame,
601 ..
602 } = page_tables;
603 let addresses = Addresses {
604 page_table: kernel_level_4_frame,
605 stack_top: mappings.stack_top,
606 entry_point: mappings.entry_point,
607 boot_info,
608 };
609
610 log::info!(
611 "Jumping to kernel entry point at {:?}",
612 addresses.entry_point
613 );
614
615 unsafe {
616 context_switch(addresses);
617 }
618}
619
/// The page tables used by the bootloader and the to-be-started kernel.
pub struct PageTables {
    /// Page table of the bootloader (the currently active address space).
    pub bootloader: OffsetPageTable<'static>,
    /// Page table being prepared for the kernel.
    pub kernel: OffsetPageTable<'static>,
    /// Physical frame of the kernel's level-4 page table; loaded into CR3
    /// during the context switch.
    pub kernel_level_4_frame: PhysFrame,
}
633
/// Performs the actual context switch: loads the kernel's page table into
/// CR3, switches to the kernel stack, and jumps to the entry point with a
/// pointer to the boot info in `rdi` (first System V argument register).
///
/// # Safety
///
/// The caller must ensure that `addresses` contains a valid level-4 frame,
/// a mapped stack top, and a mapped entry point, and that the code of this
/// function is identity-mapped in the new page table (otherwise execution
/// faults immediately after `mov cr3`).
unsafe fn context_switch(addresses: Addresses) -> ! {
    unsafe {
        asm!(
            // Zero rbp (end of stack-frame chain), switch address space and
            // stack, push a zero return address, and jump to the kernel.
            r#"
            xor rbp, rbp
            mov cr3, {}
            mov rsp, {}
            push 0
            jmp {}
            "#,
            in(reg) addresses.page_table.start_address().as_u64(),
            in(reg) addresses.stack_top.as_u64(),
            in(reg) addresses.entry_point.as_u64(),
            in("rdi") addresses.boot_info as *const _ as usize,
        );
    }
    // The asm block diverges via `jmp`; this is never reached.
    unreachable!();
}
653
/// Memory addresses required for the context switch.
struct Addresses {
    /// Physical frame of the kernel's level-4 page table (new CR3 value).
    page_table: PhysFrame,
    /// Initial kernel stack pointer.
    stack_top: VirtAddr,
    /// Kernel entry point to jump to.
    entry_point: VirtAddr,
    /// Boot info passed to the kernel via `rdi`.
    boot_info: &'static mut BootInfo,
}
661
662fn mapping_addr_page_aligned(
663 mapping: Mapping,
664 size: u64,
665 used_entries: &mut UsedLevel4Entries,
666 kind: &str,
667) -> Page {
668 match mapping_addr(mapping, size, Size4KiB::SIZE, used_entries) {
669 Ok(addr) => Page::from_start_address(addr).unwrap(),
670 Err(addr) => panic!("{kind} address must be page-aligned (is `{addr:?})`"),
671 }
672}
673
674fn mapping_addr(
675 mapping: Mapping,
676 size: u64,
677 alignment: u64,
678 used_entries: &mut UsedLevel4Entries,
679) -> Result<VirtAddr, VirtAddr> {
680 let addr = match mapping {
681 Mapping::FixedAddress(addr) => VirtAddr::new(addr),
682 Mapping::Dynamic => used_entries.get_free_address(size, alignment),
683 };
684 if addr.is_aligned(alignment) {
685 Ok(addr)
686 } else {
687 Err(addr)
688 }
689}
690
/// Enables the no-execute bit in the EFER register, so that the
/// `NO_EXECUTE` page-table flag takes effect.
fn enable_nxe_bit() {
    use x86_64::registers::control::{Efer, EferFlags};
    // SAFETY: setting NXE only enables NO_EXECUTE enforcement; it does not
    // invalidate any existing mapping.
    unsafe { Efer::update(|efer| *efer |= EferFlags::NO_EXECUTE_ENABLE) }
}
695
/// Enables the write-protect bit in CR0, so that read-only pages are
/// enforced even for ring-0 accesses.
fn enable_write_protect_bit() {
    use x86_64::registers::control::{Cr0, Cr0Flags};
    // SAFETY: turning on WRITE_PROTECT only tightens access checks; it
    // cannot make previously invalid memory accesses valid.
    unsafe { Cr0::update(|cr0| *cr0 |= Cr0Flags::WRITE_PROTECT) };
}