#![cfg_attr(not(test), no_std)]
#![feature(step_trait)]
#![deny(unsafe_op_in_unsafe_fn)]

use crate::legacy_memory_region::{LegacyFrameAllocator, LegacyMemoryRegion};
use bootloader_api::{
    config::Mapping,
    info::{FrameBuffer, FrameBufferInfo, MemoryRegion, TlsTemplate},
    BootInfo, BootloaderConfig,
};
use bootloader_boot_config::{BootConfig, LevelFilter};
use core::{alloc::Layout, arch::asm, mem::MaybeUninit, slice};
use level_4_entries::UsedLevel4Entries;
use usize_conversions::FromUsize;
use x86_64::{
    structures::paging::{
        page_table::PageTableLevel, FrameAllocator, Mapper, OffsetPageTable, Page, PageSize,
        PageTableFlags, PageTableIndex, PhysFrame, Size2MiB, Size4KiB,
    },
    PhysAddr, VirtAddr,
};
use xmas_elf::ElfFile;

mod entropy;
pub mod framebuffer;
mod gdt;
/// Provides a frame allocator based on a legacy (BIOS/UEFI) memory map.
pub mod legacy_memory_region;
/// Keeps track of the used entries in the level 4 page table.
pub mod level_4_entries;
/// Loads the kernel ELF executable into memory.
pub mod load_kernel;
/// Logging implementations for the framebuffer and the serial port.
pub mod logger;
pub mod serial;

const PAGE_SIZE: u64 = 4096;

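/// Initializes the global logger.
///
/// The `framebuffer` and `info` arguments describe the pixel buffer that the
/// framebuffer logger draws to; the two status flags select whether the
/// framebuffer and/or serial loggers are enabled. Panics if a logger was
/// already set.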
pub fn init_logger(
    framebuffer: &'static mut [u8],
    info: FrameBufferInfo,
    log_level: LevelFilter,
    frame_buffer_logger_status: bool,
    serial_logger_status: bool,
) {
    let logger = logger::LOGGER.get_or_init(move || {
        logger::LockedLogger::new(
            framebuffer,
            info,
            frame_buffer_logger_status,
            serial_logger_status,
        )
    });
    log::set_logger(logger).expect("logger already set");
    log::set_max_level(convert_level(log_level));
    log::info!("Framebuffer info: {:?}", info);
}

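/// Translates the bootloader's `LevelFilter` into the equivalent filter of the
/// `log` crate.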
fn convert_level(level: LevelFilter) -> log::LevelFilter {
    match level {
        LevelFilter::Off => log::LevelFilter::Off,
        LevelFilter::Error => log::LevelFilter::Error,
        LevelFilter::Warn => log::LevelFilter::Warn,
        LevelFilter::Info => log::LevelFilter::Info,
        LevelFilter::Debug => log::LevelFilter::Debug,
        LevelFilter::Trace => log::LevelFilter::Trace,
    }
}

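/// System information gathered from the firmware (BIOS or UEFI) before the
/// kernel is loaded.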
#[derive(Debug, Copy, Clone)]
pub struct SystemInfo {
    /// Information about the (still unmapped) framebuffer, if one exists.
    pub framebuffer: Option<RawFrameBufferInfo>,
    /// Physical address of the ACPI _Root System Description Pointer_, if found.
    pub rsdp_addr: Option<PhysAddr>,
    /// Physical start address of the ramdisk, if one was loaded.
    pub ramdisk_addr: Option<u64>,
    /// Length of the ramdisk in bytes (`0` if there is none).
    pub ramdisk_len: u64,
}

/// The physical address of the framebuffer together with its layout information.
#[derive(Debug, Copy, Clone)]
pub struct RawFrameBufferInfo {
    /// Start address of the pixel-based framebuffer.
    pub addr: PhysAddr,
    /// Layout and pixel format information of the framebuffer.
    pub info: FrameBufferInfo,
}

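/// A parsed kernel ELF executable together with its embedded bootloader
/// configuration.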
pub struct Kernel<'a> {
    pub elf: ElfFile<'a>,
    pub config: BootloaderConfig,
    pub start_address: *const u8,
    pub len: usize,
}

impl<'a> Kernel<'a> {
    /// Parses the given bytes as an ELF executable and reads the bootloader
    /// configuration from its `.bootloader-config` section.
    pub fn parse(kernel_slice: &'a [u8]) -> Self {
        let kernel_elf = ElfFile::new(kernel_slice).unwrap();
        let config = {
            let section = kernel_elf
                .find_section_by_name(".bootloader-config")
                .expect("bootloader config section not found; kernel must be compiled against bootloader_api");
            let raw = section.raw_data(&kernel_elf);
            BootloaderConfig::deserialize(raw)
                .expect("kernel was compiled with incompatible bootloader_api version")
        };
        Kernel {
            elf: kernel_elf,
            config,
            start_address: kernel_slice.as_ptr(),
            len: kernel_slice.len(),
        }
    }
}

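/// Loads the kernel ELF executable into memory and switches to it.
///
/// This is a convenience function that calls [`set_up_mappings`],
/// [`create_boot_info`], and [`switch_to_kernel`] in order, forwarding its
/// arguments to them.
///
/// A minimal usage sketch (illustrative only; the argument values come from
/// the BIOS or UEFI entry code that calls this):
///
/// ```ignore
/// let kernel = Kernel::parse(kernel_bytes);
/// load_and_switch_to_kernel(kernel, boot_config, frame_allocator, page_tables, system_info);
/// ```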
pub fn load_and_switch_to_kernel<I, D>(
    kernel: Kernel,
    boot_config: BootConfig,
    mut frame_allocator: LegacyFrameAllocator<I, D>,
    mut page_tables: PageTables,
    system_info: SystemInfo,
) -> !
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    let config = kernel.config;
    let mut mappings = set_up_mappings(
        kernel,
        &mut frame_allocator,
        &mut page_tables,
        system_info.framebuffer.as_ref(),
        &config,
        &system_info,
    );
    let boot_info = create_boot_info(
        &config,
        &boot_config,
        frame_allocator,
        &mut page_tables,
        &mut mappings,
        system_info,
    );
    switch_to_kernel(page_tables, mappings, boot_info);
}

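/// Sets up the memory mappings that the kernel expects: the kernel image
/// itself, its stack, the GDT, and, depending on the configuration, the
/// framebuffer, the ramdisk, a complete physical-memory mapping, and a
/// recursive page-table entry.
///
/// Returns a [`Mappings`] struct that records the addresses that were chosen.
/// Panics if any allocation or mapping fails.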
pub fn set_up_mappings<I, D>(
    kernel: Kernel,
    frame_allocator: &mut LegacyFrameAllocator<I, D>,
    page_tables: &mut PageTables,
    framebuffer: Option<&RawFrameBufferInfo>,
    config: &BootloaderConfig,
    system_info: &SystemInfo,
) -> Mappings
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    let kernel_page_table = &mut page_tables.kernel;

    let mut used_entries = UsedLevel4Entries::new(
        frame_allocator.max_phys_addr(),
        frame_allocator.len(),
        framebuffer,
        config,
        &kernel.elf,
    )
    .expect("failed to mark level 4 entries as used");

    // Enable support for the no-execute bit in page tables.
    enable_nxe_bit();
    // Make the kernel respect write-protection bits even when in ring 0 by default.
    enable_write_protect_bit();

    let config = kernel.config;
    let kernel_slice_start = PhysAddr::new(kernel.start_address as _);
    let kernel_slice_len = u64::try_from(kernel.len).unwrap();

    let (kernel_image_offset, entry_point, tls_template) = load_kernel::load_kernel(
        kernel,
        kernel_page_table,
        frame_allocator,
        &mut used_entries,
    )
    .expect("loading the kernel failed");
    log::info!("Entry point at: {:#x}", entry_point.as_u64());

    // Create the kernel stack, with an unmapped guard page directly below it
    // so that a stack overflow faults instead of silently corrupting memory.
    let stack_start = {
        let guard_page = mapping_addr_page_aligned(
            config.mappings.kernel_stack,
            // Reserve an additional page to serve as the guard page.
            Size4KiB::SIZE + config.kernel_stack_size,
            &mut used_entries,
            "kernel stack start",
        );
        // The first page is the guard page, so the stack starts on the second page.
        guard_page + 1
    };
    let stack_end_addr = stack_start.start_address() + config.kernel_stack_size;

    let stack_end = Page::containing_address(stack_end_addr - 1u64);
    for page in Page::range_inclusive(stack_start, stack_end) {
        let frame = frame_allocator
            .allocate_frame()
            .expect("frame allocation failed when mapping a kernel stack");
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
        match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
            Ok(tlb) => tlb.flush(),
            Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
        }
    }

    // Identity-map the context switch function, so that we don't get an
    // immediate page fault after switching the active page table.
    let context_switch_function = PhysAddr::new(context_switch as *const () as u64);
    let context_switch_function_start_frame: PhysFrame =
        PhysFrame::containing_address(context_switch_function);
    for frame in PhysFrame::range_inclusive(
        context_switch_function_start_frame,
        context_switch_function_start_frame + 1,
    ) {
        let page = Page::containing_address(VirtAddr::new(frame.start_address().as_u64()));
        match unsafe {
            // The frame itself is mapped read-only and executable; the parent
            // table flags stay writable so that other entries in the same
            // tables can still be modified.
            kernel_page_table.map_to_with_table_flags(
                page,
                frame,
                PageTableFlags::PRESENT,
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
                frame_allocator,
            )
        } {
            Ok(tlb) => tlb.flush(),
            Err(err) => panic!("failed to identity map frame {:?}: {:?}", frame, err),
        }
    }

    // Allocate, load, and identity-map the GDT.
    let gdt_frame = frame_allocator
        .allocate_frame()
        .expect("failed to allocate GDT frame");
    gdt::create_and_load(gdt_frame);
    let gdt_page = Page::containing_address(VirtAddr::new(gdt_frame.start_address().as_u64()));
    match unsafe {
        kernel_page_table.map_to_with_table_flags(
            gdt_page,
            gdt_frame,
            PageTableFlags::PRESENT,
            PageTableFlags::PRESENT | PageTableFlags::WRITABLE,
            frame_allocator,
        )
    } {
        Ok(tlb) => tlb.flush(),
        Err(err) => panic!("failed to identity map frame {:?}: {:?}", gdt_frame, err),
    }

    // Map the framebuffer, if one was provided by the firmware.
    let framebuffer_virt_addr = if let Some(framebuffer) = framebuffer {
        log::info!("Map framebuffer");

        let framebuffer_start_frame: PhysFrame = PhysFrame::containing_address(framebuffer.addr);
        let framebuffer_end_frame = PhysFrame::containing_address(
            framebuffer.addr + framebuffer.info.byte_len as u64 - 1u64,
        );
        let start_page = mapping_addr_page_aligned(
            config.mappings.framebuffer,
            u64::from_usize(framebuffer.info.byte_len),
            &mut used_entries,
            "framebuffer",
        );
        for (i, frame) in
            PhysFrame::range_inclusive(framebuffer_start_frame, framebuffer_end_frame).enumerate()
        {
            let page = start_page + u64::from_usize(i);
            let flags =
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
            match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
                Ok(tlb) => tlb.flush(),
                Err(err) => panic!(
                    "failed to map page {:?} to frame {:?}: {:?}",
                    page, frame, err
                ),
            }
        }
        let framebuffer_virt_addr = start_page.start_address();
        Some(framebuffer_virt_addr)
    } else {
        None
    };
    // Map the ramdisk into the kernel address space, if one was loaded.
    let ramdisk_slice_len = system_info.ramdisk_len;
    let ramdisk_slice_phys_start = system_info.ramdisk_addr.map(PhysAddr::new);
    let ramdisk_slice_start = if let Some(physical_address) = ramdisk_slice_phys_start {
        let start_page = mapping_addr_page_aligned(
            config.mappings.ramdisk_memory,
            system_info.ramdisk_len,
            &mut used_entries,
            "ramdisk start",
        );
        let ramdisk_physical_start_page: PhysFrame<Size4KiB> =
            PhysFrame::containing_address(physical_address);
        let ramdisk_page_count = (system_info.ramdisk_len - 1) / Size4KiB::SIZE;
        let ramdisk_physical_end_page = ramdisk_physical_start_page + ramdisk_page_count;

        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
        for (i, frame) in
            PhysFrame::range_inclusive(ramdisk_physical_start_page, ramdisk_physical_end_page)
                .enumerate()
        {
            let page = start_page + i as u64;
            match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
                // The page was previously unmapped, so no stale TLB entries
                // can exist and the flush can be skipped.
                Ok(tlb) => tlb.ignore(),
                Err(err) => panic!(
                    "failed to map page {:?} to frame {:?}: {:?}",
                    page, frame, err
                ),
            };
        }
        Some(start_page.start_address())
    } else {
        None
    };

    // Map the full physical address space at a fixed offset, if requested.
    let physical_memory_offset = if let Some(mapping) = config.mappings.physical_memory {
        log::info!("Map physical memory");

        let start_frame = PhysFrame::containing_address(PhysAddr::new(0));
        let max_phys = frame_allocator.max_phys_addr();
        let end_frame: PhysFrame<Size2MiB> = PhysFrame::containing_address(max_phys - 1u64);

        let size = max_phys.as_u64();
        let alignment = Size2MiB::SIZE;
        let offset = mapping_addr(mapping, size, alignment, &mut used_entries)
            .expect("start address for physical memory mapping must be 2MiB-page-aligned");

        for frame in PhysFrame::range_inclusive(start_frame, end_frame) {
            let page = Page::containing_address(offset + frame.start_address().as_u64());
            let flags =
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
            match unsafe { kernel_page_table.map_to(page, frame, flags, frame_allocator) } {
                // The page was previously unmapped, so no TLB flush is needed.
                Ok(tlb) => tlb.ignore(),
                Err(err) => panic!(
                    "failed to map page {:?} to frame {:?}: {:?}",
                    page, frame, err
                ),
            };
        }

        Some(offset)
    } else {
        None
    };

    // Set up a recursive level 4 page table entry, if requested.
    let recursive_index = if let Some(mapping) = config.mappings.page_table_recursive {
        log::info!("Map page table recursively");
        let index = match mapping {
            Mapping::Dynamic => used_entries.get_free_entries(1),
            Mapping::FixedAddress(offset) => {
                let offset = VirtAddr::new(offset);
                let table_level = PageTableLevel::Four;
                if !offset.is_aligned(table_level.entry_address_space_alignment()) {
                    panic!(
                        "Offset for recursive mapping must be properly aligned (must be \
                        a multiple of {:#x})",
                        table_level.entry_address_space_alignment()
                    );
                }

                offset.p4_index()
            }
        };

        let entry = &mut kernel_page_table.level_4_table_mut()[index];
        if !entry.is_unused() {
            panic!(
                "Could not set up recursive mapping: index {} already in use",
                u16::from(index)
            );
        }
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
        entry.set_frame(page_tables.kernel_level_4_frame, flags);

        Some(index)
    } else {
        None
    };

    Mappings {
        framebuffer: framebuffer_virt_addr,
        entry_point,
        // The System V ABI expects a 16-byte aligned stack pointer; align down
        // because the stack grows downward.
        stack_top: stack_end_addr.align_down(16u8),
        used_entries,
        physical_memory_offset,
        recursive_index,
        tls_template,

        kernel_slice_start,
        kernel_slice_len,
        kernel_image_offset,

        ramdisk_slice_phys_start,
        ramdisk_slice_start,
        ramdisk_slice_len,
    }
}

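/// Records the addresses of the memory mappings set up by [`set_up_mappings`].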
pub struct Mappings {
    /// The address of the kernel entry point.
    pub entry_point: VirtAddr,
    /// The (exclusive) end address of the kernel stack.
    pub stack_top: VirtAddr,
    /// Keeps track of the used entries in the level 4 page table, useful for
    /// finding free virtual memory when needed.
    pub used_entries: UsedLevel4Entries,
    /// The start address of the framebuffer mapping, if any.
    pub framebuffer: Option<VirtAddr>,
    /// The start address of the physical memory mapping, if enabled.
    pub physical_memory_offset: Option<VirtAddr>,
    /// The level 4 page table index of the recursive mapping, if enabled.
    pub recursive_index: Option<PageTableIndex>,
    /// The thread-local storage template of the kernel executable, if it
    /// contains one.
    pub tls_template: Option<TlsTemplate>,

    /// The start address of the kernel slice in physical memory.
    pub kernel_slice_start: PhysAddr,
    /// The length of the kernel slice in bytes.
    pub kernel_slice_len: u64,
    /// The relocation offset of the kernel image in virtual memory.
    pub kernel_image_offset: VirtAddr,
    /// The start address of the ramdisk in physical memory, if any.
    pub ramdisk_slice_phys_start: Option<PhysAddr>,
    /// The virtual start address of the ramdisk mapping, if any.
    pub ramdisk_slice_start: Option<VirtAddr>,
    /// The length of the ramdisk in bytes.
    pub ramdisk_slice_len: u64,
}

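/// Allocates and initializes the boot info struct and the memory map.
///
/// The boot info and memory map are mapped at the same virtual address in both
/// the kernel and the bootloader address space, which makes it possible to
/// return a Rust reference that is valid in both. The backing physical frames
/// are taken from the given `frame_allocator`.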
pub fn create_boot_info<I, D>(
    config: &BootloaderConfig,
    boot_config: &BootConfig,
    mut frame_allocator: LegacyFrameAllocator<I, D>,
    page_tables: &mut PageTables,
    mappings: &mut Mappings,
    system_info: SystemInfo,
) -> &'static mut BootInfo
where
    I: ExactSizeIterator<Item = D> + Clone,
    D: LegacyMemoryRegion,
{
    log::info!("Allocate bootinfo");

    let (boot_info, memory_regions) = {
        // Compute a combined layout for the boot info struct followed by the
        // memory map regions.
        let boot_info_layout = Layout::new::<BootInfo>();
        let regions = frame_allocator.memory_map_max_region_count();
        let memory_regions_layout = Layout::array::<MemoryRegion>(regions).unwrap();
        let (combined, memory_regions_offset) =
            boot_info_layout.extend(memory_regions_layout).unwrap();

        let boot_info_addr = mapping_addr(
            config.mappings.boot_info,
            u64::from_usize(combined.size()),
            u64::from_usize(combined.align()),
            &mut mappings.used_entries,
        )
        .expect("boot info addr is not properly aligned");

        let memory_map_regions_addr = boot_info_addr + memory_regions_offset as u64;
        let memory_map_regions_end = boot_info_addr + combined.size() as u64;

        let start_page = Page::containing_address(boot_info_addr);
        let end_page = Page::containing_address(memory_map_regions_end - 1u64);
        for page in Page::range_inclusive(start_page, end_page) {
            let flags =
                PageTableFlags::PRESENT | PageTableFlags::WRITABLE | PageTableFlags::NO_EXECUTE;
            let frame = frame_allocator
                .allocate_frame()
                .expect("frame allocation for boot info failed");
            match unsafe {
                page_tables
                    .kernel
                    .map_to(page, frame, flags, &mut frame_allocator)
            } {
                Ok(tlb) => tlb.flush(),
                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
            }
            // The bootloader needs access to the frames too, so that it can
            // initialize the boot info before the context switch.
            match unsafe {
                page_tables
                    .bootloader
                    .map_to(page, frame, flags, &mut frame_allocator)
            } {
                Ok(tlb) => tlb.flush(),
                Err(err) => panic!("failed to map page {:?}: {:?}", page, err),
            }
        }

        let boot_info: &'static mut MaybeUninit<BootInfo> =
            unsafe { &mut *boot_info_addr.as_mut_ptr() };
        let memory_regions: &'static mut [MaybeUninit<MemoryRegion>] =
            unsafe { slice::from_raw_parts_mut(memory_map_regions_addr.as_mut_ptr(), regions) };
        (boot_info, memory_regions)
    };

    log::info!("Create Memory Map");

    // Build the memory map, marking the kernel and ramdisk regions as in use.
    let memory_regions = frame_allocator.construct_memory_map(
        memory_regions,
        mappings.kernel_slice_start,
        mappings.kernel_slice_len,
        mappings.ramdisk_slice_phys_start,
        mappings.ramdisk_slice_len,
    );

    log::info!("Create bootinfo");

    // Initialize the boot info struct in place.
    let boot_info = boot_info.write({
        let mut info = BootInfo::new(memory_regions.into());
        info.framebuffer = mappings
            .framebuffer
            .map(|addr| unsafe {
                FrameBuffer::new(
                    addr.as_u64(),
                    system_info
                        .framebuffer
                        .expect(
                            "there shouldn't be a mapping for the framebuffer if there is \
                            no framebuffer",
                        )
                        .info,
                )
            })
            .into();
        info.physical_memory_offset = mappings.physical_memory_offset.map(VirtAddr::as_u64).into();
        info.recursive_index = mappings.recursive_index.map(Into::into).into();
        info.rsdp_addr = system_info.rsdp_addr.map(|addr| addr.as_u64()).into();
        info.tls_template = mappings.tls_template.into();
        info.ramdisk_addr = mappings
            .ramdisk_slice_start
            .map(|addr| addr.as_u64())
            .into();
        info.ramdisk_len = mappings.ramdisk_slice_len;
        info.kernel_addr = mappings.kernel_slice_start.as_u64();
        info.kernel_len = mappings.kernel_slice_len as _;
        info.kernel_image_offset = mappings.kernel_image_offset.as_u64();
        info._test_sentinel = boot_config._test_sentinel;
        info
    });

    boot_info
}

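/// Switches to the kernel address space and jumps to the kernel entry point.
/// Never returns.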
pub fn switch_to_kernel(
    page_tables: PageTables,
    mappings: Mappings,
    boot_info: &'static mut BootInfo,
) -> ! {
    let PageTables {
        kernel_level_4_frame,
        ..
    } = page_tables;
    let addresses = Addresses {
        page_table: kernel_level_4_frame,
        stack_top: mappings.stack_top,
        entry_point: mappings.entry_point,
        boot_info,
    };

    log::info!(
        "Jumping to kernel entry point at {:?}",
        addresses.entry_point
    );

    unsafe {
        context_switch(addresses);
    }
}

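/// Provides access to the page tables of the bootloader and kernel address
/// space.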
pub struct PageTables {
    /// Page tables of the bootloader address space.
    pub bootloader: OffsetPageTable<'static>,
    /// Page tables of the kernel address space.
    pub kernel: OffsetPageTable<'static>,
    /// The physical frame where the level 4 page table of the kernel address
    /// space is stored.
    ///
    /// Must be the page table that the `kernel` field of this struct refers
    /// to. This frame is loaded into the `CR3` register on the final context
    /// switch to the kernel.
    pub kernel_level_4_frame: PhysFrame,
}

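/// Performs the actual context switch to the kernel.
///
/// Switches `CR3` to the kernel's level 4 page table, installs the kernel
/// stack, and jumps to the entry point with a pointer to the `BootInfo` in
/// `rdi`, the first argument register of the System V AMD64 calling
/// convention.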
unsafe fn context_switch(addresses: Addresses) -> ! {
    unsafe {
        // Zero `rbp` so that stack traces terminate at the entry point, load
        // the kernel's level 4 page table into `cr3`, and install the kernel
        // stack. The `push 0` provides a null return address (the entry point
        // must never return) and establishes the stack alignment that the ABI
        // guarantees at function entry.
        asm!(
            r#"
            xor rbp, rbp
            mov cr3, {}
            mov rsp, {}
            push 0
            jmp {}
            "#,
            in(reg) addresses.page_table.start_address().as_u64(),
            in(reg) addresses.stack_top.as_u64(),
            in(reg) addresses.entry_point.as_u64(),
            in("rdi") addresses.boot_info as *const _ as usize,
        );
    }
    unreachable!();
}

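/// The addresses that [`context_switch`] needs to jump into the kernel.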
struct Addresses {
    page_table: PhysFrame,
    stack_top: VirtAddr,
    entry_point: VirtAddr,
    boot_info: &'static mut BootInfo,
}

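/// Like [`mapping_addr`], but requires 4KiB page alignment; panics (naming the
/// given `kind` in the message) if a configured fixed address is not
/// page-aligned.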
fn mapping_addr_page_aligned(
    mapping: Mapping,
    size: u64,
    used_entries: &mut UsedLevel4Entries,
    kind: &str,
) -> Page {
    match mapping_addr(mapping, size, Size4KiB::SIZE, used_entries) {
        Ok(addr) => Page::from_start_address(addr).unwrap(),
        Err(addr) => panic!("{kind} address must be page-aligned (is `{addr:?}`)"),
    }
}

/// Returns the virtual address to use for a mapping: the configured fixed
/// address, or a dynamically chosen free address. Returns `Err` with the
/// address if it is not aligned to `alignment`.
fn mapping_addr(
    mapping: Mapping,
    size: u64,
    alignment: u64,
    used_entries: &mut UsedLevel4Entries,
) -> Result<VirtAddr, VirtAddr> {
    let addr = match mapping {
        Mapping::FixedAddress(addr) => VirtAddr::new(addr),
        Mapping::Dynamic => used_entries.get_free_address(size, alignment),
    };
    if addr.is_aligned(alignment) {
        Ok(addr)
    } else {
        Err(addr)
    }
}

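/// Enables no-execute support in the `EFER` register, so that the `NO_EXECUTE`
/// page table flag can be used.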
fn enable_nxe_bit() {
    use x86_64::registers::control::{Efer, EferFlags};
    unsafe { Efer::update(|efer| *efer |= EferFlags::NO_EXECUTE_ENABLE) }
}

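/// Sets the write-protect bit in `CR0`, so that write protection is also
/// enforced in ring 0.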
fn enable_write_protect_bit() {
    use x86_64::registers::control::{Cr0, Cr0Flags};
    unsafe { Cr0::update(|cr0| *cr0 |= Cr0Flags::WRITE_PROTECT) };
}