use crate::{level_4_entries::UsedLevel4Entries, PAGE_SIZE};
use bootloader_api::{config::Mapping, info::TlsTemplate};
use core::{cmp, iter::Step, mem::size_of, ops::Add};

use x86_64::{
    align_up,
    structures::paging::{
        mapper::{MappedFrame, MapperAllSizes, TranslateResult},
        FrameAllocator, Page, PageSize, PageTableFlags as Flags, PhysFrame, Size4KiB, Translate,
    },
    PhysAddr, VirtAddr,
};
use xmas_elf::{
    dynamic, header,
    program::{self, ProgramHeader, SegmentData, Type},
    sections::Rela,
    ElfFile,
};

use super::Kernel;

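/// Marker for pages whose frame has already been copied by [`Inner::make_mut`].
/// Bit 9 is one of the page-table bits that the hardware ignores, so it is
/// free for bookkeeping by the loader.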
const COPIED: Flags = Flags::BIT_9;

struct Loader<'a, M, F> {
    elf_file: ElfFile<'a>,
    inner: Inner<'a, M, F>,
}

struct Inner<'a, M, F> {
    kernel_offset: PhysAddr,
    virtual_address_offset: VirtualAddressOffset,
    page_table: &'a mut M,
    frame_allocator: &'a mut F,
}

impl<'a, M, F> Loader<'a, M, F>
where
    M: MapperAllSizes + Translate,
    F: FrameAllocator<Size4KiB>,
{
    fn new(
        kernel: Kernel<'a>,
        page_table: &'a mut M,
        frame_allocator: &'a mut F,
        used_entries: &mut UsedLevel4Entries,
    ) -> Result<Self, &'static str> {
        log::info!("Elf file loaded at {:#p}", kernel.elf.input);
        let kernel_offset = PhysAddr::new(&kernel.elf.input[0] as *const u8 as u64);
        if !kernel_offset.is_aligned(PAGE_SIZE) {
            return Err("Loaded kernel ELF file is not sufficiently aligned");
        }

        let elf_file = kernel.elf;
        for program_header in elf_file.program_iter() {
            program::sanity_check(program_header, &elf_file)?;
        }

        let virtual_address_offset = match elf_file.header.pt2.type_().as_type() {
            header::Type::None => unimplemented!(),
            header::Type::Relocatable => unimplemented!(),
            header::Type::Executable => match kernel.config.mappings.kernel_base {
                Mapping::Dynamic => VirtualAddressOffset::zero(),
                _ => {
                    return Err(concat!(
                        "Invalid kernel_base mapping. ",
                        "Non-relocatable executables can only be mapped with a ",
                        "virtual_address_offset of 0."
                    ))
                }
            },
            header::Type::SharedObject => {
                let ElfMemoryRequirements {
                    size,
                    align,
                    min_addr,
                } = calc_elf_memory_requirements(&elf_file);
                match kernel.config.mappings.kernel_base {
                    Mapping::Dynamic => {
                        let offset = used_entries.get_free_address(size, align).as_u64();
                        VirtualAddressOffset::new(i128::from(offset) - i128::from(min_addr))
                    }
                    Mapping::FixedAddress(address) => {
                        VirtualAddressOffset::new(i128::from(address))
                    }
                }
            }
            header::Type::Core => unimplemented!(),
            header::Type::ProcessorSpecific(_) => unimplemented!(),
        };
        log::info!(
            "virtual_address_offset: {:#x}",
            virtual_address_offset.virtual_address_offset()
        );

        used_entries.mark_segments(elf_file.program_iter(), virtual_address_offset);

        header::sanity_check(&elf_file)?;
        let loader = Loader {
            elf_file,
            inner: Inner {
                kernel_offset,
                virtual_address_offset,
                page_table,
                frame_allocator,
            },
        };

        Ok(loader)
    }

    fn load_segments(&mut self) -> Result<Option<TlsTemplate>, &'static str> {
        let mut tls_template = None;
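        // Load the segments into virtual memory.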
        for program_header in self.elf_file.program_iter() {
            match program_header.get_type()? {
                Type::Load => self.inner.handle_load_segment(program_header)?,
                Type::Tls => {
                    if tls_template.is_none() {
                        tls_template = Some(self.inner.handle_tls_segment(program_header)?);
                    } else {
                        return Err("multiple TLS segments not supported");
                    }
                }
                Type::Null
                | Type::Dynamic
                | Type::Interp
                | Type::Note
                | Type::ShLib
                | Type::Phdr
                | Type::GnuRelro
                | Type::OsSpecific(_)
                | Type::ProcessorSpecific(_) => {}
            }
        }

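        // Apply relocations in virtual memory.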
        for program_header in self.elf_file.program_iter() {
            if let Type::Dynamic = program_header.get_type()? {
                self.inner
                    .handle_dynamic_segment(program_header, &self.elf_file)?
            }
        }

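        // Mark some memory regions as read-only after the relocations have
        // been applied.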
        for program_header in self.elf_file.program_iter() {
            if let Type::GnuRelro = program_header.get_type()? {
                self.inner.handle_relro_segment(program_header);
            }
        }

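        // All writes are done, so the COPIED bookkeeping flag can be removed
        // from the page tables again.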
        self.inner.remove_copied_flags(&self.elf_file)?;

        Ok(tls_template)
    }

    fn entry_point(&self) -> VirtAddr {
        VirtAddr::new(self.inner.virtual_address_offset + self.elf_file.header.pt2.entry_point())
    }
}

impl<'a, M, F> Inner<'a, M, F>
where
    M: MapperAllSizes + Translate,
    F: FrameAllocator<Size4KiB>,
{
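    /// Maps a `LOAD` segment into virtual memory.
    ///
    /// The pages are mapped directly to the frames that already hold the ELF
    /// file in memory, so no data is copied up front; frames are only
    /// duplicated lazily by `make_mut` when they need to be written.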
    fn handle_load_segment(&mut self, segment: ProgramHeader) -> Result<(), &'static str> {
        log::info!("Handling Segment: {:x?}", segment);

        let phys_start_addr = self.kernel_offset + segment.offset();
        let start_frame: PhysFrame = PhysFrame::containing_address(phys_start_addr);
        let end_frame: PhysFrame =
            PhysFrame::containing_address(phys_start_addr + segment.file_size() - 1u64);

        let virt_start_addr = VirtAddr::new(self.virtual_address_offset + segment.virtual_addr());
        let start_page: Page = Page::containing_address(virt_start_addr);

        let mut segment_flags = Flags::PRESENT;
        if !segment.flags().is_execute() {
            segment_flags |= Flags::NO_EXECUTE;
        }
        if segment.flags().is_write() {
            segment_flags |= Flags::WRITABLE;
        }

        for frame in PhysFrame::range_inclusive(start_frame, end_frame) {
            let offset = frame - start_frame;
            let page = start_page + offset;
            let flusher = unsafe {
                self.page_table
                    .map_to_with_table_flags(
                        page,
                        frame,
                        segment_flags,
                        Flags::PRESENT | Flags::WRITABLE,
                        self.frame_allocator,
                    )
                    .map_err(|_err| "map_to failed")?
            };
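            // The kernel's page table is not active yet, so there is nothing
            // to flush.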
            flusher.ignore();
        }

        if segment.mem_size() > segment.file_size() {
            self.handle_bss_section(&segment, segment_flags)?;
        }

        Ok(())
    }

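    /// Maps and zeroes the part of a `LOAD` segment where `mem_size()`
    /// exceeds `file_size()` (the `.bss` memory).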
    fn handle_bss_section(
        &mut self,
        segment: &ProgramHeader,
        segment_flags: Flags,
    ) -> Result<(), &'static str> {
        log::info!("Mapping bss section");

        let virt_start_addr = VirtAddr::new(self.virtual_address_offset + segment.virtual_addr());
        let mem_size = segment.mem_size();
        let file_size = segment.file_size();

        let zero_start = virt_start_addr + file_size;
        let zero_end = virt_start_addr + mem_size;

        type PageArray = [u64; Size4KiB::SIZE as usize / 8];
        const ZERO_ARRAY: PageArray = [0; Size4KiB::SIZE as usize / 8];

        let data_bytes_before_zero = zero_start.as_u64() & 0xfff;
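        // If `zero_start` is not page-aligned, the last frame of the file
        // data also contains bytes that belong to the .bss region and must be
        // zeroed.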
        if data_bytes_before_zero != 0 {
            let last_page = Page::containing_address(virt_start_addr + file_size - 1u64);
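            // The frame of `last_page` still aliases the ELF file in memory,
            // and the bytes past `file_size` may even be reused by the next
            // segment in the file. Copy the frame via `make_mut` first, so
            // that zeroing the tail cannot overwrite other data.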
            let new_frame = unsafe { self.make_mut(last_page) };
            let new_bytes_ptr = new_frame.start_address().as_u64() as *mut u8;
            unsafe {
                core::ptr::write_bytes(
                    new_bytes_ptr.add(data_bytes_before_zero as usize),
                    0,
                    (Size4KiB::SIZE - data_bytes_before_zero) as usize,
                );
            }
        }

        let start_page: Page =
            Page::containing_address(VirtAddr::new(align_up(zero_start.as_u64(), Size4KiB::SIZE)));
        let end_page = Page::containing_address(zero_end - 1u64);
        for page in Page::range_inclusive(start_page, end_page) {
            let frame = self.frame_allocator.allocate_frame().unwrap();

            let frame_ptr = frame.start_address().as_u64() as *mut PageArray;
            unsafe { frame_ptr.write(ZERO_ARRAY) };

            let flusher = unsafe {
                self.page_table
                    .map_to_with_table_flags(
                        page,
                        frame,
                        segment_flags,
                        Flags::PRESENT | Flags::WRITABLE,
                        self.frame_allocator,
                    )
                    .map_err(|_err| "Failed to map new frame for bss memory")?
            };
            flusher.ignore();
        }

        Ok(())
    }

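    /// Copies `buf.len()` bytes starting at the virtual address `addr` in the
    /// kernel address space into `buf`.
    ///
    /// Panics if a page in the range is not mapped in `self.page_table`.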
    fn copy_from(&self, addr: VirtAddr, buf: &mut [u8]) {
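        // Contiguous virtual addresses are not necessarily backed by
        // contiguous physical frames, so the copy walks the address range
        // page by page and translates each page individually.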
        let end_inclusive_addr = Step::forward_checked(addr, buf.len() - 1)
            .expect("the end address should be in the virtual address space");
        let start_page = Page::<Size4KiB>::containing_address(addr);
        let end_inclusive_page = Page::<Size4KiB>::containing_address(end_inclusive_addr);

        for page in start_page..=end_inclusive_page {
            let phys_addr = self
                .page_table
                .translate_page(page)
                .expect("address is not mapped to the kernel's memory space");

            let page_start = page.start_address();
            let page_end_inclusive = page.start_address() + 4095u64;

            let start_copy_address = cmp::max(addr, page_start);
            let end_inclusive_copy_address = cmp::min(end_inclusive_addr, page_end_inclusive);

            let start_offset_in_frame = start_copy_address - page_start;
            let end_inclusive_offset_in_frame = end_inclusive_copy_address - page_start;

            let copy_len = end_inclusive_offset_in_frame - start_offset_in_frame + 1;

            let start_phys_addr = phys_addr.start_address() + start_offset_in_frame;

            let start_offset_in_buf = Step::steps_between(&addr, &start_copy_address).1.unwrap();

            let src_ptr = start_phys_addr.as_u64() as *const u8;
            let src = unsafe { &*core::ptr::slice_from_raw_parts(src_ptr, copy_len as usize) };

            let dest = &mut buf[start_offset_in_buf..][..copy_len as usize];

            dest.copy_from_slice(src);
        }
    }

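    /// Writes `buf` to the kernel address space starting at the virtual
    /// address `addr`.
    ///
    /// ## Safety
    ///
    /// `addr` must point into memory mapped by a `LOAD` segment; the affected
    /// pages are made writable via `make_mut` before the write.
    ///
    /// Panics if a page in the range is not mapped in `self.page_table`.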
    unsafe fn copy_to(&mut self, addr: VirtAddr, buf: &[u8]) {
        let end_inclusive_addr = Step::forward_checked(addr, buf.len() - 1)
            .expect("the end address should be in the virtual address space");
        let start_page = Page::<Size4KiB>::containing_address(addr);
        let end_inclusive_page = Page::<Size4KiB>::containing_address(end_inclusive_addr);

        for page in start_page..=end_inclusive_page {
            let phys_addr = unsafe { self.make_mut(page) };

            let page_start = page.start_address();
            let page_end_inclusive = page.start_address() + 4095u64;

            let start_copy_address = cmp::max(addr, page_start);
            let end_inclusive_copy_address = cmp::min(end_inclusive_addr, page_end_inclusive);

            let start_offset_in_frame = start_copy_address - page_start;
            let end_inclusive_offset_in_frame = end_inclusive_copy_address - page_start;

            let copy_len = end_inclusive_offset_in_frame - start_offset_in_frame + 1;

            let start_phys_addr = phys_addr.start_address() + start_offset_in_frame;

            let start_offset_in_buf = Step::steps_between(&addr, &start_copy_address).1.unwrap();

            let dest_ptr = start_phys_addr.as_u64() as *mut u8;
            let dest =
                unsafe { &mut *core::ptr::slice_from_raw_parts_mut(dest_ptr, copy_len as usize) };

            let src = &buf[start_offset_in_buf..][..copy_len as usize];

            dest.copy_from_slice(src);
        }
    }

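    /// Makes the frame behind `page` safely writable and returns it.
    ///
    /// Pages mapped by a `LOAD` segment initially alias the frames that hold
    /// the ELF file itself, so writing through them would corrupt the file.
    /// To avoid that, this method allocates a fresh frame, copies the old
    /// frame's contents into it, and remaps the page to the copy. The
    /// [`COPIED`] flag is set on the new mapping so that later calls can
    /// detect that the page has already been copied and return early.
    ///
    /// ## Safety
    ///
    /// `page` must be a page mapped by a `LOAD` segment.
    ///
    /// Panics if the page is not mapped in `self.page_table`.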
    unsafe fn make_mut(&mut self, page: Page) -> PhysFrame {
        let (frame, flags) = match self.page_table.translate(page.start_address()) {
            TranslateResult::Mapped {
                frame,
                offset: _,
                flags,
            } => (frame, flags),
            TranslateResult::NotMapped => panic!("{:?} is not mapped", page),
            TranslateResult::InvalidFrameAddress(_) => unreachable!(),
        };
        let frame = if let MappedFrame::Size4KiB(frame) = frame {
            frame
        } else {
            unreachable!()
        };

        if flags.contains(COPIED) {
            return frame;
        }

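        // First write to this page: allocate a new frame and copy the old
        // frame's contents into it.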
        let new_frame = self.frame_allocator.allocate_frame().unwrap();
        let frame_ptr = frame.start_address().as_u64() as *const u8;
        let new_frame_ptr = new_frame.start_address().as_u64() as *mut u8;
        unsafe {
            core::ptr::copy_nonoverlapping(frame_ptr, new_frame_ptr, Size4KiB::SIZE as usize);
        }

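        // Remap the page to the new frame and mark it as COPIED so the copy
        // is not repeated.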
        self.page_table.unmap(page).unwrap().1.ignore();
        let new_flags = flags | COPIED;
        unsafe {
            self.page_table
                .map_to(page, new_frame, new_flags, self.frame_allocator)
                .unwrap()
                .ignore();
        }

        new_frame
    }

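    /// Clears the [`COPIED`] bookkeeping flag from all pages mapped by `LOAD`
    /// segments once loading and relocation are finished.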
    fn remove_copied_flags(&mut self, elf_file: &ElfFile) -> Result<(), &'static str> {
        for program_header in elf_file.program_iter() {
            if let Type::Load = program_header.get_type()? {
                let start = self.virtual_address_offset + program_header.virtual_addr();
                let end = start + program_header.mem_size();
                let start = VirtAddr::new(start);
                let end = VirtAddr::new(end);
                let start_page = Page::containing_address(start);
                let end_page = Page::containing_address(end - 1u64);
                for page in Page::<Size4KiB>::range_inclusive(start_page, end_page) {
                    let res = self.page_table.translate(page.start_address());
                    let flags = match res {
                        TranslateResult::Mapped {
                            frame: _,
                            offset: _,
                            flags,
                        } => flags,
                        TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
                            unreachable!("has the elf file not been mapped correctly?")
                        }
                    };

                    if flags.contains(COPIED) {
                        unsafe {
                            self.page_table
                                .update_flags(page, flags & !COPIED)
                                .unwrap()
                                .ignore();
                        }
                    }
                }
            }
        }
        Ok(())
    }

    fn handle_tls_segment(&mut self, segment: ProgramHeader) -> Result<TlsTemplate, &'static str> {
        Ok(TlsTemplate {
            start_addr: self.virtual_address_offset + segment.virtual_addr(),
            mem_size: segment.mem_size(),
            file_size: segment.file_size(),
        })
    }

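    /// Processes the `DYNAMIC` segment: reads the `Rela`, `RelaSize`, and
    /// `RelaEnt` entries and applies all relocations they describe.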
    fn handle_dynamic_segment(
        &mut self,
        segment: ProgramHeader,
        elf_file: &ElfFile,
    ) -> Result<(), &'static str> {
        let data = segment.get_data(elf_file)?;
        let data = if let SegmentData::Dynamic64(data) = data {
            data
        } else {
            panic!("expected Dynamic64 segment")
        };

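        // Locate the `Rela`, `RelaSize`, and `RelaEnt` entries, each of which
        // may occur at most once.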
        let mut rela = None;
        let mut rela_size = None;
        let mut rela_ent = None;
        for rel in data {
            let tag = rel.get_tag()?;
            match tag {
                dynamic::Tag::Rela => {
                    let ptr = rel.get_ptr()?;
                    let prev = rela.replace(ptr);
                    if prev.is_some() {
                        return Err("Dynamic section contains more than one Rela entry");
                    }
                }
                dynamic::Tag::RelaSize => {
                    let val = rel.get_val()?;
                    let prev = rela_size.replace(val);
                    if prev.is_some() {
                        return Err("Dynamic section contains more than one RelaSize entry");
                    }
                }
                dynamic::Tag::RelaEnt => {
                    let val = rel.get_val()?;
                    let prev = rela_ent.replace(val);
                    if prev.is_some() {
                        return Err("Dynamic section contains more than one RelaEnt entry");
                    }
                }
                _ => {}
            }
        }
        let offset = if let Some(rela) = rela {
            rela
        } else {
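            // The dynamic section doesn't contain any relocations.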
            if rela_size.is_some() || rela_ent.is_some() {
                return Err("Rela entry is missing but RelaSize or RelaEnt have been provided");
            }

            return Ok(());
        };
        let total_size = rela_size.ok_or("RelaSize entry is missing")?;
        let entry_size = rela_ent.ok_or("RelaEnt entry is missing")?;

        assert_eq!(
            entry_size,
            size_of::<Rela<u64>>() as u64,
            "unsupported entry size: {entry_size}"
        );

        let num_entries = total_size / entry_size;
        for idx in 0..num_entries {
            let rela = self.read_relocation(offset, idx);
            self.apply_relocation(rela, elf_file)?;
        }

        Ok(())
    }

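    /// Reads the `idx`th entry of the relocation table at `relocation_table`
    /// out of the kernel address space.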
    fn read_relocation(&self, relocation_table: u64, idx: u64) -> Rela<u64> {
        let offset = relocation_table + size_of::<Rela<u64>>() as u64 * idx;
        let value = self.virtual_address_offset + offset;
        let addr = VirtAddr::try_new(value).expect("relocation table is outside the address space");

        let mut buf = [0; 24];
        self.copy_from(addr, &mut buf);

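        // Convert the raw bytes into a `Rela<u64>`. `read_unaligned` is used
        // because `buf` is only byte-aligned.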
        unsafe { core::ptr::read_unaligned(&buf as *const u8 as *const Rela<u64>) }
    }

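    /// Applies a single relocation entry to the kernel's virtual memory.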
    fn apply_relocation(
        &mut self,
        rela: Rela<u64>,
        elf_file: &ElfFile,
    ) -> Result<(), &'static str> {
        let symbol_idx = rela.get_symbol_table_index();
        assert_eq!(
            symbol_idx, 0,
            "relocations using the symbol table are not supported"
        );

        match rela.get_type() {
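            // R_AMD64_RELATIVE: the relocated value is the load base plus the
            // addend.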
            8 => {
                check_is_in_load(elf_file, rela.get_offset())?;

                let addr = self.virtual_address_offset + rela.get_offset();
                let addr = VirtAddr::new(addr);

                let value = self.virtual_address_offset + rela.get_addend();

                unsafe {
                    self.copy_to(addr, &value.to_ne_bytes());
                }
            }
            ty => unimplemented!("relocation type {:x} not supported", ty),
        }

        Ok(())
    }

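    /// Marks the memory described by a `GNU_RELRO` program header as
    /// read-only.
    ///
    /// This protects memory that must be writable while relocations are
    /// applied, but should never be written to afterwards.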
    fn handle_relro_segment(&mut self, program_header: ProgramHeader) {
        let start = self.virtual_address_offset + program_header.virtual_addr();
        let end = start + program_header.mem_size();
        let start = VirtAddr::new(start);
        let end = VirtAddr::new(end);
        let start_page = Page::containing_address(start);
        let end_page = Page::containing_address(end - 1u64);
        for page in Page::<Size4KiB>::range_inclusive(start_page, end_page) {
            let res = self.page_table.translate(page.start_address());
            let flags = match res {
                TranslateResult::Mapped {
                    frame: _,
                    offset: _,
                    flags,
                } => flags,
                TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
                    unreachable!("has the elf file not been mapped correctly?")
                }
            };

            if flags.contains(Flags::WRITABLE) {
                unsafe {
                    self.page_table
                        .update_flags(page, flags & !Flags::WRITABLE)
                        .unwrap()
                        .ignore();
                }
            }
        }
    }
}

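/// Checks that the given virtual offset lies within a `LOAD` segment, i.e.
/// within memory that will be mapped for the kernel.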
fn check_is_in_load(elf_file: &ElfFile, virt_offset: u64) -> Result<(), &'static str> {
    for program_header in elf_file.program_iter() {
        if let Type::Load = program_header.get_type()? {
            if program_header.virtual_addr() <= virt_offset {
                let offset_in_segment = virt_offset - program_header.virtual_addr();
                if offset_in_segment < program_header.mem_size() {
                    return Ok(());
                }
            }
        }
    }
    Err("offset is not in load segment")
}

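/// Loads the given kernel ELF file into the given page table.
///
/// On success, returns the virtual address offset the kernel was loaded at,
/// its entry point, and its thread-local storage template (if any).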
pub fn load_kernel(
    kernel: Kernel<'_>,
    page_table: &mut (impl MapperAllSizes + Translate),
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    used_entries: &mut UsedLevel4Entries,
) -> Result<(VirtAddr, VirtAddr, Option<TlsTemplate>), &'static str> {
    let mut loader = Loader::new(kernel, page_table, frame_allocator, used_entries)?;
    let tls_template = loader.load_segments()?;

    Ok((
        VirtAddr::new(loader.inner.virtual_address_offset.virtual_address_offset() as u64),
        loader.entry_point(),
        tls_template,
    ))
}

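/// Basic information about how much virtual memory an ELF file needs: the
/// total size, the required alignment, and the lowest virtual address used.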
pub struct ElfMemoryRequirements {
    pub size: u64,
    pub align: u64,
    pub min_addr: u64,
}

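/// Computes the [`ElfMemoryRequirements`] of an ELF file from its `LOAD`
/// program headers.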
pub fn calc_elf_memory_requirements(elf_file: &ElfFile) -> ElfMemoryRequirements {
    let load_program_headers = elf_file
        .program_iter()
        .filter(|h| matches!(h.get_type(), Ok(Type::Load)));
    let max_addr = load_program_headers
        .clone()
        .map(|h| h.virtual_addr() + h.mem_size())
        .max()
        .unwrap_or(0);
    let min_addr = load_program_headers
        .clone()
        .map(|h| h.virtual_addr())
        .min()
        .unwrap_or(0);
    let size = max_addr - min_addr;
    let align = load_program_headers.map(|h| h.align()).max().unwrap_or(1);
    ElfMemoryRequirements {
        size,
        align,
        min_addr,
    }
}

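/// A signed offset that is added to the virtual addresses of a
/// position-independent executable to relocate it to its load address.
///
/// Stored as an `i128` so that the offset can be negative and additions with
/// arbitrary 64-bit values cannot overflow.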
#[derive(Clone, Copy)]
pub struct VirtualAddressOffset {
    virtual_address_offset: i128,
}

impl VirtualAddressOffset {
    pub fn zero() -> Self {
        Self::new(0)
    }

    pub fn new(virtual_address_offset: i128) -> Self {
        Self {
            virtual_address_offset,
        }
    }

    pub fn virtual_address_offset(&self) -> i128 {
        self.virtual_address_offset
    }
}

impl Add<u64> for VirtualAddressOffset {
    type Output = u64;

    fn add(self, offset: u64) -> Self::Output {
        u64::try_from(
            self.virtual_address_offset
                .checked_add(i128::from(offset))
                .unwrap(),
        )
        .unwrap()
    }
}