bootloader_x86_64_common/load_kernel.rs

use crate::{level_4_entries::UsedLevel4Entries, PAGE_SIZE};
use bootloader_api::{config::Mapping, info::TlsTemplate};
use core::{cmp, iter::Step, mem::size_of, ops::Add};

use x86_64::{
    align_up,
    structures::paging::{
        mapper::{MappedFrame, MapperAllSizes, TranslateResult},
        FrameAllocator, Page, PageSize, PageTableFlags as Flags, PhysFrame, Size4KiB, Translate,
    },
    PhysAddr, VirtAddr,
};
use xmas_elf::{
    dynamic, header,
    program::{self, ProgramHeader, SegmentData, Type},
    sections::Rela,
    ElfFile,
};

use super::Kernel;

/// Used by [`Inner::make_mut`] and [`Inner::remove_copied_flags`].
const COPIED: Flags = Flags::BIT_9;

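/// Loads the kernel ELF file into virtual memory and applies its relocations.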
struct Loader<'a, M, F> {
    elf_file: ElfFile<'a>,
    inner: Inner<'a, M, F>,
}

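/// The mapping state of [`Loader`], split from the ELF file so that the segment
/// handlers can borrow it mutably while the file's program headers are iterated.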
struct Inner<'a, M, F> {
    kernel_offset: PhysAddr,
    virtual_address_offset: VirtualAddressOffset,
    page_table: &'a mut M,
    frame_allocator: &'a mut F,
}

impl<'a, M, F> Loader<'a, M, F>
where
    M: MapperAllSizes + Translate,
    F: FrameAllocator<Size4KiB>,
{
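    /// Creates a new loader for the given kernel image.
    ///
    /// Verifies that the kernel ELF file is page-aligned in memory, sanity-checks its
    /// headers, chooses the virtual address offset at which the kernel will be mapped
    /// based on the ELF type and the configured `kernel_base` mapping, and marks the
    /// level 4 page table entries used by the kernel's segments in `used_entries`.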
    fn new(
        kernel: Kernel<'a>,
        page_table: &'a mut M,
        frame_allocator: &'a mut F,
        used_entries: &mut UsedLevel4Entries,
    ) -> Result<Self, &'static str> {
        log::info!("Elf file loaded at {:#p}", kernel.elf.input);
        let kernel_offset = PhysAddr::new(&kernel.elf.input[0] as *const u8 as u64);
        if !kernel_offset.is_aligned(PAGE_SIZE) {
            return Err("Loaded kernel ELF file is not sufficiently aligned");
        }

        let elf_file = kernel.elf;
        for program_header in elf_file.program_iter() {
            program::sanity_check(program_header, &elf_file)?;
        }

        let virtual_address_offset = match elf_file.header.pt2.type_().as_type() {
            header::Type::None => unimplemented!(),
            header::Type::Relocatable => unimplemented!(),
            header::Type::Executable => match kernel.config.mappings.kernel_base {
                Mapping::Dynamic => VirtualAddressOffset::zero(),
                _ => {
                    return Err(concat!(
                        "Invalid kernel_base mapping. ",
                        "Executables can only be mapped at virtual_address_offset 0."
                    ))
                }
            },
            header::Type::SharedObject => {
                let ElfMemoryRequirements {
                    size,
                    align,
                    min_addr,
                } = calc_elf_memory_requirements(&elf_file);
                match kernel.config.mappings.kernel_base {
                    Mapping::Dynamic => {
                        let offset = used_entries.get_free_address(size, align).as_u64();
                        VirtualAddressOffset::new(i128::from(offset) - i128::from(min_addr))
                    }
                    Mapping::FixedAddress(address) => {
                        VirtualAddressOffset::new(i128::from(address))
                    }
                }
            }
            header::Type::Core => unimplemented!(),
            header::Type::ProcessorSpecific(_) => unimplemented!(),
        };
        log::info!(
            "virtual_address_offset: {:#x}",
            virtual_address_offset.virtual_address_offset()
        );

        used_entries.mark_segments(elf_file.program_iter(), virtual_address_offset);

        header::sanity_check(&elf_file)?;
        let loader = Loader {
            elf_file,
            inner: Inner {
                kernel_offset,
                virtual_address_offset,
                page_table,
                frame_allocator,
            },
        };

        Ok(loader)
    }

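    /// Loads all segments of the kernel ELF file into virtual memory.
    ///
    /// Maps the `Load` segments, applies the relocations described by the `Dynamic`
    /// segment, and marks `GnuRelro` regions as read-only. Returns the TLS template
    /// if the kernel has a TLS segment.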
    fn load_segments(&mut self) -> Result<Option<TlsTemplate>, &'static str> {
        // Load the segments into virtual memory.
        let mut tls_template = None;
        for program_header in self.elf_file.program_iter() {
            match program_header.get_type()? {
                Type::Load => self.inner.handle_load_segment(program_header)?,
                Type::Tls => {
                    if tls_template.is_none() {
                        tls_template = Some(self.inner.handle_tls_segment(program_header)?);
                    } else {
                        return Err("multiple TLS segments not supported");
                    }
                }
                Type::Null
                | Type::Dynamic
                | Type::Interp
                | Type::Note
                | Type::ShLib
                | Type::Phdr
                | Type::GnuRelro
                | Type::OsSpecific(_)
                | Type::ProcessorSpecific(_) => {}
            }
        }

        // Apply relocations in virtual memory.
        for program_header in self.elf_file.program_iter() {
            if let Type::Dynamic = program_header.get_type()? {
                self.inner
                    .handle_dynamic_segment(program_header, &self.elf_file)?
            }
        }

        // Mark some memory regions as read-only after relocations have been
        // applied.
        for program_header in self.elf_file.program_iter() {
            if let Type::GnuRelro = program_header.get_type()? {
                self.inner.handle_relro_segment(program_header);
            }
        }

        self.inner.remove_copied_flags(&self.elf_file).unwrap();

        Ok(tls_template)
    }

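    /// Returns the virtual address of the kernel's entry point.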
    fn entry_point(&self) -> VirtAddr {
        VirtAddr::new(self.inner.virtual_address_offset + self.elf_file.header.pt2.entry_point())
    }
}

impl<'a, M, F> Inner<'a, M, F>
where
    M: MapperAllSizes + Translate,
    F: FrameAllocator<Size4KiB>,
{
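    /// Maps a `Load` segment to its target virtual address.
    ///
    /// The file-backed part of the segment is mapped directly to the frames that
    /// already contain the ELF file; any additional `.bss`-like memory is handled
    /// by [`Self::handle_bss_section`].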
    fn handle_load_segment(&mut self, segment: ProgramHeader) -> Result<(), &'static str> {
        log::info!("Handling Segment: {:x?}", segment);

        let phys_start_addr = self.kernel_offset + segment.offset();
        let start_frame: PhysFrame = PhysFrame::containing_address(phys_start_addr);
        let end_frame: PhysFrame =
            PhysFrame::containing_address(phys_start_addr + segment.file_size() - 1u64);

        let virt_start_addr = VirtAddr::new(self.virtual_address_offset + segment.virtual_addr());
        let start_page: Page = Page::containing_address(virt_start_addr);

        let mut segment_flags = Flags::PRESENT;
        if !segment.flags().is_execute() {
            segment_flags |= Flags::NO_EXECUTE;
        }
        if segment.flags().is_write() {
            segment_flags |= Flags::WRITABLE;
        }

        // map all frames of the segment at the desired virtual address
        for frame in PhysFrame::range_inclusive(start_frame, end_frame) {
            let offset = frame - start_frame;
            let page = start_page + offset;
            let flusher = unsafe {
                // The parent table flags need to be both readable and writable to
                // support recursive page tables.
                // See https://github.com/rust-osdev/bootloader/issues/443#issuecomment-2130010621
                self.page_table
                    .map_to_with_table_flags(
                        page,
                        frame,
                        segment_flags,
                        Flags::PRESENT | Flags::WRITABLE,
                        self.frame_allocator,
                    )
                    .map_err(|_err| "map_to failed")?
            };
            // we operate on an inactive page table, so there's no need to flush anything
            flusher.ignore();
        }

        // Handle .bss section (mem_size > file_size)
        if segment.mem_size() > segment.file_size() {
            // .bss section (or similar), which needs to be mapped and zeroed
            self.handle_bss_section(&segment, segment_flags)?;
        }

        Ok(())
    }

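    /// Maps and zeroes the part of a `Load` segment that is not backed by file data,
    /// i.e. the memory where `mem_size` exceeds `file_size`.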
    fn handle_bss_section(
        &mut self,
        segment: &ProgramHeader,
        segment_flags: Flags,
    ) -> Result<(), &'static str> {
        log::info!("Mapping bss section");

        let virt_start_addr = VirtAddr::new(self.virtual_address_offset + segment.virtual_addr());
        let mem_size = segment.mem_size();
        let file_size = segment.file_size();

        // calculate virtual memory region that must be zeroed
        let zero_start = virt_start_addr + file_size;
        let zero_end = virt_start_addr + mem_size;

        // a type alias that helps in efficiently clearing a page
        type PageArray = [u64; Size4KiB::SIZE as usize / 8];
        const ZERO_ARRAY: PageArray = [0; Size4KiB::SIZE as usize / 8];

        // In some cases, `zero_start` might not be page-aligned. This requires some
        // special treatment because we can't safely zero a frame of the original file.
        let data_bytes_before_zero = zero_start.as_u64() & 0xfff;
        if data_bytes_before_zero != 0 {
            // The last non-bss frame of the segment consists partly of data and partly of bss
            // memory, which must be zeroed. Unfortunately, the file representation might have
            // reused the part of the frame that should be zeroed to store the next segment. This
            // means that we can't simply overwrite that part with zeroes, as we might overwrite
            // other data this way.
            //
            // Example:
            //
            //   XXXXXXXXXXXXXXX000000YYYYYYY000ZZZZZZZZZZZ     virtual memory (XYZ are data)
            //   |·············|     /·····/   /·········/
            //   |·············| ___/·····/   /·········/
            //   |·············|/·····/‾‾‾   /·········/
            //   |·············||·····|/·̅·̅·̅·̅·̅·····/‾‾‾‾
            //   XXXXXXXXXXXXXXXYYYYYYYZZZZZZZZZZZ              file memory (zeros are not saved)
            //   '       '       '       '        '
            //   The areas filled with dots (`·`) indicate a mapping between virtual and file
            //   memory. We see that the data regions `X`, `Y`, `Z` have a valid mapping, while
            //   the regions that are initialized with 0 have not.
            //
            //   The ticks (`'`) below the file memory line indicate the start of a new frame. We
            //   see that the last frames of the `X` and `Y` regions in the file are followed
            //   by the bytes of the next region. So we can't zero these parts of the frame
            //   because they are needed by other memory regions.
            //
            // To solve this problem, we need to allocate a new frame for the last segment page
            // and copy all data content of the original frame over. Afterwards, we can zero
            // the remaining part of the frame since the frame is no longer shared with other
            // segments now.

            let last_page = Page::containing_address(virt_start_addr + file_size - 1u64);
            let new_frame = unsafe { self.make_mut(last_page) };
            let new_bytes_ptr = new_frame.start_address().as_u64() as *mut u8;
            unsafe {
                core::ptr::write_bytes(
                    new_bytes_ptr.add(data_bytes_before_zero as usize),
                    0,
                    (Size4KiB::SIZE - data_bytes_before_zero) as usize,
                );
            }
        }

        // map additional frames for `.bss` memory that is not present in source file
        let start_page: Page =
            Page::containing_address(VirtAddr::new(align_up(zero_start.as_u64(), Size4KiB::SIZE)));
        let end_page = Page::containing_address(zero_end - 1u64);
        for page in Page::range_inclusive(start_page, end_page) {
            // allocate a new unused frame
            let frame = self.frame_allocator.allocate_frame().unwrap();

            // zero frame, utilizing identity-mapping
            let frame_ptr = frame.start_address().as_u64() as *mut PageArray;
            unsafe { frame_ptr.write(ZERO_ARRAY) };

            // map frame
            let flusher = unsafe {
                // The parent table flags need to be both readable and writable to
                // support recursive page tables.
                // See https://github.com/rust-osdev/bootloader/issues/443#issuecomment-2130010621
                self.page_table
                    .map_to_with_table_flags(
                        page,
                        frame,
                        segment_flags,
                        Flags::PRESENT | Flags::WRITABLE,
                        self.frame_allocator,
                    )
                    .map_err(|_err| "Failed to map new frame for bss memory")?
            };
            // we operate on an inactive page table, so we don't need to flush our changes
            flusher.ignore();
        }

        Ok(())
    }

    /// Copy from the kernel address space.
    ///
    /// ## Panics
    ///
    /// Panics if a page is not mapped in `self.page_table`.
    fn copy_from(&self, addr: VirtAddr, buf: &mut [u8]) {
        // We can't know for sure that contiguous virtual addresses are contiguous
        // in physical memory, so we iterate over the pages spanning the
        // addresses, translate them to frames and copy the data.

        let end_inclusive_addr = Step::forward_checked(addr, buf.len() - 1)
            .expect("end address outside of the virtual address space");
        let start_page = Page::<Size4KiB>::containing_address(addr);
        let end_inclusive_page = Page::<Size4KiB>::containing_address(end_inclusive_addr);

        for page in start_page..=end_inclusive_page {
            // Translate the virtual page to the physical frame.
            let phys_addr = self
                .page_table
                .translate_page(page)
                .expect("address is not mapped to the kernel's memory space");

            // Figure out which address range we want to copy from the frame.

            // This page covers these addresses.
            let page_start = page.start_address();
            let page_end_inclusive = page.start_address() + 4095u64;

            // We want to copy from the following address in this frame.
            let start_copy_address = cmp::max(addr, page_start);
            let end_inclusive_copy_address = cmp::min(end_inclusive_addr, page_end_inclusive);

            // These are the offsets into the frame we want to copy from.
            let start_offset_in_frame = start_copy_address - page_start;
            let end_inclusive_offset_in_frame = end_inclusive_copy_address - page_start;

            // Calculate how many bytes we want to copy from this frame.
            let copy_len = end_inclusive_offset_in_frame - start_offset_in_frame + 1;

            // Calculate the physical addresses.
            let start_phys_addr = phys_addr.start_address() + start_offset_in_frame;

            // These are the offsets from the start address. These correspond
            // to the destination indices in `buf`.
            let start_offset_in_buf = Step::steps_between(&addr, &start_copy_address).1.unwrap();

            // Calculate the source slice.
            // Utilize that frames are identity mapped.
            let src_ptr = start_phys_addr.as_u64() as *const u8;
            let src = unsafe {
                // SAFETY: We know that this memory is valid because we got it
                // as the result of a translation. There are no other
                // references to it.
                &*core::ptr::slice_from_raw_parts(src_ptr, copy_len as usize)
            };

            // Calculate the destination slice.
            let dest = &mut buf[start_offset_in_buf..][..copy_len as usize];

            // Do the actual copy.
            dest.copy_from_slice(src);
        }
    }

    /// Write to the kernel address space.
    ///
    /// ## Safety
    /// - `addr` should refer to a page mapped by a Load segment.
    ///
    /// ## Panics
    ///
    /// Panics if a page is not mapped in `self.page_table`.
    unsafe fn copy_to(&mut self, addr: VirtAddr, buf: &[u8]) {
        // We can't know for sure that contiguous virtual addresses are contiguous
        // in physical memory, so we iterate over the pages spanning the
        // addresses, translate them to frames and copy the data.

        let end_inclusive_addr = Step::forward_checked(addr, buf.len() - 1)
            .expect("the end address should be in the virtual address space");
        let start_page = Page::<Size4KiB>::containing_address(addr);
        let end_inclusive_page = Page::<Size4KiB>::containing_address(end_inclusive_addr);

        for page in start_page..=end_inclusive_page {
            // Get a writable frame for the virtual page, copying the original
            // frame out of the ELF file if necessary.
            let phys_addr = unsafe {
                // SAFETY: The caller asserts that the pages are mapped by a Load segment.
                self.make_mut(page)
            };

            // Figure out which address range we want to copy to in the frame.

            // This page covers these addresses.
            let page_start = page.start_address();
            let page_end_inclusive = page.start_address() + 4095u64;

            // We want to copy to the following address in this frame.
            let start_copy_address = cmp::max(addr, page_start);
            let end_inclusive_copy_address = cmp::min(end_inclusive_addr, page_end_inclusive);

            // These are the offsets into the frame we want to copy to.
            let start_offset_in_frame = start_copy_address - page_start;
            let end_inclusive_offset_in_frame = end_inclusive_copy_address - page_start;

            // Calculate how many bytes we want to copy to this frame.
            let copy_len = end_inclusive_offset_in_frame - start_offset_in_frame + 1;

            // Calculate the physical addresses.
            let start_phys_addr = phys_addr.start_address() + start_offset_in_frame;

            // These are the offsets from the start address. These correspond
            // to the source indices in `buf`.
            let start_offset_in_buf = Step::steps_between(&addr, &start_copy_address).1.unwrap();

            // Calculate the destination slice.
            // Utilize that frames are identity mapped.
            let dest_ptr = start_phys_addr.as_u64() as *mut u8;
            let dest = unsafe {
                // SAFETY: We know that this memory is valid because we got it
                // as the result of a translation. There are no other
                // references to it.
                &mut *core::ptr::slice_from_raw_parts_mut(dest_ptr, copy_len as usize)
            };

            // Calculate the source slice.
            let src = &buf[start_offset_in_buf..][..copy_len as usize];

            // Do the actual copy.
            dest.copy_from_slice(src);
        }
    }

    /// This method is intended for making the memory loaded by a Load segment mutable.
    ///
    /// All memory from a Load segment starts out mapped to the same frames that
    /// contain the elf file. Thus writing to memory in that state will cause aliasing issues.
    /// To avoid that, we allocate a new frame, copy all bytes from the old frame to the new frame,
    /// and remap the page to the new frame. At this point the page no longer aliases the elf file
    /// and we can write to it.
    ///
    /// When we map the new frame we also set the [`COPIED`] flag in the page table flags, so that
    /// we can detect if the frame has already been copied when we try to modify the page again.
    ///
    /// ## Safety
    /// - `page` should be a page mapped by a Load segment.
    ///
    /// ## Panics
    /// Panics if the page is not mapped in `self.page_table`.
    unsafe fn make_mut(&mut self, page: Page) -> PhysFrame {
        let (frame, flags) = match self.page_table.translate(page.start_address()) {
            TranslateResult::Mapped {
                frame,
                offset: _,
                flags,
            } => (frame, flags),
            TranslateResult::NotMapped => panic!("{:?} is not mapped", page),
            TranslateResult::InvalidFrameAddress(_) => unreachable!(),
        };
        let frame = if let MappedFrame::Size4KiB(frame) = frame {
            frame
        } else {
            // We only map 4k pages.
            unreachable!()
        };

        if flags.contains(COPIED) {
            // The frame was already copied, we are free to modify it.
            return frame;
        }

        // Allocate a new frame and copy the memory, utilizing that both frames are identity mapped.
        let new_frame = self.frame_allocator.allocate_frame().unwrap();
        let frame_ptr = frame.start_address().as_u64() as *const u8;
        let new_frame_ptr = new_frame.start_address().as_u64() as *mut u8;
        unsafe {
            core::ptr::copy_nonoverlapping(frame_ptr, new_frame_ptr, Size4KiB::SIZE as usize);
        }

        // Replace the underlying frame and update the flags.
        self.page_table.unmap(page).unwrap().1.ignore();
        let new_flags = flags | COPIED;
        unsafe {
            self.page_table
                .map_to(page, new_frame, new_flags, self.frame_allocator)
                .unwrap()
                .ignore();
        }

        new_frame
    }

    /// Cleans up the custom flags set by [`Inner::make_mut`].
    fn remove_copied_flags(&mut self, elf_file: &ElfFile) -> Result<(), &'static str> {
        for program_header in elf_file.program_iter() {
            if let Type::Load = program_header.get_type()? {
                let start = self.virtual_address_offset + program_header.virtual_addr();
                let end = start + program_header.mem_size();
                let start = VirtAddr::new(start);
                let end = VirtAddr::new(end);
                let start_page = Page::containing_address(start);
                let end_page = Page::containing_address(end - 1u64);
                for page in Page::<Size4KiB>::range_inclusive(start_page, end_page) {
                    // Translate the page and get the flags.
                    let res = self.page_table.translate(page.start_address());
                    let flags = match res {
                        TranslateResult::Mapped {
                            frame: _,
                            offset: _,
                            flags,
                        } => flags,
                        TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
                            unreachable!("has the elf file not been mapped correctly?")
                        }
                    };

                    if flags.contains(COPIED) {
                        // Remove the flag.
                        unsafe {
                            self.page_table
                                .update_flags(page, flags & !COPIED)
                                .unwrap()
                                .ignore();
                        }
                    }
                }
            }
        }
        Ok(())
    }

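    /// Creates a [`TlsTemplate`] describing the kernel's TLS segment.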
    fn handle_tls_segment(&mut self, segment: ProgramHeader) -> Result<TlsTemplate, &'static str> {
        Ok(TlsTemplate {
            start_addr: self.virtual_address_offset + segment.virtual_addr(),
            mem_size: segment.mem_size(),
            file_size: segment.file_size(),
        })
    }

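    /// Parses the `Dynamic` segment and applies the relocations it describes.
    ///
    /// Only `Rela` relocation tables are supported; the `Rela`, `RelaSize`, and
    /// `RelaEnt` entries must either all be present or all be absent.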
    fn handle_dynamic_segment(
        &mut self,
        segment: ProgramHeader,
        elf_file: &ElfFile,
    ) -> Result<(), &'static str> {
        let data = segment.get_data(elf_file)?;
        let data = if let SegmentData::Dynamic64(data) = data {
            data
        } else {
            panic!("expected Dynamic64 segment")
        };

        // Find the `Rela`, `RelaSize` and `RelaEnt` entries.
        let mut rela = None;
        let mut rela_size = None;
        let mut rela_ent = None;
        for rel in data {
            let tag = rel.get_tag()?;
            match tag {
                dynamic::Tag::Rela => {
                    let ptr = rel.get_ptr()?;
                    let prev = rela.replace(ptr);
                    if prev.is_some() {
                        return Err("Dynamic section contains more than one Rela entry");
                    }
                }
                dynamic::Tag::RelaSize => {
                    let val = rel.get_val()?;
                    let prev = rela_size.replace(val);
                    if prev.is_some() {
                        return Err("Dynamic section contains more than one RelaSize entry");
                    }
                }
                dynamic::Tag::RelaEnt => {
                    let val = rel.get_val()?;
                    let prev = rela_ent.replace(val);
                    if prev.is_some() {
                        return Err("Dynamic section contains more than one RelaEnt entry");
                    }
                }
                _ => {}
            }
        }
        let offset = if let Some(rela) = rela {
            rela
        } else {
            // The section doesn't contain any relocations.

            if rela_size.is_some() || rela_ent.is_some() {
                return Err("Rela entry is missing but RelaSize or RelaEnt have been provided");
            }

            return Ok(());
        };
        let total_size = rela_size.ok_or("RelaSize entry is missing")?;
        let entry_size = rela_ent.ok_or("RelaEnt entry is missing")?;

        // Make sure that the reported size matches our `Rela<u64>`.
        assert_eq!(
            entry_size,
            size_of::<Rela<u64>>() as u64,
            "unsupported entry size: {entry_size}"
        );

        // Apply the relocations.
        let num_entries = total_size / entry_size;
        for idx in 0..num_entries {
            let rela = self.read_relocation(offset, idx);
            self.apply_relocation(rela, elf_file)?;
        }

        Ok(())
    }

    /// Reads a relocation from a relocation table.
    fn read_relocation(&self, relocation_table: u64, idx: u64) -> Rela<u64> {
        // Calculate the address of the entry in the relocation table.
        let offset = relocation_table + size_of::<Rela<u64>>() as u64 * idx;
        let value = self.virtual_address_offset + offset;
        let addr = VirtAddr::try_new(value).expect("relocation table is outside the address space");

        // Read the Rela from the kernel address space.
        let mut buf = [0; 24];
        self.copy_from(addr, &mut buf);

        // Convert the bytes we read into a `Rela<u64>`.
        unsafe {
            // SAFETY: Any bit pattern is valid for `Rela<u64>` and `buf` is
            // valid for reads.
            core::ptr::read_unaligned(buf.as_ptr() as *const Rela<u64>)
        }
    }

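    /// Applies a single relocation entry to the kernel's memory.
    ///
    /// Only `R_AMD64_RELATIVE` relocations that don't reference the symbol table
    /// are supported.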
    fn apply_relocation(
        &mut self,
        rela: Rela<u64>,
        elf_file: &ElfFile,
    ) -> Result<(), &'static str> {
        let symbol_idx = rela.get_symbol_table_index();
        assert_eq!(
            symbol_idx, 0,
            "relocations using the symbol table are not supported"
        );

        match rela.get_type() {
            // R_AMD64_RELATIVE
            8 => {
                // Make sure that the relocation happens in memory mapped
                // by a Load segment.
                check_is_in_load(elf_file, rela.get_offset())?;

                // Calculate the destination of the relocation.
                let addr = self.virtual_address_offset + rela.get_offset();
                let addr = VirtAddr::new(addr);

                // Calculate the relocated value.
                let value = self.virtual_address_offset + rela.get_addend();

                // Write the relocated value to memory.
                unsafe {
                    // SAFETY: We just verified that the address is in a Load segment.
                    self.copy_to(addr, &value.to_ne_bytes());
                }
            }
            ty => unimplemented!("relocation type {:x} not supported", ty),
        }

        Ok(())
    }

    /// Mark a region of memory indicated by a GNU_RELRO segment as read-only.
    ///
    /// This is a security mitigation used to protect memory regions that
    /// need to be writable while applying relocations, but should never be
    /// written to after relocations have been applied.
    fn handle_relro_segment(&mut self, program_header: ProgramHeader) {
        let start = self.virtual_address_offset + program_header.virtual_addr();
        let end = start + program_header.mem_size();
        let start = VirtAddr::new(start);
        let end = VirtAddr::new(end);
        let start_page = Page::containing_address(start);
        let end_page = Page::containing_address(end - 1u64);
        for page in Page::<Size4KiB>::range_inclusive(start_page, end_page) {
            // Translate the page and get the flags.
            let res = self.page_table.translate(page.start_address());
            let flags = match res {
                TranslateResult::Mapped {
                    frame: _,
                    offset: _,
                    flags,
                } => flags,
                TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
                    unreachable!("has the elf file not been mapped correctly?")
                }
            };

            if flags.contains(Flags::WRITABLE) {
                // Remove the WRITABLE flag.
                unsafe {
                    self.page_table
                        .update_flags(page, flags & !Flags::WRITABLE)
                        .unwrap()
                        .ignore();
                }
            }
        }
    }
}

/// Check that the virtual offset belongs to a load segment.
fn check_is_in_load(elf_file: &ElfFile, virt_offset: u64) -> Result<(), &'static str> {
    for program_header in elf_file.program_iter() {
        if let Type::Load = program_header.get_type()? {
            if program_header.virtual_addr() <= virt_offset {
                let offset_in_segment = virt_offset - program_header.virtual_addr();
                if offset_in_segment < program_header.mem_size() {
                    return Ok(());
                }
            }
        }
    }
    Err("offset is not in load segment")
}

/// Loads the given kernel ELF file into the given `page_table`.
///
/// Returns the virtual address offset that was applied to the kernel's addresses, the
/// kernel entry point address, and the kernel's thread local storage template (if any).
/// The level 4 page table entries used by the kernel are recorded in `used_entries`.
pub fn load_kernel(
    kernel: Kernel<'_>,
    page_table: &mut (impl MapperAllSizes + Translate),
    frame_allocator: &mut impl FrameAllocator<Size4KiB>,
    used_entries: &mut UsedLevel4Entries,
) -> Result<(VirtAddr, VirtAddr, Option<TlsTemplate>), &'static str> {
    let mut loader = Loader::new(kernel, page_table, frame_allocator, used_entries)?;
    let tls_template = loader.load_segments()?;

    Ok((
        VirtAddr::new(loader.inner.virtual_address_offset.virtual_address_offset() as u64),
        loader.entry_point(),
        tls_template,
    ))
}

/// Basic information about the memory segments of an elf file.
pub struct ElfMemoryRequirements {
    /// total size needed for all segments
    pub size: u64,
    /// memory alignment for the elf file
    pub align: u64,
    /// the smallest virtual address used by the elf file
    pub min_addr: u64,
}

/// Calculates basic requirements needed to allocate memory for an elf file.
pub fn calc_elf_memory_requirements(elf_file: &ElfFile) -> ElfMemoryRequirements {
    // Find the highest virtual memory address and the biggest alignment.
    let load_program_headers = elf_file
        .program_iter()
        .filter(|h| matches!(h.get_type(), Ok(Type::Load)));
    let max_addr = load_program_headers
        .clone()
        .map(|h| h.virtual_addr() + h.mem_size())
        .max()
        .unwrap_or(0);
    let min_addr = load_program_headers
        .clone()
        .map(|h| h.virtual_addr())
        .min()
        .unwrap_or(0);
    let size = max_addr - min_addr;
    let align = load_program_headers.map(|h| h.align()).max().unwrap_or(1);
    ElfMemoryRequirements {
        size,
        align,
        min_addr,
    }
}

/// A helper type used to offset virtual addresses for position independent
/// executables.
#[derive(Clone, Copy)]
pub struct VirtualAddressOffset {
    virtual_address_offset: i128,
}

impl VirtualAddressOffset {
    pub fn zero() -> Self {
        Self::new(0)
    }

    pub fn new(virtual_address_offset: i128) -> Self {
        Self {
            virtual_address_offset,
        }
    }

    pub fn virtual_address_offset(&self) -> i128 {
        self.virtual_address_offset
    }
}

impl Add<u64> for VirtualAddressOffset {
    type Output = u64;

    fn add(self, offset: u64) -> Self::Output {
        u64::try_from(
            self.virtual_address_offset
                .checked_add(i128::from(offset))
                .unwrap(),
        )
        .unwrap()
    }
}