xenplatform/boot.rs

use std::slice;

use log::debug;
use slice_copy::copy;
use xencall::{sys::CreateDomain, XenCall};

use crate::{
    error::{Error, Result},
    mem::PhysicalPages,
    sys::XEN_PAGE_SHIFT,
    ImageLoader, PlatformKernelConfig, PlatformResourcesConfig,
};

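/// A region of guest memory allocated during boot: the guest-virtual range it
/// covers (`vstart`..`vend`), the first guest PFN backing it, the local
/// mapping address used to fill it (`addr`, zero if unmapped), and its size in
/// bytes and pages.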
#[derive(Debug, Default, Clone)]
pub struct DomainSegment {
    pub vstart: u64,
    pub vend: u64,
    pub pfn: u64,
    pub addr: u64,
    pub size: u64,
    pub pages: u64,
}

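/// Mutable state carried through domain construction: allocation cursors for
/// guest-virtual addresses and PFNs, page accounting, the parsed kernel image
/// info, xenstore and console channel details, and the physical page mapper
/// used to access guest memory while the domain is being built.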
pub struct BootDomain {
    pub domid: u32,
    pub call: XenCall,
    pub page_size: u64,
    pub virt_alloc_end: u64,
    pub pfn_alloc_end: u64,
    pub virt_pgtab_end: u64,
    pub total_pages: u64,
    pub target_pages: u64,
    pub max_vcpus: u32,
    pub image_info: BootImageInfo,
    pub phys: PhysicalPages,
    pub store_evtchn: u32,
    pub store_mfn: u64,
    pub initrd_segment: Option<DomainSegment>,
    pub console_evtchn: u32,
    pub console_mfn: u64,
    pub cmdline: String,
}

impl BootDomain {
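    /// Allocates a segment large enough to hold `buffer` at the current
    /// allocation cursor and copies the buffer into it.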
    pub async fn alloc_module(&mut self, buffer: &[u8]) -> Result<DomainSegment> {
        let segment = self.alloc_segment(0, buffer.len() as u64).await?;
        let slice = unsafe { slice::from_raw_parts_mut(segment.addr as *mut u8, buffer.len()) };
        copy(slice, buffer);
        Ok(segment)
    }

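    /// Allocates `size` bytes of guest memory (rounded up to whole pages) at the
    /// current allocation cursor, first padding up to `start` when it is
    /// non-zero. The backing pages are mapped and zeroed, and the returned
    /// segment's `addr` points at that local mapping.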
    pub async fn alloc_segment(&mut self, start: u64, size: u64) -> Result<DomainSegment> {
        debug!("alloc_segment {:#x} {:#x}", start, size);
        if start > 0 {
            self.alloc_padding_pages(start)?;
        }

        let local_page_size: u32 = (1i64 << XEN_PAGE_SHIFT) as u32;
        let pages = size.div_ceil(local_page_size as u64);
        let start = self.virt_alloc_end;

        let mut segment = DomainSegment {
            vstart: start,
            vend: 0,
            pfn: self.pfn_alloc_end,
            addr: 0,
            size,
            pages,
        };

        self.chk_alloc_pages(pages)?;

        // Map the backing pages and zero them before the segment is used.
        let ptr = self.phys.pfn_to_ptr(segment.pfn, pages).await?;
        segment.addr = ptr;
        let slice = unsafe {
            slice::from_raw_parts_mut(ptr as *mut u8, (pages * local_page_size as u64) as usize)
        };
        slice.fill(0);
        segment.vend = self.virt_alloc_end;
        debug!(
            "alloc_segment {:#x} -> {:#x} (pfn {:#x} + {:#x} pages)",
            start, segment.vend, segment.pfn, pages
        );
        Ok(segment)
    }

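    /// Consumes padding pages up to `boundary`, which must be page-aligned and
    /// must not lie below the current virtual allocation end.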
    pub fn alloc_padding_pages(&mut self, boundary: u64) -> Result<()> {
        if (boundary & (self.page_size - 1)) != 0 {
            return Err(Error::MemorySetupFailed("boundary is incorrect"));
        }

        if boundary < self.virt_alloc_end {
            return Err(Error::MemorySetupFailed("boundary is below allocation end"));
        }
        let pages = (boundary - self.virt_alloc_end) / self.page_size;
        self.chk_alloc_pages(pages)?;
        Ok(())
    }

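    /// Reserves `pages` pages from the domain's budget, failing if the request
    /// would exceed `total_pages`, and advances both the PFN and virtual
    /// allocation cursors.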
    pub fn chk_alloc_pages(&mut self, pages: u64) -> Result<()> {
        if pages > self.total_pages
            || self.pfn_alloc_end > self.total_pages
            || pages > self.total_pages - self.pfn_alloc_end
        {
            return Err(Error::MemorySetupFailed("no more pages left"));
        }

        self.pfn_alloc_end += pages;
        self.virt_alloc_end += pages * self.page_size;
        Ok(())
    }

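    /// Reserves a single page at the current allocation cursor. The page is only
    /// accounted for here, not mapped, so the returned segment's `addr` is zero.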
    pub fn alloc_page(&mut self) -> Result<DomainSegment> {
        let start = self.virt_alloc_end;
        let pfn = self.pfn_alloc_end;

        self.chk_alloc_pages(1)?;
        debug!("alloc_page {:#x} (pfn {:#x})", start, pfn);
        Ok(DomainSegment {
            vstart: start,
            vend: (start + self.page_size) - 1,
            pfn,
            addr: 0,
            size: 0,
            pages: 1,
        })
    }

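    /// Rounds `addr` up by OR-ing in `mask`: the result is the highest address
    /// within the `mask`-aligned block containing `addr`, not the next aligned
    /// boundary.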
    pub fn round_up(addr: u64, mask: u64) -> u64 {
        addr | mask
    }

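    /// Builds a mask with the low `bits` bits set; for example, `bits_to_mask(12)`
    /// returns `0xfff`, the offset mask for 4 KiB pages.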
    pub fn bits_to_mask(bits: u64) -> u64 {
        (1 << bits) - 1
    }
}

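/// Platform-specific hooks for building and booting a domain. Implementations
/// supply the architecture details (page size, page tables, magic pages, start
/// and shared info, vCPU setup), while the provided `initialize*` and `boot*`
/// methods drive the common sequence of parsing and loading the kernel,
/// placing the initrd, and finalizing the domain.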
#[async_trait::async_trait]
pub trait BootSetupPlatform {
    fn create_domain(&self, enable_iommu: bool) -> CreateDomain;
    fn page_size(&self) -> u64;
    fn page_shift(&self) -> u64;
    fn needs_early_kernel(&self) -> bool;
    fn hvm(&self) -> bool;

    async fn initialize_early(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn alloc_page_tables(&mut self, domain: &mut BootDomain)
        -> Result<Option<DomainSegment>>;

    async fn alloc_p2m_segment(&mut self, domain: &mut BootDomain)
        -> Result<Option<DomainSegment>>;

    async fn alloc_magic_pages(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn setup_page_tables(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn setup_shared_info(
        &mut self,
        domain: &mut BootDomain,
        shared_info_frame: u64,
    ) -> Result<()>;

    async fn setup_start_info(
        &mut self,
        domain: &mut BootDomain,
        shared_info_frame: u64,
    ) -> Result<()>;

    async fn bootlate(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn gnttab_seed(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn vcpu(&mut self, domain: &mut BootDomain) -> Result<()>;

    async fn setup_hypercall_page(&mut self, domain: &mut BootDomain) -> Result<()>;

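    /// Common initialization sequence: runs the platform's early setup, loads
    /// the kernel (early if the platform requires it) and the initrd (before or
    /// after memory initialization, depending on `unmapped_initrd`), allocates
    /// the magic pages, and allocates the unbound xenstore event channel.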
    async fn initialize_internal(
        &mut self,
        domid: u32,
        call: XenCall,
        image_loader: &ImageLoader,
        domain: &mut BootDomain,
        kernel: &PlatformKernelConfig,
    ) -> Result<()> {
        self.initialize_early(domain).await?;

        let mut initrd_segment = if !domain.image_info.unmapped_initrd && kernel.initrd.is_some() {
            Some(domain.alloc_module(kernel.initrd.as_ref().unwrap()).await?)
        } else {
            None
        };

        let mut kernel_segment = if self.needs_early_kernel() {
            Some(self.load_kernel_segment(image_loader, domain).await?)
        } else {
            None
        };

        self.initialize_memory(domain).await?;
        // Restart virtual allocation at the image's virtual base after memory initialization.
        domain.virt_alloc_end = domain.image_info.virt_base;

        if kernel_segment.is_none() {
            kernel_segment = Some(self.load_kernel_segment(image_loader, domain).await?);
        }

        if domain.image_info.unmapped_initrd && kernel.initrd.is_some() {
            initrd_segment = Some(domain.alloc_module(kernel.initrd.as_ref().unwrap()).await?);
        }

        domain.initrd_segment = initrd_segment;
        self.alloc_magic_pages(domain).await?;
        domain.store_evtchn = call.evtchn_alloc_unbound(domid, 0).await?;
        // Ensure a kernel segment was actually loaded.
        let _kernel_segment =
            kernel_segment.ok_or(Error::MemorySetupFailed("kernel_segment missing"))?;
        Ok(())
    }

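    /// Builds a fresh [`BootDomain`] from the kernel and resource configuration,
    /// converting the memory sizes in MiB into page counts, then runs
    /// `initialize_internal`. On failure, any guest pages mapped so far are
    /// unmapped before the error is returned.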
    #[allow(clippy::too_many_arguments)]
    async fn initialize(
        &mut self,
        domid: u32,
        call: XenCall,
        image_loader: &ImageLoader,
        kernel: &PlatformKernelConfig,
        resources: &PlatformResourcesConfig,
    ) -> Result<BootDomain> {
        let target_pages = resources.assigned_memory_mb << (20 - self.page_shift());
        let total_pages = resources.max_memory_mb << (20 - self.page_shift());
        let image_info = image_loader.parse(self.hvm()).await?;
        let mut domain = BootDomain {
            domid,
            call: call.clone(),
            virt_alloc_end: 0,
            virt_pgtab_end: 0,
            pfn_alloc_end: 0,
            total_pages,
            target_pages,
            page_size: self.page_size(),
            image_info,
            console_evtchn: 0,
            console_mfn: 0,
            max_vcpus: resources.max_vcpus,
            phys: PhysicalPages::new(call.clone(), domid, self.page_shift()),
            initrd_segment: None,
            store_evtchn: 0,
            store_mfn: 0,
            cmdline: kernel.cmdline.clone(),
        };
        match self
            .initialize_internal(domid, call, image_loader, &mut domain, kernel)
            .await
        {
            Ok(_) => Ok(domain),
            Err(error) => {
                domain.phys.unmap_all()?;
                Err(error)
            }
        }
    }

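    /// Late boot sequence for an initialized domain: page tables, start info,
    /// the hypercall page, platform `bootlate` work, shared info, vCPU state,
    /// and grant-table seeding, finishing by unmapping the builder's guest
    /// memory mappings.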
    async fn boot_internal(
        &mut self,
        call: XenCall,
        domid: u32,
        domain: &mut BootDomain,
    ) -> Result<()> {
        let domain_info = call.get_domain_info(domid).await?;
        let shared_info_frame = domain_info.shared_info_frame;
        self.setup_page_tables(domain).await?;
        self.setup_start_info(domain, shared_info_frame).await?;
        self.setup_hypercall_page(domain).await?;
        self.bootlate(domain).await?;
        self.setup_shared_info(domain, shared_info_frame).await?;
        self.vcpu(domain).await?;
        self.gnttab_seed(domain).await?;
        domain.phys.unmap_all()?;
        Ok(())
    }

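    /// Runs `boot_internal` and unmaps all guest memory mappings whether or not
    /// the boot sequence succeeded, then returns the boot result.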
    async fn boot(&mut self, domid: u32, call: XenCall, domain: &mut BootDomain) -> Result<()> {
        let result = self.boot_internal(call, domid, domain).await;
        domain.phys.unmap_all()?;
        result
    }

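    /// Allocates a segment spanning the kernel's virtual load range
    /// (`virt_kstart`..`virt_kend`) and has the image loader write the kernel
    /// image into it.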
    async fn load_kernel_segment(
        &mut self,
        image_loader: &ImageLoader,
        domain: &mut BootDomain,
    ) -> Result<DomainSegment> {
        let kernel_segment = domain
            .alloc_segment(
                domain.image_info.virt_kstart,
                domain.image_info.virt_kend - domain.image_info.virt_kstart,
            )
            .await?;
        let kernel_segment_ptr = kernel_segment.addr as *mut u8;
        let kernel_segment_slice =
            unsafe { slice::from_raw_parts_mut(kernel_segment_ptr, kernel_segment.size as usize) };
        image_loader
            .load(&domain.image_info, kernel_segment_slice)
            .await?;
        Ok(kernel_segment)
    }
}

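/// Abstraction over kernel image formats: `parse` extracts the layout and entry
/// information needed for boot, and `load` writes the image into a prepared
/// guest memory slice.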
#[async_trait::async_trait]
pub trait BootImageLoader {
    async fn parse(&self, hvm: bool) -> Result<BootImageInfo>;
    async fn load(&self, image_info: &BootImageInfo, dst: &mut [u8]) -> Result<()>;
}

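/// Layout and entry information extracted from a kernel image: the virtual
/// base, kernel start and end addresses, hypercall page and entry-point
/// addresses, the P2M base, and whether the initrd should be placed after
/// memory initialization (`unmapped_initrd`) rather than alongside the early
/// allocations.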
#[derive(Debug)]
pub struct BootImageInfo {
    pub start: u64,
    pub virt_base: u64,
    pub virt_kstart: u64,
    pub virt_kend: u64,
    pub virt_hypercall: u64,
    pub virt_entry: u64,
    pub virt_p2m_base: u64,
    pub unmapped_initrd: bool,
}