use std::{
    mem::size_of,
    os::raw::{c_char, c_void},
    slice,
};

use libc::munmap;
use log::{debug, trace};
use nix::errno::Errno;
use slice_copy::copy;
use xencall::sys::{
    x8664VcpuGuestContext, CreateDomain, VcpuGuestContextAny, MMUEXT_PIN_L4_TABLE,
    XEN_DOMCTL_CDF_IOMMU,
};

use crate::{
    boot::{BootDomain, BootSetupPlatform, DomainSegment},
    error::{Error, Result},
    sys::{
        GrantEntry, SUPERPAGE_2MB_NR_PFNS, SUPERPAGE_2MB_SHIFT, SUPERPAGE_BATCH_SIZE,
        VGCF_IN_KERNEL, VGCF_ONLINE, XEN_PAGE_SHIFT,
    },
};

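// Constants describing x86-64 paging: 4 KiB pages, 48-bit virtual
// addresses, and four page table levels of 9 bits each.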
pub const X86_PAGE_SHIFT: u64 = 12;
pub const X86_PAGE_SIZE: u64 = 1 << X86_PAGE_SHIFT;
pub const X86_VIRT_BITS: u64 = 48;
pub const X86_VIRT_MASK: u64 = (1 << X86_VIRT_BITS) - 1;
pub const X86_PGTABLE_LEVELS: u64 = 4;
pub const X86_PGTABLE_LEVEL_SHIFT: u64 = 9;

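/// One level of a page table mapping: the virtual range `from..=to` it
/// covers, the first pfn backing its page tables, and the number of
/// page table pages required at this level.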
#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct PageTableMappingLevel {
    pub from: u64,
    pub to: u64,
    pub pfn: u64,
    pub pgtables: usize,
}

#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct PageTableMapping {
    pub area: PageTableMappingLevel,
    pub levels: [PageTableMappingLevel; X86_PGTABLE_LEVELS as usize],
}

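/// A PV domain needs at most two mappings: one for the kernel image area
/// and one for the p2m list (see `alloc_page_tables` and
/// `alloc_p2m_segment` below).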
pub const X86_PAGE_TABLE_MAX_MAPPINGS: usize = 2;

#[repr(C)]
#[derive(Debug, Clone, Default)]
pub struct PageTable {
    pub mappings_count: usize,
    pub mappings: [PageTableMapping; X86_PAGE_TABLE_MAX_MAPPINGS],
}

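/// Console parameters in the start info page: the machine frame of the
/// console ring and the event channel used to signal it.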
#[repr(C)]
#[derive(Debug)]
pub struct StartInfoConsole {
    pub mfn: u64,
    pub evtchn: u32,
}

pub const MAX_GUEST_CMDLINE: usize = 1024;

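/// Mirrors Xen's `start_info_t`, the first structure a PV guest reads at
/// boot. It is filled in by `setup_start_info` below.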
#[repr(C)]
#[derive(Debug)]
pub struct StartInfo {
    pub magic: [c_char; 32],
    pub nr_pages: u64,
    pub shared_info: u64,
    pub flags: u32,
    pub store_mfn: u64,
    pub store_evtchn: u32,
    pub console: StartInfoConsole,
    pub pt_base: u64,
    pub nr_pt_frames: u64,
    pub mfn_list: u64,
    pub mod_start: u64,
    pub mod_len: u64,
    pub cmdline: [c_char; MAX_GUEST_CMDLINE],
    pub first_p2m_pfn: u64,
    pub nr_p2m_frames: u64,
}

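/// Magic string identifying the x86-64 PV ABI to the guest.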
pub const X86_GUEST_MAGIC: &str = "xen-3.0-x86_64";

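// The next three structs mirror the layout of Xen's vcpu_info_t:
// architecture-specific state, time info, and per-vcpu event channel
// upcall state.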
#[repr(C)]
#[derive(Debug)]
pub struct ArchVcpuInfo {
    pub cr2: u64,
    pub pad: u64,
}

#[repr(C)]
#[derive(Debug)]
pub struct VcpuInfoTime {
    pub version: u32,
    pub pad0: u32,
    pub tsc_timestamp: u64,
    pub system_time: u64,
    pub tsc_to_system_mul: u32,
    pub tsc_shift: i8,
    pub flags: u8,
    pub pad1: [u8; 2],
}

#[repr(C)]
#[derive(Debug)]
pub struct VcpuInfo {
    pub evtchn_upcall_pending: u8,
    pub evtchn_upcall_mask: u8,
    pub evtchn_pending_sel: u64,
    pub arch_vcpu_info: ArchVcpuInfo,
    pub vcpu_info_time: VcpuInfoTime,
}

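/// Mirrors Xen's `shared_info_t` for x86-64: per-vcpu info, event channel
/// bitmaps, wallclock fields, and p2m bookkeeping.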
#[repr(C)]
#[derive(Debug)]
pub struct SharedInfo {
    pub vcpu_info: [VcpuInfo; 32],
    pub evtchn_pending: [u64; u64::BITS as usize],
    pub evtchn_mask: [u64; u64::BITS as usize],
    pub wc_version: u32,
    pub wc_sec: u32,
    pub wc_nsec: u32,
    pub wc_sec_hi: u32,
    pub max_pfn: u64,
    pub pfn_to_mfn_frame_list_list: u64,
    pub nmi_reason: u64,
    pub p2m_cr3: u64,
    pub p2m_vaddr: u64,
    pub p2m_generation: u64,
}

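/// A contiguous range of guest physical memory to populate.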
#[derive(Debug)]
struct VmemRange {
    start: u64,
    end: u64,
    _flags: u32,
    _nid: u32,
}

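/// Boot setup platform for x86-64 paravirtualized (PV) guests. Tracks the
/// page table layout and the special-purpose segments allocated while the
/// domain is being built.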
#[derive(Default, Clone)]
pub struct X86PvPlatform {
    table: PageTable,
    p2m_segment: Option<DomainSegment>,
    page_table_segment: Option<DomainSegment>,
    start_info_segment: Option<DomainSegment>,
    boot_stack_segment: Option<DomainSegment>,
    xenstore_segment: Option<DomainSegment>,
}

impl X86PvPlatform {
    pub fn new() -> Self {
        Self::default()
    }

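    // x86 page table entry flag bits.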
    const PAGE_PRESENT: u64 = 0x001;
    const PAGE_RW: u64 = 0x002;
    const PAGE_USER: u64 = 0x004;
    const PAGE_ACCESSED: u64 = 0x020;
    const PAGE_DIRTY: u64 = 0x040;
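    /// Returns the page table entry protection bits for level `l`. Leaf
    /// (L1) entries that map the page table pages themselves are made
    /// read-only, as Xen requires for pinned PV page tables.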
    fn get_pg_prot(&mut self, l: usize, pfn: u64) -> u64 {
        let prot = [
            X86PvPlatform::PAGE_PRESENT | X86PvPlatform::PAGE_RW | X86PvPlatform::PAGE_ACCESSED,
            X86PvPlatform::PAGE_PRESENT
                | X86PvPlatform::PAGE_RW
                | X86PvPlatform::PAGE_ACCESSED
                | X86PvPlatform::PAGE_DIRTY
                | X86PvPlatform::PAGE_USER,
            X86PvPlatform::PAGE_PRESENT
                | X86PvPlatform::PAGE_RW
                | X86PvPlatform::PAGE_ACCESSED
                | X86PvPlatform::PAGE_DIRTY
                | X86PvPlatform::PAGE_USER,
            X86PvPlatform::PAGE_PRESENT
                | X86PvPlatform::PAGE_RW
                | X86PvPlatform::PAGE_ACCESSED
                | X86PvPlatform::PAGE_DIRTY
                | X86PvPlatform::PAGE_USER,
        ];

        let prot = prot[l];
        if l > 0 {
            return prot;
        }

        for m in 0..self.table.mappings_count {
            let map = &self.table.mappings[m];
            let pfn_s = map.levels[(X86_PGTABLE_LEVELS - 1) as usize].pfn;
            let pfn_e = map.area.pgtables as u64 + pfn_s;
            if pfn >= pfn_s && pfn < pfn_e {
                return prot & !X86PvPlatform::PAGE_RW;
            }
        }
        prot
    }

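    /// Computes how many page table pages are needed to map the virtual
    /// range `from..to`, recording the result as a new entry in
    /// `self.table.mappings` and returning its index. Ranges already
    /// covered by earlier mappings are not counted twice. The caller is
    /// responsible for bumping `mappings_count` once the mapping is
    /// finalized.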
    fn count_page_tables(
        &mut self,
        domain: &mut BootDomain,
        from: u64,
        to: u64,
        pfn: u64,
    ) -> Result<usize> {
        debug!("counting pgtables from={} to={} pfn={}", from, to, pfn);
        if self.table.mappings_count == X86_PAGE_TABLE_MAX_MAPPINGS {
            return Err(Error::MemorySetupFailed("max page table count reached"));
        }

        let m = self.table.mappings_count;

        let pfn_end = pfn + ((to - from) >> X86_PAGE_SHIFT);
        if pfn_end >= domain.phys.p2m_size() {
            return Err(Error::MemorySetupFailed("pfn_end greater than p2m size"));
        }

        // The new mapping must not overlap any existing one.
        for idx in 0..self.table.mappings_count {
            if from < self.table.mappings[idx].area.to && to > self.table.mappings[idx].area.from {
                return Err(Error::MemorySetupFailed("page table calculation failed"));
            }
        }
        let mut map = PageTableMapping::default();
        map.area.from = from & X86_VIRT_MASK;
        map.area.to = to & X86_VIRT_MASK;

        for l in (0usize..X86_PGTABLE_LEVELS as usize).rev() {
            map.levels[l].pfn = domain.pfn_alloc_end + map.area.pgtables as u64;
            if l as u64 == X86_PGTABLE_LEVELS - 1 {
                // There is exactly one top-level (L4) table, shared by all
                // mappings; only the first mapping allocates it.
                if self.table.mappings_count == 0 {
                    map.levels[l].from = 0;
                    map.levels[l].to = X86_VIRT_MASK;
                    map.levels[l].pgtables = 1;
                    map.area.pgtables += 1;
                }
                continue;
            }

            let bits = X86_PAGE_SHIFT + (l + 1) as u64 * X86_PGTABLE_LEVEL_SHIFT;
            let mask = BootDomain::bits_to_mask(bits);
            map.levels[l].from = map.area.from & !mask;
            map.levels[l].to = map.area.to | mask;

            // Trim away any part of the range whose tables at this level
            // are already provided by an earlier mapping.
            for cmp in &mut self.table.mappings[0..self.table.mappings_count] {
                if cmp.levels[l].from == cmp.levels[l].to {
                    continue;
                }

                if map.levels[l].from >= cmp.levels[l].from && map.levels[l].to <= cmp.levels[l].to
                {
                    map.levels[l].from = 0;
                    map.levels[l].to = 0;
                    break;
                }

                if map.levels[l].from >= cmp.levels[l].from
                    && map.levels[l].from <= cmp.levels[l].to
                {
                    map.levels[l].from = cmp.levels[l].to + 1;
                }

                if map.levels[l].to >= cmp.levels[l].from && map.levels[l].to <= cmp.levels[l].to {
                    map.levels[l].to = cmp.levels[l].from - 1;
                }
            }

            if map.levels[l].from < map.levels[l].to {
                map.levels[l].pgtables =
                    (((map.levels[l].to - map.levels[l].from) >> bits) + 1) as usize;
            }

            debug!(
                "count_pgtables {:#x}/{}: {:#x} -> {:#x}, {} tables",
                mask, bits, map.levels[l].from, map.levels[l].to, map.levels[l].pgtables
            );
            map.area.pgtables += map.levels[l].pgtables;
        }
        self.table.mappings[m] = map;
        Ok(m)
    }
}

#[async_trait::async_trait]
impl BootSetupPlatform for X86PvPlatform {
    fn create_domain(&self, enable_iommu: bool) -> CreateDomain {
        CreateDomain {
            flags: if enable_iommu {
                XEN_DOMCTL_CDF_IOMMU
            } else {
                0
            },
            ..Default::default()
        }
    }

    fn page_size(&self) -> u64 {
        X86_PAGE_SIZE
    }

    fn page_shift(&self) -> u64 {
        X86_PAGE_SHIFT
    }

    fn needs_early_kernel(&self) -> bool {
        false
    }

    fn hvm(&self) -> bool {
        false
    }

    async fn initialize_early(&mut self, _: &mut BootDomain) -> Result<()> {
        Ok(())
    }

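    /// Builds the domain's physical memory: constructs an identity p2m,
    /// then populates the physmap, preferring 2 MiB superpages and falling
    /// back to 4 KiB pages for the remainder.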
    async fn initialize_memory(&mut self, domain: &mut BootDomain) -> Result<()> {
        domain.call.set_address_size(domain.domid, 64).await?;
        domain
            .call
            .claim_pages(domain.domid, domain.total_pages)
            .await?;
        let mut vmemranges: Vec<VmemRange> = Vec::new();
        let stub = VmemRange {
            start: 0,
            end: domain.total_pages << XEN_PAGE_SHIFT,
            _flags: 0,
            _nid: 0,
        };
        vmemranges.push(stub);
        let mut p2m_size: u64 = 0;
        let mut total: u64 = 0;
        for range in &vmemranges {
            total += (range.end - range.start) >> XEN_PAGE_SHIFT;
            p2m_size = p2m_size.max(range.end >> XEN_PAGE_SHIFT);
        }

        if total != domain.total_pages {
            return Err(Error::MemorySetupFailed("total pages mismatch"));
        }

        let mut p2m = vec![u64::MAX; p2m_size as usize];
        for range in &vmemranges {
            let mut extents_init = vec![0u64; SUPERPAGE_BATCH_SIZE as usize];
            let pages = (range.end - range.start) >> XEN_PAGE_SHIFT;
            let pfn_base = range.start >> XEN_PAGE_SHIFT;

            // Start with an identity p2m; populate_physmap fills in the
            // real mfns below.
            for pfn in pfn_base..pfn_base + pages {
                p2m[pfn as usize] = pfn;
            }

            // Allocate as much of the range as possible with 2 MiB
            // superpages, in batches of SUPERPAGE_BATCH_SIZE.
            let mut super_pages = pages >> SUPERPAGE_2MB_SHIFT;
            let mut pfn_base_idx: u64 = pfn_base;
            while super_pages > 0 {
                let count = super_pages.min(SUPERPAGE_BATCH_SIZE);
                super_pages -= count;

                let mut j: usize = 0;
                let mut pfn: u64 = pfn_base_idx;
                while pfn < pfn_base_idx + (count << SUPERPAGE_2MB_SHIFT) {
                    extents_init[j] = p2m[pfn as usize];
                    pfn += SUPERPAGE_2MB_NR_PFNS;
                    j += 1;
                }

                let extents_init_slice = extents_init.as_slice();
                let extents = domain
                    .call
                    .populate_physmap(
                        domain.domid,
                        count,
                        SUPERPAGE_2MB_SHIFT as u32,
                        0,
                        &extents_init_slice[0usize..count as usize],
                    )
                    .await?;

                // Expand each superpage extent into its constituent 4 KiB
                // frames in the p2m.
                pfn = pfn_base_idx;
                for mfn in extents {
                    for k in 0..SUPERPAGE_2MB_NR_PFNS {
                        p2m[pfn as usize] = mfn + k;
                        pfn += 1;
                    }
                }
                pfn_base_idx = pfn;
            }

            // Populate whatever the superpage pass did not cover with
            // 4 KiB pages.
            let mut j = pfn_base_idx - pfn_base;
            while j < pages {
                let allocsz = (1024 * 1024).min(pages - j);
                let p2m_idx = (pfn_base + j) as usize;
                let p2m_end_idx = p2m_idx + allocsz as usize;
                let input_extent_starts = &p2m[p2m_idx..p2m_end_idx];
                let result = domain
                    .call
                    .populate_physmap(domain.domid, allocsz, 0, 0, input_extent_starts)
                    .await?;

                if result.len() != allocsz as usize {
                    return Err(Error::PopulatePhysmapFailed(
                        allocsz as usize,
                        result.len(),
                        input_extent_starts.len(),
                    ));
                }

                for (i, item) in result.iter().enumerate() {
                    let p = (pfn_base + j + i as u64) as usize;
                    p2m[p] = *item;
                }
                j += allocsz;
            }
        }

        domain.phys.load_p2m(p2m);
        // Release the claim now that all pages are allocated.
        domain.call.claim_pages(domain.domid, 0).await?;
        Ok(())
    }

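    /// Iterates to a fixed point on the number of page table pages needed,
    /// since the page tables themselves consume virtual address space that
    /// must also be mapped, then allocates the segment that will hold them.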
    async fn alloc_page_tables(
        &mut self,
        domain: &mut BootDomain,
    ) -> Result<Option<DomainSegment>> {
        let mut extra_pages = 1;
        extra_pages += (512 * 1024) / X86_PAGE_SIZE;
        let mut pages = extra_pages;

        let mut try_virt_end: u64;
        let mut m: usize;
        loop {
            try_virt_end = BootDomain::round_up(
                domain.virt_alloc_end + pages * X86_PAGE_SIZE,
                BootDomain::bits_to_mask(22),
            );
            m = self.count_page_tables(domain, domain.image_info.virt_base, try_virt_end, 0)?;
            pages = self.table.mappings[m].area.pgtables as u64 + extra_pages;
            if domain.virt_alloc_end + pages * X86_PAGE_SIZE <= try_virt_end + 1 {
                break;
            }
        }

        self.table.mappings[m].area.pfn = 0;
        self.table.mappings_count += 1;
        domain.virt_pgtab_end = try_virt_end + 1;
        let size = self.table.mappings[m].area.pgtables as u64 * X86_PAGE_SIZE;
        let segment = domain.alloc_segment(0, size).await?;
        debug!(
            "alloc_page_tables table={:?} segment={:?}",
            self.table, segment
        );
        Ok(Some(segment))
    }

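    /// Allocates the segment holding the p2m list (one u64 mfn per pfn),
    /// along with the page table pages needed to map it.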
    async fn alloc_p2m_segment(
        &mut self,
        domain: &mut BootDomain,
    ) -> Result<Option<DomainSegment>> {
        // 8 bytes per p2m entry, rounded up to a whole number of pages.
        let mut p2m_alloc_size =
            ((domain.phys.p2m_size() * 8) + X86_PAGE_SIZE - 1) & !(X86_PAGE_SIZE - 1);
        let from = domain.image_info.virt_p2m_base;
        let to = from + p2m_alloc_size - 1;
        let m = self.count_page_tables(domain, from, to, domain.pfn_alloc_end)?;

        let pgtables: usize;
        {
            let map = &mut self.table.mappings[m];
            map.area.pfn = domain.pfn_alloc_end;
            for lvl_idx in 0..4 {
                map.levels[lvl_idx].pfn += p2m_alloc_size >> X86_PAGE_SHIFT;
            }
            pgtables = map.area.pgtables;
        }
        self.table.mappings_count += 1;
        p2m_alloc_size += (pgtables << X86_PAGE_SHIFT) as u64;
        let p2m_segment = domain.alloc_segment(0, p2m_alloc_size).await?;
        Ok(Some(p2m_segment))
    }

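    /// Allocates the special pages a PV guest needs: the p2m list, the
    /// start info page, the xenstore page, the console page, the page
    /// tables, and the boot stack.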
    async fn alloc_magic_pages(&mut self, domain: &mut BootDomain) -> Result<()> {
        // If the kernel did not reserve a well-formed region for the p2m
        // list, allocate it up front alongside the other magic pages.
        if domain.image_info.virt_p2m_base >= domain.image_info.virt_base
            || (domain.image_info.virt_p2m_base & ((1 << self.page_shift()) - 1)) != 0
        {
            self.p2m_segment = self.alloc_p2m_segment(domain).await?;
        }
        self.start_info_segment = Some(domain.alloc_page()?);
        self.xenstore_segment = Some(domain.alloc_page()?);
        domain.store_mfn = domain.phys.p2m[self.xenstore_segment.as_ref().unwrap().pfn as usize];
        let evtchn = domain.call.evtchn_alloc_unbound(domain.domid, 0).await?;
        let page = domain.alloc_page()?;
        domain.console_evtchn = evtchn;
        domain.console_mfn = domain.phys.p2m[page.pfn as usize];
        self.page_table_segment = self.alloc_page_tables(domain).await?;
        self.boot_stack_segment = Some(domain.alloc_page()?);

        if domain.virt_pgtab_end > 0 {
            domain.alloc_padding_pages(domain.virt_pgtab_end)?;
        }

        // Otherwise, allocate the p2m list now and place it at the virtual
        // base the kernel reserved for it.
        if self.p2m_segment.is_none() {
            if let Some(mut p2m_segment) = self.alloc_p2m_segment(domain).await? {
                p2m_segment.vstart = domain.image_info.virt_p2m_base;
                self.p2m_segment = Some(p2m_segment);
            }
        }

        Ok(())
    }

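    /// Writes the p2m list into the guest and fills in every page table
    /// entry, wiring each level's entries to the level below with the
    /// protection bits from `get_pg_prot`.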
    async fn setup_page_tables(&mut self, domain: &mut BootDomain) -> Result<()> {
        let p2m_segment = self
            .p2m_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("p2m_segment missing"))?;
        let p2m_guest = unsafe {
            slice::from_raw_parts_mut(
                p2m_segment.addr as *mut u64,
                domain.phys.p2m_size() as usize,
            )
        };
        copy(p2m_guest, &domain.phys.p2m);

        for l in (0usize..X86_PGTABLE_LEVELS as usize).rev() {
            for m1 in 0usize..self.table.mappings_count {
                let map1 = &self.table.mappings[m1];
                let from = map1.levels[l].from;
                let to = map1.levels[l].to;
                let pg_ptr = domain.phys.pfn_to_ptr(map1.levels[l].pfn, 0).await? as *mut u64;
                for m2 in 0usize..self.table.mappings_count {
                    let map2 = &self.table.mappings[m2];
                    let lvl = if l > 0 {
                        &map2.levels[l - 1]
                    } else {
                        &map2.area
                    };

                    if l > 0 && lvl.pgtables == 0 {
                        continue;
                    }

                    if lvl.from >= to || lvl.to <= from {
                        continue;
                    }

                    let p_s = (std::cmp::max(from, lvl.from) - from)
                        >> (X86_PAGE_SHIFT + l as u64 * X86_PGTABLE_LEVEL_SHIFT);
                    let p_e = (std::cmp::min(to, lvl.to) - from)
                        >> (X86_PAGE_SHIFT + l as u64 * X86_PGTABLE_LEVEL_SHIFT);
                    let rhs = X86_PAGE_SHIFT as usize + l * X86_PGTABLE_LEVEL_SHIFT as usize;
                    let mut pfn = ((std::cmp::max(from, lvl.from) - lvl.from) >> rhs) + lvl.pfn;

                    debug!(
                        "setup_page_tables lvl={} map_1={} map_2={} pfn={:#x} p_s={:#x} p_e={:#x}",
                        l, m1, m2, pfn, p_s, p_e
                    );

                    // Write entries [p_s, p_e] of this table, pointing them
                    // at successive frames of the level below.
                    let pg = unsafe { slice::from_raw_parts_mut(pg_ptr, (p_e + 1) as usize) };
                    for p in p_s..p_e + 1 {
                        let prot = self.get_pg_prot(l, pfn);
                        let pfn_paddr = domain.phys.p2m[pfn as usize] << X86_PAGE_SHIFT;
                        pg[p as usize] = pfn_paddr | prot;
                        pfn += 1;
                    }
                }
            }
        }
        Ok(())
    }

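    /// Zeroes the shared info page and masks event channel upcalls on
    /// every vcpu until the guest unmasks them itself.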
    async fn setup_shared_info(
        &mut self,
        domain: &mut BootDomain,
        shared_info_frame: u64,
    ) -> Result<()> {
        let info = domain
            .phys
            .map_foreign_pages(shared_info_frame, X86_PAGE_SIZE)
            .await? as *mut SharedInfo;
        unsafe {
            let size = size_of::<SharedInfo>();
            let info_as_buff = slice::from_raw_parts_mut(info as *mut u8, size);
            info_as_buff.fill(0);
            for i in 0..32 {
                (*info).vcpu_info[i].evtchn_upcall_mask = 1;
            }
            trace!("setup_shared_info shared_info={:?}", *info);
        }
        Ok(())
    }

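    /// Fills in the start info page: magic, memory layout, page table and
    /// p2m locations, xenstore and console channels, initrd location, and
    /// the kernel command line.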
    async fn setup_start_info(
        &mut self,
        domain: &mut BootDomain,
        shared_info_frame: u64,
    ) -> Result<()> {
        let start_info_segment = self
            .start_info_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("start_info_segment missing"))?;

        let ptr = domain.phys.pfn_to_ptr(start_info_segment.pfn, 1).await?;
        let byte_slice =
            unsafe { slice::from_raw_parts_mut(ptr as *mut u8, X86_PAGE_SIZE as usize) };
        byte_slice.fill(0);
        let info = ptr as *mut StartInfo;

        let page_table_segment = self
            .page_table_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("page_table_segment missing"))?;
        let p2m_segment = self
            .p2m_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("p2m_segment missing"))?;
        let xenstore_segment = self
            .xenstore_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("xenstore_segment missing"))?;
        unsafe {
            for (i, c) in X86_GUEST_MAGIC.chars().enumerate() {
                (*info).magic[i] = c as c_char;
            }
            (*info).magic[X86_GUEST_MAGIC.len()] = 0 as c_char;
            (*info).nr_pages = domain.total_pages;
            (*info).shared_info = shared_info_frame << X86_PAGE_SHIFT;
            (*info).pt_base = page_table_segment.vstart;
            (*info).nr_pt_frames = self.table.mappings[0].area.pgtables as u64;
            (*info).mfn_list = p2m_segment.vstart;
            (*info).first_p2m_pfn = p2m_segment.pfn;
            (*info).nr_p2m_frames = p2m_segment.pages;
            (*info).flags = 0;
            (*info).store_evtchn = domain.store_evtchn;
            (*info).store_mfn = domain.phys.p2m[xenstore_segment.pfn as usize];
            (*info).console.mfn = domain.console_mfn;
            (*info).console.evtchn = domain.console_evtchn;
            if let Some(ref initrd_segment) = domain.initrd_segment {
                (*info).mod_start = initrd_segment.vstart;
                (*info).mod_len = initrd_segment.size;
            }
            for (i, c) in domain.cmdline.chars().enumerate() {
                (*info).cmdline[i] = c as c_char;
            }
            (*info).cmdline[MAX_GUEST_CMDLINE - 1] = 0;
            trace!("setup_start_info start_info={:?}", *info);
        }
        Ok(())
    }

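    /// Unmaps the page table and p2m segments from dom0, then asks Xen to
    /// pin the top-level table as an L4 page table. Pinning requires that
    /// no writable mappings of the tables remain.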
    async fn bootlate(&mut self, domain: &mut BootDomain) -> Result<()> {
        let p2m_segment = self
            .p2m_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("p2m_segment missing"))?;
        let page_table_segment = self
            .page_table_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("page_table_segment missing"))?;
        let pg_pfn = page_table_segment.pfn;
        let pg_mfn = domain.phys.p2m[pg_pfn as usize];
        domain.phys.unmap(pg_pfn)?;
        domain.phys.unmap(p2m_segment.pfn)?;

        domain
            .call
            .mmuext(domain.domid, MMUEXT_PIN_L4_TABLE, pg_mfn, 0)
            .await?;
        Ok(())
    }

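    /// Seeds the grant table with entries for the console and xenstore
    /// pages so dom0 can map them before the guest runs.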
    async fn gnttab_seed(&mut self, domain: &mut BootDomain) -> Result<()> {
        let xenstore_segment = self
            .xenstore_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("xenstore_segment missing"))?;

        let console_gfn = domain.console_mfn as usize;
        let xenstore_gfn = domain.phys.p2m[xenstore_segment.pfn as usize];
        let addr = domain
            .call
            .mmap(0, 1 << XEN_PAGE_SHIFT)
            .await
            .ok_or(Error::MmapFailed)?;
        domain
            .call
            .map_resource(domain.domid, 1, 0, 0, 1, addr)
            .await?;
        // GTF_permit_access entries granting dom0 access to the console
        // and xenstore frames.
        let entries = unsafe { slice::from_raw_parts_mut(addr as *mut GrantEntry, 2) };
        entries[0].flags = 1 << 0;
        entries[0].domid = 0;
        entries[0].frame = console_gfn as u32;
        entries[1].flags = 1 << 0;
        entries[1].domid = 0;
        entries[1].frame = xenstore_gfn as u32;
        unsafe {
            let result = munmap(addr as *mut c_void, 1 << XEN_PAGE_SHIFT);
            if result != 0 {
                // munmap returns -1 and sets errno on failure.
                return Err(Error::UnmapFailed(Errno::last()));
            }
        }
        Ok(())
    }

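    /// Builds the boot vcpu context: entry point, boot stack, start info
    /// pointer in rsi, flat PV segment selectors, and cr3 pointing at the
    /// pinned top-level page table.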
    async fn vcpu(&mut self, domain: &mut BootDomain) -> Result<()> {
        let page_table_segment = self
            .page_table_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("page_table_segment missing"))?;
        let boot_stack_segment = self
            .boot_stack_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("boot_stack_segment missing"))?;
        let start_info_segment = self
            .start_info_segment
            .as_ref()
            .ok_or(Error::MemorySetupFailed("start_info_segment missing"))?;
        let pg_pfn = page_table_segment.pfn;
        let pg_mfn = domain.phys.p2m[pg_pfn as usize];
        let mut vcpu = x8664VcpuGuestContext::default();
        vcpu.user_regs.rip = domain.image_info.virt_entry;
        vcpu.user_regs.rsp =
            domain.image_info.virt_base + (boot_stack_segment.pfn + 1) * self.page_size();
        vcpu.user_regs.rsi =
            domain.image_info.virt_base + start_info_segment.pfn * self.page_size();
        // IF set so the guest starts with interrupts enabled.
        vcpu.user_regs.rflags = 1 << 9;
        vcpu.debugreg[6] = 0xffff0ff0;
        vcpu.debugreg[7] = 0x00000400;
        vcpu.flags = VGCF_IN_KERNEL | VGCF_ONLINE;
        let cr3_pfn = pg_mfn;
        debug!("cr3: pfn {:#x} mfn {:#x}", page_table_segment.pfn, cr3_pfn);
        vcpu.ctrlreg[3] = cr3_pfn << 12;
        vcpu.user_regs.ds = 0x0;
        vcpu.user_regs.es = 0x0;
        vcpu.user_regs.fs = 0x0;
        vcpu.user_regs.gs = 0x0;
        // Xen's flat selectors for a 64-bit PV kernel (which runs in ring 3).
        vcpu.user_regs.ss = 0xe02b;
        vcpu.user_regs.cs = 0xe033;
        vcpu.kernel_ss = vcpu.user_regs.ss as u64;
        vcpu.kernel_sp = vcpu.user_regs.rsp;
        trace!("vcpu context: {:?}", vcpu);
        domain
            .call
            .set_vcpu_context(domain.domid, 0, VcpuGuestContextAny { value: vcpu })
            .await?;
        Ok(())
    }

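    /// Initializes the hypercall page if the kernel image requested one.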
    async fn setup_hypercall_page(&mut self, domain: &mut BootDomain) -> Result<()> {
        if domain.image_info.virt_hypercall == u64::MAX {
            return Ok(());
        }
        let pfn =
            (domain.image_info.virt_hypercall - domain.image_info.virt_base) >> self.page_shift();
        let mfn = domain.phys.p2m[pfn as usize];
        domain.call.hypercall_init(domain.domid, mfn).await?;
        Ok(())
    }
}