use crate::error::Result;
use crate::sys::XEN_PAGE_SHIFT;
use crate::Error;
use libc::munmap;
use log::debug;
use nix::errno::Errno;
use std::ffi::c_void;
use std::slice;

use xencall::sys::MmapEntry;
use xencall::XenCall;

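/// A single mapped run of guest memory: `count` pages starting at guest
/// frame `pfn`, mapped into this process at virtual address `ptr`.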
#[derive(Debug, Clone)]
pub struct PhysicalPage {
    pfn: u64,
    pub ptr: u64,
    count: u64,
}

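/// Tracks which physical pages of a domain are currently mapped into this
/// process, along with the domain's physical-to-machine (p2m) table.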
pub struct PhysicalPages {
    page_shift: u64,
    domid: u32,
    pub p2m: Vec<u64>,
    call: XenCall,
    pages: Vec<PhysicalPage>,
}

impl PhysicalPages {
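    /// Creates an empty tracker for the given domain.
    ///
    /// A minimal usage sketch, assuming `call` is an already-established
    /// `XenCall` handle and `domid` refers to an existing domain
    /// (`start_pfn` is a hypothetical guest frame number):
    ///
    /// ```ignore
    /// let mut pages = PhysicalPages::new(call, domid, XEN_PAGE_SHIFT as u64);
    /// pages.clear_pages(start_pfn, 4).await?; // maps and zeroes four pages
    /// ```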
    pub fn new(call: XenCall, domid: u32, page_shift: u64) -> PhysicalPages {
        PhysicalPages {
            page_shift,
            domid,
            p2m: Vec::new(),
            call,
            pages: Vec::new(),
        }
    }

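    /// Loads the domain's p2m table, used to translate guest pfns into
    /// machine frame numbers when mapping.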
    pub fn load_p2m(&mut self, p2m: Vec<u64>) {
        self.p2m = p2m;
    }

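    /// Returns the number of entries in the loaded p2m table.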
    pub fn p2m_size(&self) -> u64 {
        self.p2m.len() as u64
    }

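    /// Returns a pointer to the mapping of `pfn`, reusing an existing run
    /// when it already covers the requested range and mapping `count` pages
    /// on demand otherwise. With `count == 0`, only an existing mapping is
    /// looked up and no new mapping is created.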
    pub async fn pfn_to_ptr(&mut self, pfn: u64, count: u64) -> Result<u64> {
        for page in &self.pages {
            // The requested pfn lies at or beyond the end of this run.
            if pfn >= page.pfn + page.count {
                continue;
            }

            if count > 0 {
                // The requested range ends before this run begins.
                if (pfn + count) <= page.pfn {
                    continue;
                }

                // The range overlaps this run but is not fully contained in it.
                if pfn < page.pfn || (pfn + count) > page.pfn + page.count {
                    return Err(Error::MemorySetupFailed("pfn is out of range"));
                }
            } else if pfn < page.pfn {
                // Lookup only: the pfn must fall inside this run.
                continue;
            }

            return Ok(page.ptr + ((pfn - page.pfn) << self.page_shift));
        }

        if count == 0 {
            return Err(Error::MemorySetupFailed("page count is zero"));
        }

        self.pfn_alloc(pfn, count).await
    }

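    /// Maps `count` pages starting at `pfn` into this process and records
    /// the new run. When a p2m table is loaded, pfns are translated to
    /// machine frames through it; otherwise the pfn is used directly.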
    async fn pfn_alloc(&mut self, pfn: u64, count: u64) -> Result<u64> {
        let mut entries = vec![MmapEntry::default(); count as usize];
        for (i, entry) in entries.iter_mut().enumerate() {
            if !self.p2m.is_empty() {
                entry.mfn = self.p2m[pfn as usize + i];
            } else {
                entry.mfn = pfn + i as u64;
            }
        }
        // With a chunk size of one Xen page, num_per_entry is always 1 here.
        let chunk_size = 1 << XEN_PAGE_SHIFT;
        let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
        let num = num_per_entry * count as usize;
        let mut pfns = vec![u64::MAX; num];
        for i in 0..count as usize {
            for j in 0..num_per_entry {
                pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
            }
        }

        let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
        let addr = self
            .call
            .mmap(0, actual_mmap_len)
            .await
            .ok_or(Error::MmapFailed)?;
        debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
        let result = self
            .call
            .mmap_batch(self.domid, num as u64, addr, pfns)
            .await?;
        if result != 0 {
            return Err(Error::MmapFailed);
        }
        let page = PhysicalPage {
            pfn,
            ptr: addr,
            count,
        };
        debug!(
            "alloc_pfn {:#x}+{:#x} at {:#x}",
            page.pfn, page.count, page.ptr
        );
        self.pages.push(page);
        Ok(addr)
    }

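    /// Maps `size` bytes of foreign memory starting at machine frame `mfn`
    /// and records the run under that frame number.
    ///
    /// A sketch, assuming `console_mfn` is a hypothetical machine frame
    /// obtained elsewhere:
    ///
    /// ```ignore
    /// let ptr = pages.map_foreign_pages(console_mfn, 1 << XEN_PAGE_SHIFT).await?;
    /// ```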
    pub async fn map_foreign_pages(&mut self, mfn: u64, size: u64) -> Result<u64> {
        let count = (size >> XEN_PAGE_SHIFT) as usize;
        let mut entries = vec![MmapEntry::default(); count];
        for (i, entry) in entries.iter_mut().enumerate() {
            entry.mfn = mfn + i as u64;
        }
        // With a chunk size of one Xen page, num_per_entry is always 1 here.
        let chunk_size = 1 << XEN_PAGE_SHIFT;
        let num_per_entry = chunk_size >> XEN_PAGE_SHIFT;
        let num = num_per_entry * count;
        let mut pfns = vec![u64::MAX; num];
        for i in 0..count {
            for j in 0..num_per_entry {
                pfns[i * num_per_entry + j] = entries[i].mfn + j as u64;
            }
        }

        let actual_mmap_len = (num as u64) << XEN_PAGE_SHIFT;
        let addr = self
            .call
            .mmap(0, actual_mmap_len)
            .await
            .ok_or(Error::MmapFailed)?;
        debug!("mapped {:#x} foreign bytes at {:#x}", actual_mmap_len, addr);
        let result = self
            .call
            .mmap_batch(self.domid, num as u64, addr, pfns)
            .await?;
        if result != 0 {
            return Err(Error::MmapFailed);
        }
        let page = PhysicalPage {
            pfn: mfn,
            ptr: addr,
            count: count as u64,
        };
        debug!(
            "alloc_mfn {:#x}+{:#x} at {:#x}",
            page.pfn, page.count, page.ptr
        );
        self.pages.push(page);
        Ok(addr)
    }

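    /// Zeroes `count` pages starting at `pfn`, mapping them first if needed.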
    pub async fn clear_pages(&mut self, pfn: u64, count: u64) -> Result<()> {
        let ptr = self.pfn_to_ptr(pfn, count).await?;
        // SAFETY: pfn_to_ptr returned a mapping that covers exactly
        // `count` pages of `1 << page_shift` bytes each.
        let slice = unsafe {
            slice::from_raw_parts_mut(ptr as *mut u8, (count * (1 << self.page_shift)) as usize)
        };
        slice.fill(0);
        Ok(())
    }

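    /// Unmaps every tracked run and clears the tracking list.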
    pub fn unmap_all(&mut self) -> Result<()> {
        for page in &self.pages {
            unsafe {
                let err = munmap(
                    page.ptr as *mut c_void,
                    (page.count << self.page_shift) as usize,
                );
                if err != 0 {
                    // munmap returns -1 on failure; the actual error is in errno.
                    return Err(Error::UnmapFailed(Errno::last()));
                }
            }
        }
        self.pages.clear();
        Ok(())
    }

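    /// Unmaps the single run that starts at `pfn`, failing if no such run
    /// is tracked.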
    pub fn unmap(&mut self, pfn: u64) -> Result<()> {
        let Some((i, page)) = self.pages.iter().enumerate().find(|(_, x)| x.pfn == pfn) else {
            return Err(Error::MemorySetupFailed("cannot unmap missing page"));
        };

        unsafe {
            let err = munmap(
                page.ptr as *mut c_void,
                (page.count << self.page_shift) as usize,
            );
            debug!(
                "unmapped {:#x} foreign bytes at {:#x}",
                (page.count << self.page_shift) as usize,
                page.ptr
            );
            if err != 0 {
                // munmap returns -1 on failure; the actual error is in errno.
                return Err(Error::UnmapFailed(Errno::last()));
            }
            self.pages.remove(i);
        }
        Ok(())
    }
}