// memflow/mem/virt_translate/mmu/translate_data.rs

use crate::iter::SplitAtIndex;
use crate::types::{umem, Address};

use super::{ArchMmuDef, ArchMmuSpec, MmuTranslationBase};

use std::cmp::Ordering;

use super::MVec;

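// Scratch vectors used by the MMU translation pipeline: `TranslateVec` holds
// the work chunks of a walk step, `TranslateDataVec` the address ranges (and
// attached buffers) belonging to them.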
pub type TranslateVec<'a> = MVec<'a, TranslationChunk<Address>>;
pub type TranslateDataVec<'a, T> = MVec<'a, TranslateData<T>>;

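/// Reinterprets the `MVec`'s backing-buffer lifetime as the (shorter)
/// lifetime of the borrow itself.
///
/// Used in `split_chunk` below to give all scratch vectors a single lifetime
/// so that they can be `mem::swap`ped.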
unsafe fn shorten_datavec_lifetime<'a: 'b, 'b, O>(
    r: &'b mut TranslateDataVec<'a, O>,
) -> &'b mut TranslateDataVec<'b, O> {
    std::mem::transmute(r)
}

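/// As `shorten_datavec_lifetime`, but for a (chunk, data) vector pair.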
unsafe fn shorten_pair_lifetime<'a: 't, 'b: 't, 't, O>(
    r: &'t mut (TranslateVec<'a>, TranslateDataVec<'b, O>),
) -> &'t mut (TranslateVec<'t>, TranslateDataVec<'t, O>) {
    std::mem::transmute(r)
}

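/// An address range scheduled for translation, together with its buffer.
///
/// `addr` is the address being translated, while `meta_addr` is an associated
/// address that advances in lockstep with `addr` whenever the range is split.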
#[derive(Debug)]
pub struct TranslateData<T> {
    pub addr: Address,
    pub meta_addr: Address,
    pub buf: T,
}

impl<T: SplitAtIndex> TranslateData<T> {
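    /// Splits at an absolute address: the left part covers everything below
    /// `addr`, the right part everything from `addr` upwards.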
    pub fn split_at_address(self, addr: Address) -> (Option<Self>, Option<Self>) {
        let sub = self.addr.to_umem();
        self.split_at(addr.to_umem().saturating_sub(sub))
    }

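    /// Splits at an absolute address, with the split index counted from the
    /// end of the range (via `split_at_rev`).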
    pub fn split_at_address_rev(self, addr: Address) -> (Option<Self>, Option<Self>) {
        let base = self.addr + self.length();
        self.split_at_rev(base.to_umem().saturating_sub(addr.to_umem()))
    }
}

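// Ordering and equality of `TranslateData` consider only the start address.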
impl<T: SplitAtIndex> Ord for TranslateData<T> {
    fn cmp(&self, other: &Self) -> Ordering {
        self.addr.cmp(&other.addr)
    }
}

impl<T: SplitAtIndex> Eq for TranslateData<T> {}

impl<T> PartialOrd for TranslateData<T> {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        self.addr.partial_cmp(&other.addr)
    }
}

impl<T> PartialEq for TranslateData<T> {
    fn eq(&self, other: &Self) -> bool {
        self.addr == other.addr
    }
}

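// Splitting a `TranslateData` splits the underlying buffer and offsets both
// `addr` and `meta_addr` of the right-hand part by the split index.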
impl<T: SplitAtIndex> SplitAtIndex for TranslateData<T> {
    fn split_at(self, idx: umem) -> (Option<Self>, Option<Self>)
    where
        Self: Sized,
    {
        let addr = self.addr;
        let meta_addr = self.meta_addr;
        let (bleft, bright) = self.buf.split_at(idx);

        (
            bleft.map(|buf| TranslateData {
                addr,
                meta_addr,
                buf,
            }),
            bright.map(|buf| TranslateData {
                buf,
                addr: addr + idx,
                meta_addr: meta_addr + idx,
            }),
        )
    }

    unsafe fn split_at_mut(&mut self, idx: umem) -> (Option<Self>, Option<Self>)
    where
        Self: Sized,
    {
        let addr = self.addr;
        let meta_addr = self.meta_addr;
        let (bleft, bright) = self.buf.split_at_mut(idx);

        (
            bleft.map(|buf| TranslateData {
                addr,
                meta_addr,
                buf,
            }),
            bright.map(|buf| TranslateData {
                buf,
                addr: addr + idx,
                meta_addr: meta_addr + idx,
            }),
        )
    }

    fn length(&self) -> umem {
        self.buf.length()
    }

    fn size_hint(&self) -> usize {
        self.buf.size_hint()
    }
}

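// Page table entry flags carried along between translation steps.
//
// Illustrative use of the builder-style setters defined on `FlagsType` below:
//
//     let flags = FlagsType::NONE.writeable(true).nx(false);
//     assert!(flags.contains(FlagsType::WRITEABLE));
//     assert!(!flags.contains(FlagsType::NX));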
bitflags! {
    #[repr(transparent)]
    #[cfg_attr(feature = "serde", derive(::serde::Serialize, ::serde::Deserialize))]
    #[cfg_attr(feature = "abi_stable", derive(::abi_stable::StableAbi))]
    pub struct FlagsType: u8 {
        const NONE = 0b00;
        const WRITEABLE = 0b01;
        const NX = 0b10;
    }
}

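/// A unit of work for the page-table walk: one page table address (`pt_addr`)
/// together with the `addr_count` queued address ranges falling inside
/// `min_addr..max_addr`, to be translated at walk depth `step`.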
#[derive(Debug)]
pub struct TranslationChunk<T> {
    pub pt_addr: T,
    pub addr_count: usize,
    pub min_addr: Address,
    max_addr: Address,
    pub step: usize,
    pub prev_flags: FlagsType,
}

impl FlagsType {
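    /// Returns the flags with the NX (no-execute) bit set to `flag`.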
    pub fn nx(mut self, flag: bool) -> Self {
        self &= !(FlagsType::NX);
        if flag {
            self | FlagsType::NX
        } else {
            self
        }
    }

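    /// Returns the flags with the writeable bit set to `flag`.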
    pub fn writeable(mut self, flag: bool) -> Self {
        self &= !(FlagsType::WRITEABLE);
        if flag {
            self | FlagsType::WRITEABLE
        } else {
            self
        }
    }
}

impl TranslationChunk<Address> {
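    /// Folds the writeable/NX bits of the current page table entry into the
    /// accumulated `prev_flags`, using the architecture's bit extractors.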
    pub fn update_flags(&mut self, mmu_def: &ArchMmuDef) {
        self.prev_flags = FlagsType::NONE
            .writeable((mmu_def.writeable_bit)(
                self.pt_addr,
                self.prev_flags.contains(FlagsType::WRITEABLE),
            ))
            .nx((mmu_def.nx_bit)(
                self.pt_addr,
                self.prev_flags.contains(FlagsType::NX),
            ));
    }
}

impl<T> TranslationChunk<T> {
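    /// Creates an empty chunk with an inverted address span (`min_addr` at the
    /// maximum value, `max_addr` at 0) so that the first `push_data` call
    /// establishes the real bounds.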
    pub fn new(pt_addr: T, prev_flags: FlagsType) -> Self {
        let (min, max) = (!0u64, 0u64);
        Self::with_minmax(pt_addr, prev_flags, min.into(), max.into())
    }

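    /// Creates an empty chunk with an explicitly given address span.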
    pub fn with_minmax(
        pt_addr: T,
        prev_flags: FlagsType,
        min_addr: Address,
        max_addr: Address,
    ) -> Self {
        Self {
            pt_addr,
            addr_count: 0,
            step: 0,
            min_addr,
            max_addr,
            prev_flags,
        }
    }
}

impl<T: MmuTranslationBase> TranslationChunk<T> {
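    /// Queues an address range on this chunk, growing its
    /// `min_addr..max_addr` span accordingly. The data itself is stored on
    /// `stack`; the chunk only tracks how many entries belong to it.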
    pub fn push_data<U: SplitAtIndex>(
        &mut self,
        data: TranslateData<U>,
        stack: &mut TranslateDataVec<U>,
    ) {
        self.min_addr = std::cmp::min(self.min_addr, data.addr);
        self.max_addr = std::cmp::max(self.max_addr, data.addr + data.length());
        self.addr_count += 1;
        stack.push(data);
    }

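    /// Pops the most recently queued address range belonging to this chunk.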
    pub fn pop_data<U: SplitAtIndex>(
        &mut self,
        stack: &mut TranslateDataVec<U>,
    ) -> Option<TranslateData<U>> {
        if self.addr_count > 0 {
            self.addr_count -= 1;
            stack.pop()
        } else {
            None
        }
    }

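    /// Estimates an upper bound on the number of address ranges the next walk
    /// step may produce: each queued range could be split across every
    /// step-sized window covered by the chunk's span.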
    pub fn next_max_addr_count(&self, spec: &ArchMmuSpec) -> umem {
        let step_size = spec.page_size_step_unchecked(self.step + 1);

        let addr_diff = self.max_addr.wrapping_sub(self.min_addr).to_umem();
        let add = (addr_diff % step_size != 0) as umem;

        self.addr_count as umem * (addr_diff / step_size + add)
    }

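    /// Splits this chunk into per-page-table-entry subchunks for the next
    /// translation step. Subchunks and their address ranges are emitted into
    /// `out_target` while it has enough spare capacity, and are deferred to
    /// `wait_target` otherwise.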
    pub fn split_chunk<U: SplitAtIndex>(
        mut self,
        spec: &ArchMmuSpec,
        (addr_stack, tmp_addr_stack): (&mut TranslateDataVec<U>, &mut TranslateDataVec<U>),
        out_target: &mut (TranslateVec, TranslateDataVec<U>),
        wait_target: &mut (TranslateVec, TranslateDataVec<U>),
    ) {
        // Unify the lifetimes of the scratch vectors so they can be swapped below.
        let mut addr_stack = unsafe { shorten_datavec_lifetime(addr_stack) };
        let mut tmp_addr_stack = unsafe { shorten_datavec_lifetime(tmp_addr_stack) };
        let mut out_target = unsafe { shorten_pair_lifetime(out_target) };
        let mut wait_target = unsafe { shorten_pair_lifetime(wait_target) };

        let align_as = spec.page_size_step_unchecked(self.step);
        let step_size = spec.page_size_step_unchecked(self.step + 1);

        // Step-size aligned bounds of the chunk's span.
        let upper = (self.max_addr - 1usize).as_mem_aligned(step_size).to_umem();
        let lower = self.min_addr.as_mem_aligned(step_size).to_umem();

        let mut cur_max_addr: umem = !0;

        // Walk the span window by window, from the highest address downwards.
        for (cnt, addr) in (0..=((upper - lower) / step_size))
            .map(|i| upper - i * step_size)
            .enumerate()
        {
            // Skip windows that none of the remaining data reaches into.
            if addr > cur_max_addr {
                continue;
            }

            cur_max_addr = 0;

            let remaining = (addr - lower) / step_size + 1;

            // Emit into `out_target` only if it can absorb the worst case for
            // all remaining windows; otherwise defer to `wait_target`.
            let (chunks_out, addrs_out) = if out_target.0.capacity() as umem
                >= out_target.0.len() as umem + remaining
                && out_target.1.capacity() as umem
                    >= out_target.1.len() as umem + self.addr_count as umem * remaining
            {
                &mut out_target
            } else {
                &mut wait_target
            };

            let addr = Address::from(addr);
            let addr_aligned = addr.as_mem_aligned(align_as);
            let index = (addr - addr_aligned) as umem / step_size;
            let (pt_addr, _) = self.pt_addr.get_pt_by_index(index as usize);
            let pt_addr = spec.vtop_step(pt_addr, addr, self.step);

            let mut new_chunk = TranslationChunk::new(pt_addr, self.prev_flags);

            // Partition the queued ranges at the window boundary: parts at or
            // above `addr` belong to the new subchunk, the rest is kept for
            // the following (lower) windows.
            for _ in 0..self.addr_count {
                let data = self.pop_data(addr_stack).unwrap();

                debug_assert!(
                    data.addr >= self.min_addr,
                    "__ {} {:x}+{:x} | {:#?}",
                    cnt,
                    data.addr,
                    data.length(),
                    &self
                );
                debug_assert!(
                    data.addr + data.length() <= self.max_addr,
                    "{} {:x}+{:x} | {:#?}",
                    cnt,
                    data.addr,
                    data.length(),
                    &self
                );

                let (left, right) = data.split_at_address(addr);

                if let Some(data) = right {
                    new_chunk.push_data(data, addrs_out);
                }

                if let Some(data) = left {
                    cur_max_addr =
                        std::cmp::max((data.addr + data.length()).to_umem(), cur_max_addr);
                    self.push_data(data, tmp_addr_stack);
                }
            }

            if new_chunk.addr_count > 0 {
                new_chunk.step = self.step;
                chunks_out.push(new_chunk);
            }

            // The leftovers collected on `tmp_addr_stack` become the working
            // stack for the next window.
            std::mem::swap(&mut addr_stack, &mut tmp_addr_stack);
        }

        debug_assert!(self.addr_count == 0);
    }
}