use std::ops::Index;
use std::os::fd::{AsRawFd, BorrowedFd};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, RwLock};
use std::{io, ptr};
use vm_memory::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice};
use vm_memory::mmap::NewBitmap;
use vm_memory::{Address, GuestMemoryRegion};

/// Size in bytes of the pages tracked by the dirty log, as fixed by the
/// vhost-user protocol.
const LOG_PAGE_SIZE: usize = 0x1000;
/// Number of pages tracked per log word (one bit per page, one byte per word).
const LOG_WORD_SIZE: usize = u8::BITS as usize;

/// A `Bitmap` whose backing storage can be replaced at runtime, e.g. when the
/// frontend sends a new dirty-log memory area.
pub trait BitmapReplace: Bitmap {
    type InnerBitmap: MemRegionBitmap;

    /// Replaces the bitmap's backing storage.
    fn replace(&self, bitmap: Self::InnerBitmap);
}

/// A bitmap relative to a single guest memory region.
pub trait MemRegionBitmap: Sized {
    /// Creates a new bitmap for `region`, backed by the log memory `logmem`.
    fn new<R: GuestMemoryRegion>(region: &R, logmem: Arc<MmapLogReg>) -> io::Result<Self>;
}

// The unit bitmap is the no-logging placeholder: replacing it is a logic
// error, since a backend that negotiated VHOST_USER_PROTOCOL_F_LOG_SHMFD
// must use a real bitmap type.
impl BitmapReplace for () {
    type InnerBitmap = ();

    fn replace(&self, _bitmap: ()) {
        panic!("The unit bitmap () must not be used if VHOST_USER_PROTOCOL_F_LOG_SHMFD is set");
    }
}

impl MemRegionBitmap for () {
    fn new<R: GuestMemoryRegion>(_region: &R, _logmem: Arc<MmapLogReg>) -> io::Result<Self> {
        Err(io::Error::from(io::ErrorKind::Unsupported))
    }
}

/// A `Bitmap` for a guest memory region, backed by a memory-mapped
/// vhost-user dirty log.
#[derive(Default, Debug, Clone)]
pub struct BitmapMmapRegion {
    inner: Arc<RwLock<Option<AtomicBitmapMmap>>>,
    // Offset of this view into the region; `slice_at` advances it.
    base_address: usize,
}

impl Bitmap for BitmapMmapRegion {
    fn mark_dirty(&self, offset: usize, len: usize) {
        let inner = self.inner.read().unwrap();
        if let Some(bitmap) = inner.as_ref() {
            if let Some(absolute_offset) = self.base_address.checked_add(offset) {
                bitmap.mark_dirty(absolute_offset, len);
            }
        }
    }

    fn dirty_at(&self, offset: usize) -> bool {
        let inner = self.inner.read().unwrap();
        inner
            .as_ref()
            .is_some_and(|bitmap| bitmap.dirty_at(self.base_address.saturating_add(offset)))
    }

    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice<'_>>::S {
        Self {
            inner: Arc::clone(&self.inner),
            base_address: self.base_address.saturating_add(offset),
        }
    }
}

impl BitmapReplace for BitmapMmapRegion {
    type InnerBitmap = AtomicBitmapMmap;

    fn replace(&self, bitmap: AtomicBitmapMmap) {
        let mut inner = self.inner.write().unwrap();
        inner.replace(bitmap);
    }
}

impl BitmapSlice for BitmapMmapRegion {}

impl WithBitmapSlice<'_> for BitmapMmapRegion {
    type S = Self;
}

impl NewBitmap for BitmapMmapRegion {
    fn with_len(_len: usize) -> Self {
        Self::default()
    }
}

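// How the pieces fit together (a sketch, not a verbatim walkthrough; in
// practice the file descriptor, offset and size come from the frontend's
// VHOST_USER_SET_LOG_BASE message):
//
//     let logmem = Arc::new(MmapLogReg::from_file(fd, offset, size)?);
//     let bitmap = BitmapMmapRegion::default(); // inert until a log is installed
//     bitmap.replace(AtomicBitmapMmap::new(&region, logmem)?);
//     bitmap.mark_dirty(0x2042, 1); // marks the page containing that offset
//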
/// A bitmap for a single guest memory region, expressed as a page range
/// within the shared memory-mapped log.
#[derive(Debug)]
pub struct AtomicBitmapMmap {
    logmem: Arc<MmapLogReg>,
    // Number of pages in the log before this region's first page.
    pages_before_region: usize,
    // Number of pages covered by this region.
    number_of_pages: usize,
}

impl MemRegionBitmap for AtomicBitmapMmap {
    // Creates a new bitmap relative to `region`, backed by `logmem`. Fails
    // if the region is empty or if the log is too small to cover it.
    fn new<R: GuestMemoryRegion>(region: &R, logmem: Arc<MmapLogReg>) -> io::Result<Self> {
        let region_start_addr: usize = region.start_addr().raw_value().io_try_into()?;
        let region_len: usize = region.len().io_try_into()?;
        if region_len == 0 {
            return Err(io::Error::from(io::ErrorKind::InvalidData));
        }

        // The log must be large enough to hold the word tracking the
        // region's last page.
        let region_end_addr = region_start_addr
            .checked_add(region_len - 1)
            .ok_or(io::Error::from(io::ErrorKind::InvalidData))?;
        let region_end_log_word = page_word(page_number(region_end_addr));
        if region_end_log_word >= logmem.len() {
            return Err(io::Error::from(io::ErrorKind::InvalidData));
        }

        let pages_before_region = page_number(region_start_addr);
        // Regions are page-aligned, so this is the number of pages spanned.
        let number_of_pages = page_number(region_len);

        Ok(Self {
            logmem,
            pages_before_region,
            number_of_pages,
        })
    }
}

impl AtomicBitmapMmap {
    // Marks pages dirty in the log. `offset` and `len` are relative to the
    // start of the region; pages falling outside the region are ignored.
    fn mark_dirty(&self, offset: usize, len: usize) {
        if len == 0 {
            return;
        }

        let first_page = page_number(offset);
        let last_page = page_number(offset.saturating_add(len - 1));
        for page in first_page..=last_page {
            // Ignore pages outside the region.
            if page >= self.number_of_pages {
                break;
            }

            let page = self.pages_before_region + page;
            self.logmem[page_word(page)].fetch_or(1 << page_bit(page), Ordering::Relaxed);
        }
    }

    // Returns whether the page at `offset` (relative to the start of the
    // region) is dirty. Offsets outside the region read back as clean.
    fn dirty_at(&self, offset: usize) -> bool {
        let page = page_number(offset);
        if page >= self.number_of_pages {
            return false;
        }

        let page = self.pages_before_region + page;
        let bit = self.logmem[page_word(page)].load(Ordering::Relaxed) & (1 << page_bit(page));
        bit != 0
    }
}

/// A dirty-log memory area shared with the frontend through `mmap`.
#[derive(Debug)]
pub struct MmapLogReg {
    addr: *const AtomicU8,
    len: usize,
}

// SAFETY: `addr` points to a shared mapping that stays valid for the
// lifetime of this object, and the mapping is only accessed through atomics.
unsafe impl Send for MmapLogReg {}

// SAFETY: Same as above; all access to the mapping goes through `AtomicU8`.
unsafe impl Sync for MmapLogReg {}

impl MmapLogReg {
    pub(crate) fn from_file(fd: BorrowedFd, offset: u64, len: u64) -> io::Result<Self> {
        let offset: isize = offset.io_try_into()?;
        let len: usize = len.io_try_into()?;

        // A mapping larger than `isize::MAX` bytes cannot be indexed soundly.
        if len > isize::MAX as usize {
            return Err(io::Error::from(io::ErrorKind::InvalidData));
        }

        // SAFETY: `fd` is a valid file descriptor and we are not using
        // `MAP_FIXED`, so the kernel chooses the address of the mapping.
        let addr = unsafe {
            libc::mmap(
                ptr::null_mut(),
                len as libc::size_t,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED,
                fd.as_raw_fd(),
                offset as libc::off_t,
            )
        };

        if addr == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }

        Ok(Self {
            addr: addr as *const AtomicU8,
            len,
        })
    }

    fn len(&self) -> usize {
        self.len
    }
}

impl Index<usize> for MmapLogReg {
    type Output = AtomicU8;

    fn index(&self, index: usize) -> &Self::Output {
        assert!(index < self.len);
        // SAFETY: `index` is within the mapping, which remains valid and
        // shared for the lifetime of `self`.
        unsafe { &*self.addr.add(index) }
    }
}

impl Drop for MmapLogReg {
    fn drop(&mut self) {
        // SAFETY: `addr` and `len` describe the mapping created in
        // `from_file`, and nothing else unmaps it.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.len as libc::size_t);
        }
    }
}

// Like `TryInto`, but maps the conversion error to an `io::Error`.
trait IoTryInto<T: TryFrom<Self>>: Sized {
    fn io_try_into(self) -> io::Result<T>;
}

impl<TySrc, TyDst> IoTryInto<TyDst> for TySrc
where
    TyDst: TryFrom<TySrc>,
    <TyDst as TryFrom<TySrc>>::Error: Send + Sync + std::error::Error + 'static,
{
    fn io_try_into(self) -> io::Result<TyDst> {
        self.try_into()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
    }
}

// Page number containing the byte at `addr`.
#[inline]
fn page_number(addr: usize) -> usize {
    addr / LOG_PAGE_SIZE
}

// Index of the log byte (word) holding the bit for `page`.
#[inline]
fn page_word(page: usize) -> usize {
    page / LOG_WORD_SIZE
}

// Bit position of `page` within its log byte.
#[inline]
fn page_bit(page: usize) -> usize {
    page % LOG_WORD_SIZE
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;
    use std::io::Write;
    use std::os::fd::AsFd;
    use vm_memory::{GuestAddress, GuestRegionMmap};
    use vmm_sys_util::tempfile::TempFile;

    // Helper: true if no byte in `[start, start + len)` is dirty.
    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| !b.dirty_at(offset))
    }

    // Helper: true if every byte in `[start, start + len)` is dirty.
    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| b.dirty_at(offset))
    }

    fn tmp_file(len: usize) -> File {
        let mut f = TempFile::new().unwrap().into_file();
        let buf = vec![0; len];
        f.write_all(buf.as_ref()).unwrap();
        f
    }

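    // Not part of the original suite: a quick sanity check of the
    // page/word/bit arithmetic above. With 4 KiB pages and 8 pages tracked
    // per log byte, page 9 should land in log byte 1, bit 1.
    #[test]
    fn test_page_arithmetic() {
        assert_eq!(page_number(0), 0);
        assert_eq!(page_number(LOG_PAGE_SIZE - 1), 0);
        assert_eq!(page_number(LOG_PAGE_SIZE), 1);
        assert_eq!(page_word(9), 1);
        assert_eq!(page_bit(9), 1);
    }
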
    fn test_all(b: &BitmapMmapRegion, len: usize) {
        assert!(range_is_clean(b, 0, len), "The bitmap should be clean");

        b.mark_dirty(0, len);
        assert!(range_is_dirty(b, 0, len), "The bitmap should be dirty");
    }

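    // Not part of the original suite: until `replace` installs a log, the
    // default bitmap should be inert, i.e. `mark_dirty` is a no-op and
    // nothing ever reads back as dirty.
    #[test]
    fn test_default_bitmap_is_inert() {
        let bitmap = BitmapMmapRegion::default();
        bitmap.mark_dirty(0, LOG_PAGE_SIZE);
        assert!(range_is_clean(&bitmap, 0, LOG_PAGE_SIZE));
    }
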
    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_region_bigger_than_log() {
        let mmap_offset: u64 = 0;
        // A single log byte covers only 8 pages.
        let mmap_size = 1;
        let f = tmp_file(mmap_size);

        let region_start_addr = GuestAddress(mmap_offset);
        // The region is larger than the log can track.
        let region_len = LOG_PAGE_SIZE * 16;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);

        assert!(log.is_err());
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_log_and_region_same_size() {
        let mmap_offset: u64 = 0;
        // 4 log bytes track exactly 32 pages.
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 32;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        test_all(&bitmap, region_len);
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_region_smaller_than_log() {
        let mmap_offset: u64 = 0;
        // 4 log bytes can track 32 pages; the region only spans 16.
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 16;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        test_all(&bitmap, region_len);
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_region_smaller_than_one_word() {
        let mmap_offset: u64 = 0;
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let region_start_addr = GuestAddress::new(mmap_offset);
        // The region covers 6 pages, fewer than the 8 tracked by one log byte.
        let region_len = LOG_PAGE_SIZE * 6;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        test_all(&bitmap, region_len);
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_two_regions_overlapping_word_first_dirty() {
        let mmap_offset: u64 = 0;
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        // Region 0 covers pages 0..=10, so it ends midway through the second
        // log byte (pages 8..=15).
        let region0_start_addr = GuestAddress::new(mmap_offset);
        let region0_len = LOG_PAGE_SIZE * 11;
        let region0: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region0_start_addr, region0_len, None).unwrap();

        let log0 = AtomicBitmapMmap::new(&region0, Arc::clone(&logmem));
        assert!(log0.is_ok());
        let log0 = log0.unwrap();
        let bitmap0 = BitmapMmapRegion::default();
        bitmap0.replace(log0);

        // Region 1 is page 14, sharing the second log byte with region 0.
        let region1_start_addr = GuestAddress::new(mmap_offset + LOG_PAGE_SIZE as u64 * 14);
        let region1_len = LOG_PAGE_SIZE;
        let region1: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region1_start_addr, region1_len, None).unwrap();

        let log1 = AtomicBitmapMmap::new(&region1, Arc::clone(&logmem));
        assert!(log1.is_ok());
        let log1 = log1.unwrap();

        let bitmap1 = BitmapMmapRegion::default();
        bitmap1.replace(log1);

        assert!(
            range_is_clean(&bitmap0, 0, region0_len),
            "The bitmap0 should be clean"
        );
        assert!(
            range_is_clean(&bitmap1, 0, region1_len),
            "The bitmap1 should be clean"
        );

        // Dirtying region 0 must not leak into region 1, even though both
        // regions share a log byte.
        bitmap0.mark_dirty(0, region0_len);

        assert!(
            range_is_dirty(&bitmap0, 0, region0_len),
            "The bitmap0 should be dirty"
        );
        assert!(
            range_is_clean(&bitmap1, 0, region1_len),
            "The bitmap1 should be clean"
        );
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_two_regions_overlapping_word_second_dirty() {
        let mmap_offset: u64 = 0;
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        // Region 0 covers pages 0..=10, region 1 is page 14; both share the
        // second log byte (pages 8..=15).
        let region0_start_addr = GuestAddress::new(mmap_offset);
        let region0_len = LOG_PAGE_SIZE * 11;
        let region0: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region0_start_addr, region0_len, None).unwrap();

        let log0 = AtomicBitmapMmap::new(&region0, Arc::clone(&logmem));
        assert!(log0.is_ok());
        let log0 = log0.unwrap();

        let bitmap0 = BitmapMmapRegion::default();
        bitmap0.replace(log0);

        let region1_start_addr = GuestAddress::new(mmap_offset + LOG_PAGE_SIZE as u64 * 14);
        let region1_len = LOG_PAGE_SIZE;
        let region1: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region1_start_addr, region1_len, None).unwrap();

        let log1 = AtomicBitmapMmap::new(&region1, Arc::clone(&logmem));
        assert!(log1.is_ok());
        let log1 = log1.unwrap();

        let bitmap1 = BitmapMmapRegion::default();
        bitmap1.replace(log1);

        assert!(
            range_is_clean(&bitmap0, 0, region0_len),
            "The bitmap0 should be clean"
        );
        assert!(
            range_is_clean(&bitmap1, 0, region1_len),
            "The bitmap1 should be clean"
        );

        // Dirtying region 1 must not leak into region 0.
        bitmap1.mark_dirty(0, region1_len);

        assert!(
            range_is_dirty(&bitmap1, 0, region1_len),
            "The bitmap1 should be dirty"
        );
        assert!(
            range_is_clean(&bitmap0, 0, region0_len),
            "The bitmap0 should be clean"
        );
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_region_slice() {
        let mmap_offset: u64 = 0;
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 32;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        assert!(
            range_is_clean(&bitmap, 0, region_len),
            "The bitmap should be clean"
        );

        // A slice starting at the middle of the region: its offsets are
        // relative, so dirtying the whole slice dirties the second half of
        // the region.
        let slice_len = region_len / 2;
        let slice = bitmap.slice_at(slice_len);
        assert!(
            range_is_clean(&slice, 0, slice_len),
            "The slice should be clean"
        );

        slice.mark_dirty(0, slice_len);
        assert!(
            range_is_dirty(&slice, 0, slice_len),
            "The slice should be dirty"
        );
        assert!(
            range_is_clean(&bitmap, 0, slice_len),
            "The first half of the bitmap should be clean"
        );
        assert!(
            range_is_dirty(&bitmap, slice_len, region_len - slice_len),
            "The last half of the bitmap should be dirty"
        );
    }
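
    // Not part of the original suite: marking a single byte should dirty
    // exactly the page containing it and leave neighbouring pages clean.
    #[test]
    #[cfg(not(miri))] // Miri cannot run tests that mmap a file.
    fn test_bitmap_single_byte_dirties_one_page() {
        let mmap_offset: u64 = 0;
        let mmap_size = 4;
        let f = tmp_file(mmap_size);

        let region_len = LOG_PAGE_SIZE * 4;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(GuestAddress::new(mmap_offset), region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(AtomicBitmapMmap::new(&region, logmem).unwrap());

        // One byte inside page 2.
        bitmap.mark_dirty(2 * LOG_PAGE_SIZE + 1, 1);

        assert!(range_is_clean(&bitmap, 0, 2 * LOG_PAGE_SIZE));
        assert!(range_is_dirty(&bitmap, 2 * LOG_PAGE_SIZE, LOG_PAGE_SIZE));
        assert!(range_is_clean(&bitmap, 3 * LOG_PAGE_SIZE, LOG_PAGE_SIZE));
    }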
}