vm_memory/
atomic.rs

1// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2// Copyright (C) 2020 Red Hat, Inc. All rights reserved.
3// SPDX-License-Identifier: Apache-2.0
4
5//! A wrapper over an `ArcSwap<GuestMemory>` struct to support RCU-style mutability.
6//!
7//! With the `backend-atomic` feature enabled, simply replacing `GuestMemoryMmap`
8//! with `GuestMemoryAtomic<GuestMemoryMmap>` will enable support for mutable memory maps.
9//! To support mutable memory maps, devices will also need to use
10//! `GuestAddressSpace::memory()` to gain temporary access to guest memory.
11
12extern crate arc_swap;
13
14use arc_swap::{ArcSwap, Guard};
15use std::ops::Deref;
16use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};
17
18use crate::{GuestAddressSpace, GuestMemory};
19
/// A fast implementation of a mutable collection of memory regions.
///
/// This implementation uses `ArcSwap` to provide RCU-like snapshotting of the memory map:
/// every update of the memory map creates a completely new `GuestMemory` object, and
/// readers will not be blocked because the copies they retrieved will be collected once
/// no one can access them anymore.  Under the assumption that updates to the memory map
/// are rare, this allows a very efficient implementation of the `memory()` method.
#[derive(Debug)]
pub struct GuestMemoryAtomic<M: GuestMemory> {
    // GuestAddressSpace<M>, which we want to implement, is basically a drop-in
    // replacement for &M.  Therefore, we need to pass to devices the `GuestMemoryAtomic`
    // rather than a reference to it.  To obtain this effect we wrap the actual fields
    // of GuestMemoryAtomic with an Arc, and derive the Clone trait.  See the
    // documentation for GuestAddressSpace for an example.
    //
    // The `ArcSwap` holds the current snapshot of the memory map; the `Mutex<()>`
    // only serializes writers (see `lock()`) — readers never touch it.
    inner: Arc<(ArcSwap<M>, Mutex<()>)>,
}
36
37impl<M: GuestMemory> From<Arc<M>> for GuestMemoryAtomic<M> {
38    /// create a new `GuestMemoryAtomic` object whose initial contents come from
39    /// the `map` reference counted `GuestMemory`.
40    fn from(map: Arc<M>) -> Self {
41        let inner = (ArcSwap::new(map), Mutex::new(()));
42        GuestMemoryAtomic {
43            inner: Arc::new(inner),
44        }
45    }
46}
47
48impl<M: GuestMemory> GuestMemoryAtomic<M> {
49    /// create a new `GuestMemoryAtomic` object whose initial contents come from
50    /// the `map` `GuestMemory`.
51    pub fn new(map: M) -> Self {
52        Arc::new(map).into()
53    }
54
55    fn load(&self) -> Guard<Arc<M>> {
56        self.inner.0.load()
57    }
58
59    /// Acquires the update mutex for the `GuestMemoryAtomic`, blocking the current
60    /// thread until it is able to do so.  The returned RAII guard allows for
61    /// scoped unlock of the mutex (that is, the mutex will be unlocked when
62    /// the guard goes out of scope), and optionally also for replacing the
63    /// contents of the `GuestMemoryAtomic` when the lock is dropped.
64    pub fn lock(&self) -> LockResult<GuestMemoryExclusiveGuard<M>> {
65        match self.inner.1.lock() {
66            Ok(guard) => Ok(GuestMemoryExclusiveGuard {
67                parent: self,
68                _guard: guard,
69            }),
70            Err(err) => Err(PoisonError::new(GuestMemoryExclusiveGuard {
71                parent: self,
72                _guard: err.into_inner(),
73            })),
74        }
75    }
76}
77
78impl<M: GuestMemory> Clone for GuestMemoryAtomic<M> {
79    fn clone(&self) -> Self {
80        Self {
81            inner: self.inner.clone(),
82        }
83    }
84}
85
86impl<M: GuestMemory> GuestAddressSpace for GuestMemoryAtomic<M> {
87    type T = GuestMemoryLoadGuard<M>;
88    type M = M;
89
90    fn memory(&self) -> Self::T {
91        GuestMemoryLoadGuard { guard: self.load() }
92    }
93}
94
/// A guard that provides temporary access to a `GuestMemoryAtomic`.  This
/// object is returned from the `memory()` method.  It dereferences to
/// a snapshot of the `GuestMemory`, so it can be used transparently to
/// access memory.
#[derive(Debug)]
pub struct GuestMemoryLoadGuard<M: GuestMemory> {
    // The arc_swap guard keeps this snapshot alive even if a writer
    // replaces the memory map while the guard is held.
    guard: Guard<Arc<M>>,
}
103
104impl<M: GuestMemory> GuestMemoryLoadGuard<M> {
105    /// Make a clone of the held pointer and returns it.  This is more
106    /// expensive than just using the snapshot, but it allows to hold on
107    /// to the snapshot outside the scope of the guard.  It also allows
108    /// writers to proceed, so it is recommended if the reference must
109    /// be held for a long time (including for caching purposes).
110    pub fn into_inner(self) -> Arc<M> {
111        Guard::into_inner(self.guard)
112    }
113}
114
115impl<M: GuestMemory> Clone for GuestMemoryLoadGuard<M> {
116    fn clone(&self) -> Self {
117        GuestMemoryLoadGuard {
118            guard: Guard::from_inner(Arc::clone(&*self.guard)),
119        }
120    }
121}
122
123impl<M: GuestMemory> Deref for GuestMemoryLoadGuard<M> {
124    type Target = M;
125
126    fn deref(&self) -> &Self::Target {
127        &self.guard
128    }
129}
130
/// An RAII implementation of a "scoped lock" for `GuestMemoryAtomic`.  When
/// this structure is dropped (falls out of scope) the lock will be unlocked,
/// possibly after updating the memory map represented by the
/// `GuestMemoryAtomic` that created the guard.
#[derive(Debug)]
pub struct GuestMemoryExclusiveGuard<'a, M: GuestMemory> {
    // The atomic whose map `replace()` updates.
    parent: &'a GuestMemoryAtomic<M>,
    // Held purely for mutual exclusion among writers; released on drop.
    _guard: MutexGuard<'a, ()>,
}
140
141impl<M: GuestMemory> GuestMemoryExclusiveGuard<'_, M> {
142    /// Replace the memory map in the `GuestMemoryAtomic` that created the guard
143    /// with the new memory map, `map`.  The lock is then dropped since this
144    /// method consumes the guard.
145    pub fn replace(self, map: M) {
146        self.parent.inner.0.store(Arc::new(map))
147    }
148}
149
#[cfg(test)]
mod tests {
    use super::*;
    use crate::region::tests::{new_guest_memory_collection_from_regions, Collection, MockRegion};
    use crate::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestUsize};

    type GuestMemoryMmapAtomic = GuestMemoryAtomic<Collection>;

    // Exercises snapshot access (`memory()`), `into_inner()`, re-wrapping the
    // extracted Arc with `memory()`, and cloning the atomic itself.
    #[test]
    fn test_atomic_memory() {
        // Two disjoint 0x400-byte regions.
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let mut iterated_regions = Vec::new();
        let gmm = new_guest_memory_collection_from_regions(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);
        let mem = gm.memory();

        // Snapshot sees both regions with the expected length.
        for region in mem.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }

        for region in mem.iter() {
            iterated_regions.push((region.start_addr(), region.len()));
        }
        assert_eq!(regions, iterated_regions);
        assert_eq!(mem.num_regions(), 2);
        assert!(mem.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        // `into_inner()` hands back the Arc; the data stays accessible.
        let mem2 = mem.into_inner();
        for region in mem2.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }
        assert_eq!(mem2.num_regions(), 2);
        assert!(mem2.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem2.find_region(GuestAddress(0x10000)).is_none());

        assert!(regions
            .iter()
            .map(|x| (x.0, x.1))
            .eq(iterated_regions.iter().copied()));

        // An Arc<M> is itself a GuestAddressSpace: `memory()` works on it too.
        let mem3 = mem2.memory();
        for region in mem3.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }
        assert_eq!(mem3.num_regions(), 2);
        assert!(mem3.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem3.find_region(GuestAddress(0x10000)).is_none());

        // A clone of the atomic shares the same underlying map.
        let gm2 = gm.clone();
        let mem4 = gm2.memory();
        for region in mem4.iter() {
            assert_eq!(region.len(), region_size as GuestUsize);
        }
        assert_eq!(mem4.num_regions(), 2);
        assert!(mem4.find_region(GuestAddress(0x1000)).is_some());
        assert!(mem4.find_region(GuestAddress(0x10000)).is_none());
    }

    // A cloned load guard must outlive the guard it was cloned from.
    #[test]
    fn test_clone_guard() {
        let region_size = 0x400;
        let regions = vec![
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x1000), region_size),
        ];
        let gmm = new_guest_memory_collection_from_regions(&regions).unwrap();
        let gm = GuestMemoryMmapAtomic::new(gmm);
        let mem = {
            // guard1 is dropped at the end of this block; the clone survives.
            let guard1 = gm.memory();
            Clone::clone(&guard1)
        };
        assert_eq!(mem.num_regions(), 2);
    }

    // RCU semantics: a snapshot taken before `replace()` keeps seeing the old
    // map, while snapshots taken afterwards see the new one.
    #[test]
    fn test_atomic_hotplug() {
        let region_size = 0x1000;
        let regions = [
            (GuestAddress(0x0), region_size),
            (GuestAddress(0x10_0000), region_size),
        ];
        let mut gmm = Arc::new(new_guest_memory_collection_from_regions(&regions).unwrap());
        let gm: GuestMemoryAtomic<_> = gmm.clone().into();
        let mem_orig = gm.memory();
        assert_eq!(mem_orig.num_regions(), 2);

        {
            let guard = gm.lock().unwrap();
            // Build the replacement map by copy-on-write from the original.
            let new_gmm = Arc::make_mut(&mut gmm);
            let new_gmm = new_gmm
                .insert_region(Arc::new(MockRegion {
                    start: GuestAddress(0x8000),
                    len: 0x1000,
                }))
                .unwrap();
            let new_gmm = new_gmm
                .insert_region(Arc::new(MockRegion {
                    start: GuestAddress(0x4000),
                    len: 0x1000,
                }))
                .unwrap();
            let new_gmm = new_gmm
                .insert_region(Arc::new(MockRegion {
                    start: GuestAddress(0xc000),
                    len: 0x1000,
                }))
                .unwrap();

            // Inserting an overlapping region must fail.
            new_gmm
                .insert_region(Arc::new(MockRegion {
                    start: GuestAddress(0x8000),
                    len: 0x1000,
                }))
                .unwrap_err();

            // Publish the new map and release the writer lock.
            guard.replace(new_gmm);
        }

        // The pre-replace snapshot is unchanged; a fresh one sees 5 regions.
        assert_eq!(mem_orig.num_regions(), 2);
        let mem = gm.memory();
        assert_eq!(mem.num_regions(), 5);
    }
}