vm_allocator/lib.rs
// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Manages system resources that can be allocated to VMs and their devices.
//!
//! # Example
//!
//! Depending on the use case of the VMM, both the `IdAllocator` and the `AddressAllocator`
//! can be used. In the example below, the `IdAllocator` allocates unique identifiers for
//! VM devices, and the `AddressAllocator` allocates MMIO ranges for virtio devices.
//!
//! The constants used below are typical for the x86 platform, but the same code works
//! unchanged on aarch64.
//! ```rust
//! use std::collections::HashMap;
//! use vm_allocator::{AddressAllocator, AllocPolicy, Error, IdAllocator, RangeInclusive, Result};
//!
//! const FIRST_ADDR_PAST_32BITS: u64 = 1 << 32;
//! const MEM_32BIT_GAP_SIZE: u64 = 768 << 20;
//! const MMIO_MEM_START: u64 = FIRST_ADDR_PAST_32BITS - MEM_32BIT_GAP_SIZE;
//! const PAGE_SIZE: u64 = 0x1000;
//!
//! struct DeviceManager {
//!     id_allocator: IdAllocator,
//!     mmio_allocator: AddressAllocator,
//!     slots: HashMap<u32, RangeInclusive>,
//! }
//!
//! #[derive(Clone, Copy)]
//! struct DeviceSlot {
//!     id: u32,
//!     mmio_range: RangeInclusive,
//! }
//!
//! impl DeviceManager {
//!     pub fn new() -> Result<Self> {
//!         Ok(DeviceManager {
//!             id_allocator: IdAllocator::new(0, 100)?,
//!             mmio_allocator: AddressAllocator::new(MMIO_MEM_START, MEM_32BIT_GAP_SIZE)?,
//!             slots: HashMap::new(),
//!         })
//!     }
//!
//!     pub fn reserve_device_slot(&mut self) -> Result<DeviceSlot> {
//!         // Reserve the first available range that is aligned to the page size.
//!         // For each device we reserve one page of addresses.
//!         let mmio_range =
//!             self.mmio_allocator
//!                 .allocate(PAGE_SIZE, PAGE_SIZE, AllocPolicy::FirstMatch)?;
//!         let slot = DeviceSlot {
//!             id: self.id_allocator.allocate_id()?,
//!             mmio_range,
//!         };
//!         self.slots.insert(slot.id, slot.mmio_range);
//!         Ok(slot)
//!     }
//!
//!     // Free the device slot corresponding to the passed device ID.
//!     pub fn free_device_slot(&mut self, id: u32) -> Result<()> {
//!         let mmio_range = self.slots.get(&id).ok_or(Error::NeverAllocated(id))?;
//!         let _ = self.id_allocator.free_id(id)?;
//!         self.mmio_allocator.free(mmio_range)
//!     }
//! }
//!
//! # fn main() {
//! # let mut dm = DeviceManager::new().unwrap();
//! # let slot = dm.reserve_device_slot().unwrap();
//! # dm.free_device_slot(slot.id).unwrap();
//! # }
//! ```

#![deny(missing_docs)]

mod address_allocator;
/// Allocation engine used by the address allocator.
mod allocation_engine;
mod id_allocator;

use std::{cmp::max, cmp::min, result};
use thiserror::Error;

use crate::allocation_engine::NodeState;
pub use crate::{address_allocator::AddressAllocator, id_allocator::IdAllocator};

/// Default alignment that can be used for creating a `Constraint`.
pub const DEFAULT_CONSTRAINT_ALIGN: u64 = 4;

/// Error type for resource allocation operations.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Error)]
pub enum Error {
    /// Management operations on the desired resource resulted in overflow.
    #[error("Management operations on desired resource resulted in overflow.")]
    Overflow,
    /// An id that is not part of the specified range was requested to be released.
    #[error("Specified id: {0} is not in the range.")]
    OutOfRange(u32),
    /// An id that was already released was requested to be released again.
    #[error("Specified id: {0} is already released.")]
    AlreadyReleased(u32),
    /// An id that was never allocated was requested to be released.
    #[error("Specified id: {0} was never allocated, can't release it.")]
    NeverAllocated(u32),
    /// The resource we want to use or update is not available.
    #[error("The requested resource is not available.")]
    ResourceNotAvailable,
    /// The range to manage is invalid.
    #[error("The range specified: {0}-{1} is not valid.")]
    InvalidRange(u64, u64),
    /// The alignment value is invalid.
    #[error("Alignment value is invalid.")]
    InvalidAlignment,
    /// The range we are trying to insert into the interval tree overlaps
    /// with another node from the tree.
    #[error("Addresses are overlapping. {0:?} intersects with existing {1:?}")]
    Overlap(RangeInclusive, RangeInclusive),
    /// A node state can only change from Free to Allocated; other transitions
    /// are not valid.
    #[error("Invalid state transition for node {0:?} from {1:?} to NodeState::Free")]
    InvalidStateTransition(RangeInclusive, NodeState),
    /// The address is not aligned.
    #[error("The address is not aligned.")]
    UnalignedAddress,
    /// Management operations on the desired resource resulted in underflow.
    #[error("Management operations on desired resource resulted in underflow.")]
    Underflow,
    /// The size of the desired resource is invalid.
    #[error("The specified size: {0} is not valid.")]
    InvalidSize(u64),
}

/// Wrapper over `std::result::Result` that uses the crate's [`Error`] type.
pub type Result<T> = result::Result<T, Error>;

/// A closed interval range [start, end].
/// The range describes a memory slot which is assigned by the VMM to a device.
///
/// # Example
///
/// ```rust
/// use vm_allocator::RangeInclusive;
///
/// let r = RangeInclusive::new(0x0, 0x100).unwrap();
/// assert_eq!(r.len(), 0x101);
/// assert_eq!(r.start(), 0x0);
/// assert_eq!(r.end(), 0x100);
///
/// // Check if a region contains another region.
/// let other = RangeInclusive::new(0x50, 0x80).unwrap();
/// assert!(r.contains(&other));
///
/// // Check if a region overlaps with another one.
/// let other = RangeInclusive::new(0x99, 0x150).unwrap();
/// assert!(r.overlaps(&other));
/// ```
// This structure represents the key of the Node object in the interval tree implementation.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Hash, Ord, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct RangeInclusive {
    /// Lower boundary of the interval.
    start: u64,
    /// Upper boundary of the interval.
    end: u64,
}

#[allow(clippy::len_without_is_empty)]
impl RangeInclusive {
    /// Creates a new `RangeInclusive`.
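    ///
    /// Returns [`Error::InvalidRange`] if `start > end` or if the range would
    /// cover the entire `u64` space. The example below exercises both the
    /// success and the error path, mirroring the checks in the unit tests.
    ///
    /// ```rust
    /// use vm_allocator::{Error, RangeInclusive};
    ///
    /// assert!(RangeInclusive::new(0x100, 0x1FF).is_ok());
    /// // An inverted range is rejected.
    /// assert_eq!(
    ///     RangeInclusive::new(2, 1).unwrap_err(),
    ///     Error::InvalidRange(2, 1)
    /// );
    /// ```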
    pub fn new(start: u64, end: u64) -> Result<Self> {
        // The length of the interval [0, u64::MAX] is u64::MAX + 1, which does
        // not fit in a u64; hence we return `Error::InvalidRange` when there
        // is an attempt to use that range.
        if start > end || (start == 0 && end == u64::MAX) {
            return Err(Error::InvalidRange(start, end));
        }
        Ok(RangeInclusive { start, end })
    }

    /// Returns the length of the range.
    pub fn len(&self) -> u64 {
        self.end - self.start + 1
    }

    /// Returns true if the regions overlap.
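    ///
    /// Because ranges are inclusive, two ranges that merely touch at a
    /// boundary (for example `[1, 4]` and `[4, 6]`) are considered
    /// overlapping; the assertions below mirror cases from the unit tests.
    ///
    /// ```rust
    /// use vm_allocator::RangeInclusive;
    ///
    /// let a = RangeInclusive::new(1, 4).unwrap();
    /// let b = RangeInclusive::new(4, 6).unwrap();
    /// let c = RangeInclusive::new(5, 6).unwrap();
    /// // `a` and `b` share the point 4, so they overlap.
    /// assert!(a.overlaps(&b));
    /// // `a` ends before `c` starts, so they do not overlap.
    /// assert!(!a.overlaps(&c));
    /// ```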
    pub fn overlaps(&self, other: &RangeInclusive) -> bool {
        max(self.start, other.start) <= min(self.end, other.end)
    }

    /// Returns true if the current range contains the range passed as a parameter.
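    ///
    /// Containment is inclusive of both boundaries; a range that only
    /// partially overlaps is not contained. The assertions mirror cases from
    /// the unit tests.
    ///
    /// ```rust
    /// use vm_allocator::RangeInclusive;
    ///
    /// let r = RangeInclusive::new(2, 6).unwrap();
    /// // Sub-ranges, including ones sharing a boundary, are contained.
    /// assert!(r.contains(&RangeInclusive::new(2, 3).unwrap()));
    /// // A partially overlapping range is not contained.
    /// assert!(!r.contains(&RangeInclusive::new(1, 3).unwrap()));
    /// ```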
    pub fn contains(&self, other: &RangeInclusive) -> bool {
        self.start <= other.start && self.end >= other.end
    }

    /// Returns the lower boundary of the range.
    pub fn start(&self) -> u64 {
        self.start
    }

    /// Returns the upper boundary of the range.
    pub fn end(&self) -> u64 {
        self.end
    }
}

/// A resource allocation constraint.
///
/// # Example
///
/// ```rust
/// use vm_allocator::{AllocPolicy, Constraint, Error, DEFAULT_CONSTRAINT_ALIGN};
///
/// let constraint =
///     Constraint::new(0x4, DEFAULT_CONSTRAINT_ALIGN, AllocPolicy::FirstMatch).unwrap();
/// assert_eq!(constraint.size(), 0x4);
/// assert_eq!(constraint.align(), 0x4);
///
/// // Alignments need to be a power of 2, otherwise an error is returned.
/// assert_eq!(
///     Constraint::new(0x4, 0x3, AllocPolicy::LastMatch).unwrap_err(),
///     Error::InvalidAlignment
/// );
///
/// // When using the ExactMatch policy, the start address must also be aligned,
/// // otherwise an error is returned.
/// assert_eq!(
///     Constraint::new(0x4, 0x4, AllocPolicy::ExactMatch(0x3)).unwrap_err(),
///     Error::UnalignedAddress
/// );
/// ```
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Constraint {
    /// Size to allocate.
    size: u64,
    /// Alignment for the allocated resource.
    align: u64,
    /// Resource allocation policy.
    policy: AllocPolicy,
}

impl Constraint {
    /// Creates a new constraint based on the passed configuration.
    ///
    /// When the `ExactMatch` policy is used, the start address MUST be aligned to the
    /// alignment passed as a parameter.
    ///
    /// # Arguments:
    /// - `size`: size to allocate.
    /// - `align`: alignment to be used for the allocated resources.
    ///   Valid alignments are powers of 2.
    /// - `policy`: allocation policy.
    pub fn new(size: u64, align: u64, policy: AllocPolicy) -> Result<Self> {
        if size == 0 {
            return Err(Error::InvalidSize(size));
        }

        // `is_power_of_two` returns false for 0, so this also rejects a zero alignment.
        if !align.is_power_of_two() {
            return Err(Error::InvalidAlignment);
        }

        if let AllocPolicy::ExactMatch(start_address) = policy {
            if start_address % align != 0 {
                return Err(Error::UnalignedAddress);
            }
        }

        Ok(Constraint {
            size,
            align,
            policy,
        })
    }

    /// Returns the alignment constraint.
    pub fn align(self) -> u64 {
        self.align
    }

    /// Returns the size constraint.
    pub fn size(self) -> u64 {
        self.size
    }
}

/// Policy for resource allocation.
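///
/// # Example
///
/// A minimal sketch of how each policy steers an `AddressAllocator`, assuming
/// an allocator managing the illustrative range starting at 0x1000 with size
/// 0x1000. Only properties guaranteed by the policy definitions are asserted.
///
/// ```rust
/// use vm_allocator::{AddressAllocator, AllocPolicy};
///
/// let mut allocator = AddressAllocator::new(0x1000, 0x1000).unwrap();
///
/// // FirstMatch takes the first fitting range from the start of the managed area.
/// let first = allocator
///     .allocate(0x100, 0x100, AllocPolicy::FirstMatch)
///     .unwrap();
/// assert_eq!(first.len(), 0x100);
///
/// // LastMatch takes the first fitting range from the end of the managed area,
/// // so it lands above the FirstMatch allocation.
/// let last = allocator
///     .allocate(0x100, 0x100, AllocPolicy::LastMatch)
///     .unwrap();
/// assert!(last.start() > first.end());
///
/// // ExactMatch allocates at the requested address if it is still available.
/// let exact = allocator
///     .allocate(0x100, 0x100, AllocPolicy::ExactMatch(0x1800))
///     .unwrap();
/// assert_eq!(exact.start(), 0x1800);
/// ```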
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
pub enum AllocPolicy {
    /// Allocate the first matched entry.
    #[default]
    FirstMatch,
    /// Allocate the first matched entry from the end of the range.
    LastMatch,
    /// Allocate a memory slot starting with the specified address
    /// if it is available.
    ExactMatch(u64),
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_new_range() {
        assert_eq!(
            RangeInclusive::new(2, 1).unwrap_err(),
            Error::InvalidRange(2, 1)
        );
        assert_eq!(
            RangeInclusive::new(0, u64::MAX).unwrap_err(),
            Error::InvalidRange(0, u64::MAX)
        );
    }

    #[test]
    fn test_range_overlaps() {
        let range_a = RangeInclusive::new(1u64, 4u64).unwrap();
        let range_b = RangeInclusive::new(4u64, 6u64).unwrap();
        let range_c = RangeInclusive::new(2u64, 3u64).unwrap();
        let range_e = RangeInclusive::new(5u64, 6u64).unwrap();

        assert!(range_a.overlaps(&range_b));
        assert!(range_b.overlaps(&range_a));
        assert!(range_a.overlaps(&range_c));
        assert!(range_c.overlaps(&range_a));
        assert!(!range_a.overlaps(&range_e));
        assert!(!range_e.overlaps(&range_a));

        assert_eq!(range_a.len(), 4);
    }

    #[test]
    fn test_range_contain() {
        let range_a = RangeInclusive::new(2u64, 6u64).unwrap();
        assert!(range_a.contains(&RangeInclusive::new(2u64, 3u64).unwrap()));
        assert!(range_a.contains(&RangeInclusive::new(3u64, 4u64).unwrap()));
        assert!(range_a.contains(&RangeInclusive::new(5u64, 6u64).unwrap()));
        assert!(!range_a.contains(&RangeInclusive::new(1u64, 2u64).unwrap()));
        assert!(!range_a.contains(&RangeInclusive::new(1u64, 3u64).unwrap()));
        assert!(!range_a.contains(&RangeInclusive::new(1u64, 7u64).unwrap()));
        assert!(!range_a.contains(&RangeInclusive::new(7u64, 8u64).unwrap()));
        assert!(!range_a.contains(&RangeInclusive::new(6u64, 7u64).unwrap()));
    }

    #[test]
    fn test_range_ord() {
        let range_a = RangeInclusive::new(1, 4).unwrap();
        let range_b = RangeInclusive::new(1, 4).unwrap();
        let range_c = RangeInclusive::new(1, 3).unwrap();
        let range_d = RangeInclusive::new(1, 5).unwrap();

        assert_eq!(range_a, range_b);
        assert_eq!(range_b, range_a);
        assert!(range_a > range_c);
        assert!(range_c < range_a);
        assert!(range_a < range_d);
        assert!(range_d > range_a);
    }

    #[test]
    fn test_getters() {
        let range = RangeInclusive::new(3, 5).unwrap();
        assert_eq!(range.start(), 3);
        assert_eq!(range.end(), 5);
    }

    #[test]
    fn test_range_upper_bound() {
        let range = RangeInclusive::new(0, u64::MAX);
        assert_eq!(range.unwrap_err(), Error::InvalidRange(0, u64::MAX));
    }

    #[test]
    fn constraint_getter() {
        let bad_constraint = Constraint::new(0x1000, 0x1000, AllocPolicy::ExactMatch(0xC));
        assert_eq!(bad_constraint.unwrap_err(), Error::UnalignedAddress);
        let constraint = Constraint::new(0x1000, 0x1000, AllocPolicy::default()).unwrap();
        assert_eq!(constraint.align(), 0x1000);
        assert_eq!(constraint.size(), 0x1000);
    }
}