vm_allocator/id_allocator.rs

// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright © 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Provides an allocation and release strategy for IDs.
//!
//! This module implements an allocation strategy for all resource types
//! that can be abstracted to an integer.

use crate::{Error, Result};
use std::collections::BTreeSet;

/// A unique ID allocator that allows management of IDs in a given interval.
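// Illustrative doc example (added for clarity); it assumes `IdAllocator` is
// re-exported at the crate root, i.e. `use vm_allocator::IdAllocator` works.
///
/// # Example
///
/// ```rust
/// use vm_allocator::IdAllocator;
///
/// let mut allocator = IdAllocator::new(0, 10).unwrap();
/// assert_eq!(allocator.allocate_id().unwrap(), 0);
/// assert_eq!(allocator.allocate_id().unwrap(), 1);
///
/// // Released IDs are reused before fresh ones, smallest first.
/// allocator.free_id(0).unwrap();
/// assert_eq!(allocator.allocate_id().unwrap(), 0);
/// ```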
// Internal representation of IdAllocator. Contains the ends of the interval
// that is managed, a field that points at the next available ID, and a
// BTreeSet used to store the released IDs. We chose a BTreeSet because the
// average complexity for deletion and insertion is O(log N), compared to a
// Vec for example. Another benefit is that the entries are kept sorted, so
// we will always reuse the smallest available ID first.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct IdAllocator {
    // Beginning of the range of IDs that we want to manage.
    range_base: u32,
    // First available ID that was never allocated.
    next_id: Option<u32>,
    // End of the range of IDs that we want to manage.
    range_end: u32,
    // Set of all freed IDs that can be reused at subsequent allocations.
    freed_ids: BTreeSet<u32>,
}

impl IdAllocator {
    /// Creates a new instance of IdAllocator that will be used to manage the
    /// allocation and release of ids from the interval specified by
    /// `range_base` and `range_end` (both inclusive).
    ///
    /// Returns [`Error::InvalidRange`] when `range_end` is less than
    /// `range_base`.
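    // Illustrative doc example (added), under the same crate-root re-export
    // assumption as the example on the struct above.
    ///
    /// # Example
    ///
    /// ```rust
    /// use vm_allocator::IdAllocator;
    ///
    /// // Manage IDs in the inclusive interval [0, 5].
    /// assert!(IdAllocator::new(0, 5).is_ok());
    /// // An inverted interval is rejected.
    /// assert!(IdAllocator::new(5, 0).is_err());
    /// ```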
    pub fn new(range_base: u32, range_end: u32) -> Result<Self> {
        if range_end < range_base {
            return Err(Error::InvalidRange(range_base.into(), range_end.into()));
        }
        Ok(IdAllocator {
            range_base,
            next_id: Some(range_base),
            range_end,
            freed_ids: BTreeSet::<u32>::new(),
        })
    }

    fn id_in_range(&self, id: u32) -> bool {
        // Check that the id lies inside the managed range (inclusive ends).
        self.range_base <= id && id <= self.range_end
    }

    /// Allocate an ID from the managed range.
    /// We first try to reuse one of the IDs released before. If there is no
    /// ID to reuse, we return the next available one from the managed range.
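    // Illustrative doc example (added), under the same crate-root re-export
    // assumption as the example on the struct above.
    ///
    /// # Example
    ///
    /// ```rust
    /// use vm_allocator::IdAllocator;
    ///
    /// let mut allocator = IdAllocator::new(0, 1).unwrap();
    /// assert_eq!(allocator.allocate_id().unwrap(), 0);
    /// assert_eq!(allocator.allocate_id().unwrap(), 1);
    /// // Both IDs in [0, 1] are taken, so the next request fails.
    /// assert!(allocator.allocate_id().is_err());
    /// ```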
    pub fn allocate_id(&mut self) -> Result<u32> {
        // If the set containing all freed ids is not empty, we extract the
        // first (i.e. smallest) entry from that set and return it.
        if let Some(&ret_value) = self.freed_ids.iter().next() {
            self.freed_ids.remove(&ret_value);
            return Ok(ret_value);
        }
        // If no id was freed before, we return the next available id.
        if let Some(next_id) = self.next_id {
            if next_id > self.range_end {
                return Err(Error::ResourceNotAvailable);
            }
            // Prepare the next available id. If the addition overflows, we
            // set the next_id field to None and return Overflow at the next
            // allocation request.
            self.next_id = next_id.checked_add(1);
            return Ok(next_id);
        }
        Err(Error::Overflow)
    }

    /// Frees an id from the managed range, returning the freed id on success.
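    // Illustrative doc example (added), under the same crate-root re-export
    // assumption as the example on the struct above.
    ///
    /// # Example
    ///
    /// ```rust
    /// use vm_allocator::IdAllocator;
    ///
    /// let mut allocator = IdAllocator::new(0, 5).unwrap();
    /// let id = allocator.allocate_id().unwrap();
    /// assert_eq!(allocator.free_id(id).unwrap(), id);
    /// // Freeing the same id a second time is an error.
    /// assert!(allocator.free_id(id).is_err());
    /// ```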
    pub fn free_id(&mut self, id: u32) -> Result<u32> {
        // Check that the id belongs to the managed range and that it was
        // actually allocated. Return an error if either condition is not met.
        if !self.id_in_range(id) {
            return Err(Error::OutOfRange(id));
        }
        // `next_id` is the first id that was never handed out, so any id
        // greater than or equal to it cannot have been allocated yet.
        if let Some(next_id) = self.next_id {
            if next_id <= id {
                return Err(Error::NeverAllocated(id));
            }
        }

        // Insert the released id into the set of freed ids so that it can be
        // reused and so that a repeated release of the same id is detected.
        self.freed_ids
            .insert(id)
            .then_some(id)
            .ok_or(Error::AlreadyReleased(id))
    }
}

#[cfg(test)]
mod tests {
    use crate::id_allocator::IdAllocator;
    use crate::Error;

    #[test]
    fn test_slot_id_allocation() {
        let faulty_allocator = IdAllocator::new(23, 5);
        assert_eq!(faulty_allocator.unwrap_err(), Error::InvalidRange(23, 5));
        let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(legacy_irq_allocator.range_base, 5);
        assert_eq!(legacy_irq_allocator.range_end, 23);

        let id = legacy_irq_allocator.allocate_id().unwrap();
        assert_eq!(id, 5);
        assert_eq!(legacy_irq_allocator.next_id.unwrap(), 6);

        // The range 5..=23 holds 19 ids and one is already taken, so the
        // remaining 18 allocations succeed.
        for _ in 1..19 {
            assert!(legacy_irq_allocator.allocate_id().is_ok());
        }

        assert_eq!(
            legacy_irq_allocator.allocate_id().unwrap_err(),
            Error::ResourceNotAvailable
        );
    }

    #[test]
    fn test_u32_overflow() {
        let mut allocator = IdAllocator::new(u32::MAX - 1, u32::MAX).unwrap();
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX - 1);
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX);
        let res = allocator.allocate_id();
        assert!(res.is_err());
        assert_eq!(res.unwrap_err(), Error::Overflow);
    }

    #[test]
    fn test_slot_id_free() {
        let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(
            legacy_irq_allocator.free_id(3).unwrap_err(),
            Error::OutOfRange(3)
        );
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 0);

        // Allocate ids 5 through 13.
        for _ in 1..10 {
            let _id = legacy_irq_allocator.allocate_id().unwrap();
        }

        let irq = 10;
        legacy_irq_allocator.free_id(irq).unwrap();
        assert!(legacy_irq_allocator.freed_ids.contains(&irq));
        assert_eq!(
            legacy_irq_allocator.free_id(10).unwrap_err(),
            Error::AlreadyReleased(10)
        );
        let irq = 9;
        legacy_irq_allocator.free_id(irq).unwrap();
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 2);
        assert_eq!(*legacy_irq_allocator.freed_ids.iter().next().unwrap(), 9);

        // The smallest freed id (9) is reused first.
        let irq = legacy_irq_allocator.allocate_id().unwrap();
        assert_eq!(irq, 9);
        assert!(!legacy_irq_allocator.freed_ids.contains(&irq));
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 1);
        assert_eq!(
            legacy_irq_allocator.free_id(21).unwrap_err(),
            Error::NeverAllocated(21)
        );
    }
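
    // Sanity check for the never-allocated guard: `next_id` points at the
    // first id that was never handed out, so freeing it must fail.
    #[test]
    fn test_free_unallocated_next_id() {
        let mut allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(allocator.allocate_id().unwrap(), 5);
        // `next_id` is now 6 and has not been allocated yet.
        assert_eq!(allocator.free_id(6).unwrap_err(), Error::NeverAllocated(6));
    }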

    #[test]
    fn test_id_sanity_checks() {
        let legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();

        assert!(!legacy_irq_allocator.id_in_range(4));
        assert!(legacy_irq_allocator.id_in_range(6));
        assert!(!legacy_irq_allocator.id_in_range(25));
    }
}