vm_allocator/id_allocator.rs

use crate::{Error, Result};
use std::collections::BTreeSet;

/// An ID allocator that manages the allocation and release of IDs inside a
/// fixed interval.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct IdAllocator {
    // Beginning of the range of IDs that we want to manage.
    range_base: u32,
    // First available ID that was never allocated.
    next_id: Option<u32>,
    // End of the range of IDs that we want to manage.
    range_end: u32,
    // Set of all freed IDs that can be reused on subsequent allocations.
    freed_ids: BTreeSet<u32>,
}

impl IdAllocator {
    /// Creates a new `IdAllocator` that manages IDs in the closed interval
    /// [`range_base`, `range_end`]. Returns `Error::InvalidRange` when the
    /// end of the range precedes its base.
    pub fn new(range_base: u32, range_end: u32) -> Result<Self> {
        if range_end < range_base {
            return Err(Error::InvalidRange(range_base.into(), range_end.into()));
        }
        Ok(IdAllocator {
            range_base,
            next_id: Some(range_base),
            range_end,
            freed_ids: BTreeSet::<u32>::new(),
        })
    }

    // Checks whether `id` falls inside the managed closed interval
    // [range_base, range_end].
    fn id_in_range(&self, id: u32) -> bool {
        self.range_base <= id && id <= self.range_end
    }

    /// Allocates an ID from the managed range. Freed IDs are reused first;
    /// only when none are available is the next fresh ID handed out.
    pub fn allocate_id(&mut self) -> Result<u32> {
        // If the set of freed IDs is not empty, extract its smallest entry
        // and return it.
        if !self.freed_ids.is_empty() {
            let ret_value = *self.freed_ids.iter().next().unwrap();
            self.freed_ids.remove(&ret_value);
            return Ok(ret_value);
        }
        // Otherwise hand out the next fresh ID, unless the range is drained.
        if let Some(next_id) = self.next_id {
            if next_id > self.range_end {
                return Err(Error::ResourceNotAvailable);
            }
            // Advance the cursor; `checked_add` yields `None` once the
            // increment would overflow `u32`.
            self.next_id = next_id.checked_add(1);
            return Ok(next_id);
        }
        Err(Error::Overflow)
    }

    /// Frees an ID from the managed range, returning it on success.
    pub fn free_id(&mut self, id: u32) -> Result<u32> {
        // The ID must belong to the managed range.
        if !self.id_in_range(id) {
            return Err(Error::OutOfRange(id));
        }
        // `next_id` is the first ID that was never handed out, so any ID at
        // or beyond it cannot be freed.
        if let Some(next_id) = self.next_id {
            if next_id <= id {
                return Err(Error::NeverAllocated(id));
            }
        }

        // `insert` returns false when the ID is already in the freed set,
        // which means it was released before.
        self.freed_ids
            .insert(id)
            .then_some(id)
            .ok_or(Error::AlreadyReleased(id))
    }
}
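
// A minimal usage sketch of the allocate/free/reuse cycle. It only exercises
// behavior visible in this module; the scenario itself (a toy 0..=7 range) is
// illustrative and not part of the original test suite.
#[test]
fn example_allocate_free_reuse() {
    let mut allocator = IdAllocator::new(0, 7).unwrap();
    // Fresh allocations advance the cursor monotonically.
    assert_eq!(allocator.allocate_id().unwrap(), 0);
    assert_eq!(allocator.allocate_id().unwrap(), 1);
    assert_eq!(allocator.allocate_id().unwrap(), 2);
    // Freed IDs land in the ordered freed set ...
    allocator.free_id(0).unwrap();
    allocator.free_id(2).unwrap();
    // ... and are recycled smallest-first, before the cursor is consulted.
    assert_eq!(allocator.allocate_id().unwrap(), 0);
    assert_eq!(allocator.allocate_id().unwrap(), 2);
    assert_eq!(allocator.allocate_id().unwrap(), 3);
}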

#[cfg(test)]
mod tests {
    use crate::id_allocator::IdAllocator;
    use crate::Error;

    #[test]
    fn test_slot_id_allocation() {
        // A range whose end precedes its base must be rejected.
        let faulty_allocator = IdAllocator::new(23, 5);
        assert_eq!(faulty_allocator.unwrap_err(), Error::InvalidRange(23, 5));
        let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(legacy_irq_allocator.range_base, 5);
        assert_eq!(legacy_irq_allocator.range_end, 23);

        let id = legacy_irq_allocator.allocate_id().unwrap();
        assert_eq!(id, 5);
        assert_eq!(legacy_irq_allocator.next_id.unwrap(), 6);

        // Drain the remaining IDs (6..=23); the range holds 19 IDs in total.
        for _ in 1..19 {
            assert!(legacy_irq_allocator.allocate_id().is_ok());
        }

        // With the range exhausted, further allocation must fail.
        assert_eq!(
            legacy_irq_allocator.allocate_id().unwrap_err(),
            Error::ResourceNotAvailable
        );
    }

    #[test]
    fn test_u32_overflow() {
        // At the top of the u32 range the cursor becomes `None` instead of
        // wrapping, and the next allocation reports `Overflow`.
        let mut allocator = IdAllocator::new(u32::MAX - 1, u32::MAX).unwrap();
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX - 1);
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX);
        let res = allocator.allocate_id();
        assert!(res.is_err());
        assert_eq!(res.unwrap_err(), Error::Overflow);
    }

    #[test]
    fn test_slot_id_free() {
        let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(
            legacy_irq_allocator.free_id(3).unwrap_err(),
            Error::OutOfRange(3)
        );
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 0);

        // Allocate IDs 5 through 13.
        for _ in 1..10 {
            let _id = legacy_irq_allocator.allocate_id().unwrap();
        }

        let irq = 10;
        legacy_irq_allocator.free_id(irq).unwrap();
        assert!(legacy_irq_allocator.freed_ids.contains(&irq));
        // Releasing the same ID twice is an error.
        assert_eq!(
            legacy_irq_allocator.free_id(10).unwrap_err(),
            Error::AlreadyReleased(10)
        );
        let irq = 9;
        legacy_irq_allocator.free_id(irq).unwrap();
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 2);
        // The freed set is ordered, so 9 sits ahead of 10.
        assert_eq!(*legacy_irq_allocator.freed_ids.iter().next().unwrap(), 9);

        // The smallest freed ID is reused first and leaves the freed set.
        let irq = legacy_irq_allocator.allocate_id().unwrap();
        assert_eq!(irq, 9);
        assert!(!legacy_irq_allocator.freed_ids.contains(&irq));
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 1);
        // IDs that were never handed out cannot be freed.
        assert_eq!(
            legacy_irq_allocator.free_id(21).unwrap_err(),
            Error::NeverAllocated(21)
        );
    }

    #[test]
    fn test_id_sanity_checks() {
        let legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();

        assert!(!legacy_irq_allocator.id_in_range(4));
        assert!(legacy_irq_allocator.id_in_range(6));
        assert!(!legacy_irq_allocator.id_in_range(25));
    }
}
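
// A hedged serde round-trip sketch: it assumes the crate's `serde` feature is
// enabled and that `serde_json` is available as a dev-dependency, neither of
// which this file confirms. It checks that a snapshotted allocator restores
// with its cursor and freed set intact.
#[cfg(all(test, feature = "serde"))]
mod serde_sketch {
    use crate::id_allocator::IdAllocator;

    #[test]
    fn snapshot_round_trip() {
        let mut allocator = IdAllocator::new(5, 23).unwrap();
        allocator.allocate_id().unwrap(); // 5
        allocator.allocate_id().unwrap(); // 6
        allocator.free_id(5).unwrap();

        // Serialize and restore; the restored allocator must behave the same:
        // the freed ID 5 is reused first, then allocation resumes at 7.
        let snapshot = serde_json::to_string(&allocator).unwrap();
        let mut restored: IdAllocator = serde_json::from_str(&snapshot).unwrap();
        assert_eq!(restored.allocate_id().unwrap(), 5);
        assert_eq!(restored.allocate_id().unwrap(), 7);
    }
}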