use crate::{Error, Result};
use std::collections::BTreeSet;
/// Allocator of unique `u32` ids drawn from a fixed inclusive range,
/// recycling freed ids (smallest first) before fresh ones.
#[derive(Debug)]
pub struct IdAllocator {
    // Lowest id (inclusive) managed by this allocator.
    range_base: u32,
    // Next never-allocated id to hand out; `None` once the counter has
    // overflowed past `u32::MAX` (only possible when `range_end == u32::MAX`).
    next_id: Option<u32>,
    // Highest id (inclusive) managed by this allocator.
    range_end: u32,
    // Ids that were allocated and later freed, kept sorted so the smallest
    // is recycled first.
    freed_ids: BTreeSet<u32>,
}
impl IdAllocator {
pub fn new(range_base: u32, range_end: u32) -> Result<Self> {
if range_end < range_base {
return Err(Error::InvalidRange(range_base.into(), range_end.into()));
}
Ok(IdAllocator {
range_base,
next_id: Some(range_base),
range_end,
freed_ids: BTreeSet::<u32>::new(),
})
}
fn id_in_range(&self, id: u32) -> bool {
self.range_base <= id && id <= self.range_end
}
pub fn allocate_id(&mut self) -> Result<u32> {
if !self.freed_ids.is_empty() {
let ret_value = *self.freed_ids.iter().next().unwrap();
self.freed_ids.remove(&ret_value);
return Ok(ret_value);
}
if let Some(next_id) = self.next_id {
if next_id > self.range_end {
return Err(Error::ResourceNotAvailable);
}
self.next_id = next_id.checked_add(1);
return Ok(next_id);
}
Err(Error::Overflow)
}
pub fn free_id(&mut self, id: u32) -> Result<u32> {
if !self.id_in_range(id) {
return Err(Error::OutOfRange(id));
}
if let Some(next_id) = self.next_id {
if next_id < id {
return Err(Error::NeverAllocated(id));
}
}
self.freed_ids
.insert(id)
.then(|| id)
.ok_or(Error::AlreadyReleased(id))
}
}
#[cfg(test)]
mod tests {
    use crate::id_allocator::IdAllocator;
    use crate::Error;

    #[test]
    fn test_slot_id_allocation() {
        // A range whose end precedes its base must be rejected outright.
        assert_eq!(
            IdAllocator::new(23, 5).unwrap_err(),
            Error::InvalidRange(23, 5)
        );

        let mut allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(allocator.range_base, 5);
        assert_eq!(allocator.range_end, 23);

        // The first id handed out is the base of the range.
        let first = allocator.allocate_id().unwrap();
        assert_eq!(first, 5);
        assert_eq!(allocator.next_id.unwrap(), 6);

        // Drain the remaining eighteen ids (6..=23).
        for _ in 1..19 {
            assert!(allocator.allocate_id().is_ok());
        }
        // The range is now exhausted.
        assert_eq!(
            allocator.allocate_id().unwrap_err(),
            Error::ResourceNotAvailable
        );
    }

    #[test]
    fn test_u32_overflow() {
        // A range ending at u32::MAX exercises the counter-overflow path.
        let mut allocator = IdAllocator::new(u32::MAX - 1, u32::MAX).unwrap();
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX - 1);
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX);
        let exhausted = allocator.allocate_id();
        assert!(exhausted.is_err());
        assert_eq!(exhausted.unwrap_err(), Error::Overflow);
    }

    #[test]
    fn test_slot_id_free() {
        let mut allocator = IdAllocator::new(5, 23).unwrap();

        // Freeing an id below the range fails and leaves no trace behind.
        assert_eq!(allocator.free_id(3).unwrap_err(), Error::OutOfRange(3));
        assert_eq!(allocator.freed_ids.len(), 0);

        // Hand out nine ids: 5..=13.
        for _ in 1..10 {
            let _ = allocator.allocate_id().unwrap();
        }

        // Freeing an allocated id records it for recycling.
        allocator.free_id(10).unwrap();
        assert!(allocator.freed_ids.contains(&10));
        // A double free of the same id is reported.
        assert_eq!(
            allocator.free_id(10).unwrap_err(),
            Error::AlreadyReleased(10)
        );

        allocator.free_id(9).unwrap();
        assert_eq!(allocator.freed_ids.len(), 2);
        // The smallest freed id sits at the front of the free list...
        assert_eq!(*allocator.freed_ids.iter().next().unwrap(), 9);
        // ...and is the one recycled by the next allocation.
        let recycled = allocator.allocate_id().unwrap();
        assert_eq!(recycled, 9);
        assert!(!allocator.freed_ids.contains(&recycled));
        assert_eq!(allocator.freed_ids.len(), 1);

        // An in-range id that was never handed out cannot be freed.
        assert_eq!(
            allocator.free_id(21).unwrap_err(),
            Error::NeverAllocated(21)
        );
    }

    #[test]
    fn test_id_sanity_checks() {
        let allocator = IdAllocator::new(5, 23).unwrap();
        assert!(!allocator.id_in_range(4));
        assert!(allocator.id_in_range(6));
        assert!(!allocator.id_in_range(25));
    }
}