1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
// Copyright 2022 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// Copyright © 2019 Intel Corporation. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause

//! Provides allocation and releasing strategy for IDs.
//!
//! This module implements allocation strategies for all resource types
//! that can be abstracted to an integer.

use crate::{Error, Result};
use std::collections::BTreeSet;

/// A unique ID allocator that allows management of IDs in a given interval.
// Internal representation of IdAllocator. Contains the ends of the interval
// that is managed, a field that points at the next available ID, and a
// BTreeSet used to store the released IDs. The reason we chose a
// BTreeSet is that the average complexity for deletion and insertion is
// O(logN) compared to Vec for example, another benefit is that the entries
// are sorted so we will always use the first available ID.
#[derive(Debug)]
pub struct IdAllocator {
    // Beginning (inclusive) of the range of IDs that we want to manage.
    range_base: u32,
    // First available id that was never allocated. `None` once incrementing
    // past the last handed-out id would overflow u32.
    next_id: Option<u32>,
    // End (inclusive) of the range of IDs that we want to manage.
    range_end: u32,
    // Set of all freed ids that can be reused at subsequent allocations.
    // BTreeSet iterates in sorted order, so reuse always picks the smallest
    // freed id first.
    freed_ids: BTreeSet<u32>,
}

impl IdAllocator {
    /// Creates a new instance of IdAllocator that will be used to manage the
    /// allocation and release of ids from the inclusive interval specified by
    /// `range_base` and `range_end`.
    ///
    /// # Errors
    /// Returns `Error::InvalidRange` when `range_end` is smaller than
    /// `range_base`.
    pub fn new(range_base: u32, range_end: u32) -> Result<Self> {
        if range_end < range_base {
            return Err(Error::InvalidRange(range_base.into(), range_end.into()));
        }
        Ok(IdAllocator {
            range_base,
            next_id: Some(range_base),
            range_end,
            freed_ids: BTreeSet::<u32>::new(),
        })
    }

    // Returns true when `id` falls inside the managed inclusive interval
    // [range_base, range_end].
    fn id_in_range(&self, id: u32) -> bool {
        // Check for out of range.
        self.range_base <= id && id <= self.range_end
    }

    /// Allocate an ID from the managed range.
    /// We first try to reuse one of the IDs released before. If there is no
    /// ID to reuse we return the next available one from the managed range.
    ///
    /// # Errors
    /// Returns `Error::ResourceNotAvailable` when every id in the range has
    /// been handed out, and `Error::Overflow` when advancing past a previous
    /// allocation overflowed u32.
    pub fn allocate_id(&mut self) -> Result<u32> {
        // Reuse the smallest freed id first; BTreeSet iterates in sorted
        // order so `iter().next()` yields the minimum.
        if let Some(&ret_value) = self.freed_ids.iter().next() {
            self.freed_ids.remove(&ret_value);
            return Ok(ret_value);
        }
        // If no id was freed before we return the next available id.
        if let Some(next_id) = self.next_id {
            if next_id > self.range_end {
                return Err(Error::ResourceNotAvailable);
            }
            // Prepare the next available id. If the addition overflows we
            // set the next_id field to None and return Overflow at the next
            // allocation request.
            self.next_id = next_id.checked_add(1);
            return Ok(next_id);
        }
        Err(Error::Overflow)
    }

    /// Frees an id from the managed range.
    ///
    /// # Errors
    /// Returns `Error::OutOfRange` when `id` is outside the managed interval,
    /// `Error::NeverAllocated` when `id` was never handed out, and
    /// `Error::AlreadyReleased` when `id` is already in the freed set.
    pub fn free_id(&mut self, id: u32) -> Result<u32> {
        // Check if the id belongs to the managed range and if it was not
        // released before. Return error if any of the conditions is not met.
        if !self.id_in_range(id) {
            return Err(Error::OutOfRange(id));
        }
        // `next_id` is the first id that was NEVER allocated, so `next_id`
        // itself (and everything past it) cannot be freed. The previous
        // check used `next_id < id`, which wrongly accepted freeing
        // `id == next_id`; that id would then be handed out once from
        // `freed_ids` and again from `next_id` — a double allocation.
        if let Some(next_id) = self.next_id {
            if next_id <= id {
                return Err(Error::NeverAllocated(id));
            }
        }

        // Insert the released id in the set of released id to avoid releasing
        // it in next iterations.
        self.freed_ids
            .insert(id)
            .then(|| id)
            .ok_or(Error::AlreadyReleased(id))
    }
}

#[cfg(test)]
mod tests {
    use crate::id_allocator::IdAllocator;
    use crate::Error;

    // Exercises construction validation and sequential allocation until the
    // managed range [5, 23] is exhausted.
    #[test]
    fn test_slot_id_allocation() {
        let faulty_allocator = IdAllocator::new(23, 5);
        assert_eq!(faulty_allocator.unwrap_err(), Error::InvalidRange(23, 5));
        let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();
        assert_eq!(legacy_irq_allocator.range_base, 5);
        assert_eq!(legacy_irq_allocator.range_end, 23);

        // First allocation returns the range base and advances the cursor.
        let id = legacy_irq_allocator.allocate_id().unwrap();
        assert_eq!(id, 5);
        assert_eq!(legacy_irq_allocator.next_id.unwrap(), 6);

        // Drain the remaining 18 ids (6..=23).
        for _ in 1..19 {
            assert!(legacy_irq_allocator.allocate_id().is_ok());
        }

        // All 19 ids are taken; further allocations must fail.
        assert_eq!(
            legacy_irq_allocator.allocate_id().unwrap_err(),
            Error::ResourceNotAvailable
        );
    }

    // A range ending at u32::MAX makes `next_id` overflow to None after the
    // last allocation; the next request must report Overflow.
    #[test]
    fn test_u32_overflow() {
        let mut allocator = IdAllocator::new(u32::MAX - 1, u32::MAX).unwrap();
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX - 1);
        assert_eq!(allocator.allocate_id().unwrap(), u32::MAX);
        let res = allocator.allocate_id();
        assert!(res.is_err());
        assert_eq!(res.unwrap_err(), Error::Overflow);
    }

    // Covers the free path: out-of-range rejection, double-free detection,
    // smallest-freed-id reuse, and freeing a never-allocated id.
    #[test]
    fn test_slot_id_free() {
        let mut legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();
        // 3 is below range_base, so it cannot be freed.
        assert_eq!(
            legacy_irq_allocator.free_id(3).unwrap_err(),
            Error::OutOfRange(3)
        );
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 0);

        // Allocate ids 5..=13 (next_id becomes 14).
        for _ in 1..10 {
            let _id = legacy_irq_allocator.allocate_id().unwrap();
        }

        let irq = 10;
        legacy_irq_allocator.free_id(irq).unwrap();
        assert!(legacy_irq_allocator.freed_ids.contains(&irq));
        // Freeing the same id twice is rejected.
        assert_eq!(
            legacy_irq_allocator.free_id(10).unwrap_err(),
            Error::AlreadyReleased(10)
        );
        let irq = 9;
        legacy_irq_allocator.free_id(irq).unwrap();
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 2);
        // BTreeSet keeps the freed ids sorted, so 9 comes before 10.
        assert_eq!(*legacy_irq_allocator.freed_ids.iter().next().unwrap(), 9);

        // Allocation reuses the smallest freed id first.
        let irq = legacy_irq_allocator.allocate_id().unwrap();
        assert_eq!(irq, 9);
        assert!(!legacy_irq_allocator.freed_ids.contains(&irq));
        assert_eq!(legacy_irq_allocator.freed_ids.len(), 1);
        // 21 is in range but was never handed out (next_id is 14).
        assert_eq!(
            legacy_irq_allocator.free_id(21).unwrap_err(),
            Error::NeverAllocated(21)
        );
    }

    // Boundary checks for the inclusive range test.
    #[test]
    fn test_id_sanity_checks() {
        let legacy_irq_allocator = IdAllocator::new(5, 23).unwrap();

        assert!(!legacy_irq_allocator.id_in_range(4));
        assert!(legacy_irq_allocator.id_in_range(6));
        assert!(!legacy_irq_allocator.id_in_range(25));
    }
}