// small_collections/maps/heapless_btree_map.rs
1//! Stack-allocated sorted map and its key-value entry type.
2//!
3//! This module is the **stack half** of [`SmallBTreeMap`](crate::SmallBTreeMap).  It holds up to
4//! `N` entries in a `heapless::Vec` sorted by key and exposes an `insert` that returns
5//! `Err((key, value))` when the vec is full so the caller can transparently *spill* to a heap
6//! `BTreeMap`.
7
8use core::borrow::Borrow;
9use core::cmp::Ordering;
10use heapless::Vec as HeaplessVec;
11
12// ─── Entry ────────────────────────────────────────────────────────────────────
13
14/// A key-value pair whose ordering is determined **solely** by the key.
15///
16/// This newtype is stored inside [`HeaplessBTreeMap`]'s internal `heapless::Vec` so that
17/// `binary_search_by` can use the standard `Ord` implementation without needing a
18/// separate comparator every call site.
19///
20/// # Design Consideration
21/// The value is intentionally excluded from all comparison traits.  This mirrors the
22/// semantics of `BTreeMap`: two entries with the same key are considered *equal*
23/// regardless of their values, making in-place replacement safe.
#[derive(Debug, Clone)]
// Field 0 is the key — it drives every comparison trait below.
// Field 1 is the value — intentionally excluded from all comparisons.
pub struct Entry<K, V>(pub K, pub V);
26
27impl<K: PartialEq, V> PartialEq for Entry<K, V> {
28    /// Returns `true` iff the two entries share the same key.
29    fn eq(&self, other: &Self) -> bool {
30        self.0.eq(&other.0)
31    }
32}
33
// Marker impl: when the key's equality is total (`K: Eq`), entry equality is
// total as well; the value still plays no part.
impl<K: Eq, V> Eq for Entry<K, V> {}
35
36impl<K: PartialOrd, V> PartialOrd for Entry<K, V> {
37    /// Delegates to the key's `partial_cmp`; the value is ignored.
38    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
39        self.0.partial_cmp(&other.0)
40    }
41}
42
43impl<K: Ord, V> Ord for Entry<K, V> {
44    /// Delegates to the key's `cmp`; the value is ignored.
45    fn cmp(&self, other: &Self) -> Ordering {
46        self.0.cmp(&other.0)
47    }
48}
49
50// ─── HeaplessBTreeMap ─────────────────────────────────────────────────────────
51
52/// A **stack-allocated**, sorted map backed by a `heapless::Vec<Entry<K, V>, N>`.
53///
54/// # Overview
55/// Entries are kept in ascending key order at all times.  Insertions and lookups use
56/// `binary_search_by` for **O(log N)** key comparisons.  Insertion still takes **O(N)**
57/// time overall because items may need to be shifted to maintain sorted order — acceptable
58/// for small `N` (e.g. ≤ 32) where the constant factor of a move is very cheap.
59///
60/// # Overflow protocol
61/// When the map is full and a *new* key is inserted, [`insert`](HeaplessBTreeMap::insert)
62/// returns `Err((key, value))` with the original key and value back.  The caller is
63/// expected to **spill** to a heap-backed `BTreeMap` and retry the insert there.
64///
65/// # Generic parameters
66/// | Parameter | Meaning |
67/// |-----------|---------|
68/// | `K`       | Key type; must implement `Ord` |
69/// | `V`       | Value type |
70/// | `N`       | Stack capacity (number of entries) |
71///
72/// # Design Considerations
73/// - **No allocator dependency**: uses `heapless::Vec`, so the entire structure lives in
74///   the caller's stack frame.
75/// - **`Borrow<Q>` support**: `get`, `get_mut`, and `remove` accept any borrowed form of
76///   the key (e.g. `&str` for `String` keys) via the standard `Borrow` trait.
77/// - **Sorted-vec vs hash**: sorted-vec was chosen because `BTreeMap` semantics require
78///   sorted iteration.  A hash-based approach would break `IntoIterator` order guarantees.
79///
80/// # Pseudo-code Implementation
81/// `HeaplessBTreeMap` maintains a sorted array of `(Key, Value)` entries.
82///
83/// ```text
84/// // 1. Lookup (get)
85/// idx = binary_search(key) // O(log N)
86/// if idx: return values[idx]
87///
88/// // 2. Insertion (insert)
89/// idx = binary_search(key)
90/// if idx:
91///     update values[idx]; return old
92/// else:
93///     if len == N: return Err (Full)
94///     shift_right(idx..len) // O(N)
95///     insert (key, value) at idx
96///
97/// // 3. Removal (remove)
98/// idx = binary_search(key)
99/// if idx:
100///     val = values[idx]
101///     shift_left(idx+1..len) // O(N)
102///     return val
103/// ```
#[derive(Debug, Clone)]
pub struct HeaplessBTreeMap<K, V, const N: usize> {
    // Invariant: always sorted ascending by key (`Entry`'s `Ord` delegates to
    // `K`), which is what makes the `binary_search_by` calls in the impl valid.
    buf: HeaplessVec<Entry<K, V>, N>,
}
108
109impl<K: Ord, V, const N: usize> HeaplessBTreeMap<K, V, N> {
110    /// Creates an empty map.  No allocation occurs.
111    pub fn new() -> Self {
112        const {
113            assert!(
114                std::mem::size_of::<Self>() <= 16 * 1024,
115                "HeaplessBTreeMap is too large! The struct size exceeds the 16KB limit. Reduce N."
116            );
117        }
118        Self {
119            buf: HeaplessVec::new(),
120        }
121    }
122
123    /// Returns the number of entries currently stored.
124    pub fn len(&self) -> usize {
125        self.buf.len()
126    }
127
128    /// Returns `true` if the map contains no entries.
129    pub fn is_empty(&self) -> bool {
130        self.buf.is_empty()
131    }
132
133    /// Returns `true` if the map has reached its compile-time capacity `N`.
134    ///
135    /// When `is_full()` returns `true`, inserting a *new* key will return `Err`.
136    pub fn is_full(&self) -> bool {
137        self.buf.is_full()
138    }
139
140    /// Removes all entries, dropping the keys and values in place.
141    pub fn clear(&mut self) {
142        self.buf.clear();
143    }
144
145    /// Inserts or updates a key-value pair.
146    ///
147    /// # Returns
148    /// | Variant | Meaning |
149    /// |---------|---------|
150    /// | `Ok(Some(old))` | Key already existed; old value returned, new value stored. |
151    /// | `Ok(None)` | Key was new; entry inserted in sort position. |
152    /// | `Err((key, value))` | Map is full and key is new; **caller must spill to heap**. |
153    ///
154    /// # Complexity
155    /// O(log N) for the binary search, O(N) for the potential shift on insertion.
156    pub fn insert(&mut self, key: K, value: V) -> Result<Option<V>, (K, V)> {
157        match self.buf.binary_search_by(|e| e.0.cmp(&key)) {
158            Ok(idx) => Ok(Some(core::mem::replace(&mut self.buf[idx].1, value))),
159            Err(idx) => {
160                if self.buf.is_full() {
161                    Err((key, value))
162                } else {
163                    self.buf.insert(idx, Entry(key, value)).ok().unwrap();
164                    Ok(None)
165                }
166            }
167        }
168    }
169
170    /// Returns a shared reference to the value associated with `key`, or `None`.
171    ///
172    /// Accepts any type `Q` where `K: Borrow<Q>` and `Q: Ord`.
173    /// Complexity: O(log N).
174    pub fn get<Q>(&self, key: &Q) -> Option<&V>
175    where
176        K: Borrow<Q>,
177        Q: Ord + ?Sized,
178    {
179        match self.buf.binary_search_by(|e| e.0.borrow().cmp(key)) {
180            Ok(idx) => Some(&self.buf[idx].1),
181            Err(_) => None,
182        }
183    }
184
185    /// Returns an exclusive reference to the value associated with `key`, or `None`.
186    ///
187    /// Accepts any type `Q` where `K: Borrow<Q>` and `Q: Ord`.
188    /// Complexity: O(log N).
189    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
190    where
191        K: Borrow<Q>,
192        Q: Ord + ?Sized,
193    {
194        match self.buf.binary_search_by(|e| e.0.borrow().cmp(key)) {
195            Ok(idx) => Some(&mut self.buf[idx].1),
196            Err(_) => None,
197        }
198    }
199
200    /// Removes the entry associated with `key` and returns its value, or `None` if absent.
201    ///
202    /// Accepts any type `Q` where `K: Borrow<Q>` and `Q: Ord`.
203    /// Complexity: O(log N) for the search, O(N) for the shift-left after removal.
204    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
205    where
206        K: Borrow<Q>,
207        Q: Ord + ?Sized,
208    {
209        match self.buf.binary_search_by(|e| e.0.borrow().cmp(key)) {
210            Ok(idx) => Some(self.buf.remove(idx).1),
211            Err(_) => None,
212        }
213    }
214
215    /// Returns an iterator over `&Entry<K, V>` in ascending key order.
216    ///
217    /// The iterator yields shared references; entries cannot be modified through it.
218    pub fn iter(&self) -> core::slice::Iter<'_, Entry<K, V>> {
219        self.buf.iter()
220    }
221
222    /// Consumes `self` and returns the underlying `heapless::Vec`.
223    ///
224    /// Useful when the caller needs raw access to the sorted buffer, e.g. to drain entries
225    /// during a spill-to-heap operation.
226    pub fn into_vec(self) -> HeaplessVec<Entry<K, V>, N> {
227        self.buf
228    }
229}
230
impl<K: Ord, V, const N: usize> Default for HeaplessBTreeMap<K, V, N> {
    /// Creates an empty map.  Equivalent to [`HeaplessBTreeMap::new`].
    fn default() -> Self {
        // Delegate to `new()` so its compile-time size assertion also guards
        // maps constructed through `Default`.
        Self::new()
    }
}
237
impl<K: Ord, V, const N: usize> IntoIterator for HeaplessBTreeMap<K, V, N> {
    type Item = Entry<K, V>;
    // NOTE(review): the third generic argument (`usize`) appears to be the
    // heapless length-type parameter, which pins this to a specific `heapless`
    // major version — confirm against the version in Cargo.toml.
    type IntoIter = heapless::vec::IntoIter<Entry<K, V>, N, usize>;

    /// Consumes `self` and returns an iterator over `Entry<K, V>` in ascending key order.
    fn into_iter(self) -> Self::IntoIter {
        self.buf.into_iter()
    }
}
247
248impl<K: Ord, V: PartialEq, const N: usize> PartialEq for HeaplessBTreeMap<K, V, N> {
249    fn eq(&self, other: &Self) -> bool {
250        if self.len() != other.len() {
251            return false;
252        }
253        self.iter()
254            .zip(other.iter())
255            .all(|(a, b)| a.0 == b.0 && a.1 == b.1)
256    }
257}
258
// Marker impl: the `PartialEq` above compares keys and values, so total
// equality additionally needs `V: Eq` (`K: Ord` already implies `K: Eq`).
impl<K: Ord, V: Eq, const N: usize> Eq for HeaplessBTreeMap<K, V, N> {}
260
261impl<K: Ord, V: PartialOrd, const N: usize> PartialOrd for HeaplessBTreeMap<K, V, N> {
262    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
263        self.iter()
264            .map(|e| (&e.0, &e.1))
265            .partial_cmp(other.iter().map(|e| (&e.0, &e.1)))
266    }
267}
268
269impl<K: Ord, V: Ord, const N: usize> Ord for HeaplessBTreeMap<K, V, N> {
270    fn cmp(&self, other: &Self) -> Ordering {
271        self.iter()
272            .map(|e| (&e.0, &e.1))
273            .cmp(other.iter().map(|e| (&e.0, &e.1)))
274    }
275}
276
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_heapless_btree_map_stack_ops_sorted_order() {
        // Insert out of order; iteration must still be ascending by key.
        let mut m: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        for (k, v) in [(3, 30), (1, 10), (2, 20)] {
            m.insert(k, v).unwrap();
        }
        let keys: Vec<i32> = m.iter().map(|e| e.0).collect();
        assert_eq!(keys, [1, 2, 3]);
    }

    #[test]
    fn test_heapless_btree_map_stack_ops_update() {
        let mut m: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        assert_eq!(m.insert(1, 10), Ok(None));
        // Re-inserting an existing key swaps the value and yields the old one.
        assert_eq!(m.insert(1, 20), Ok(Some(10)));
        assert_eq!(m.get(&1), Some(&20));
    }

    #[test]
    fn test_heapless_btree_map_stack_ops_remove() {
        let mut m: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m.insert(1, 10).unwrap();
        m.insert(2, 20).unwrap();
        assert_eq!(m.remove(&1), Some(10));
        assert_eq!(m.len(), 1);
        assert_eq!(m.get(&1), None);
    }

    #[test]
    fn test_heapless_btree_map_stack_ops_full_returns_err() {
        let mut m: HeaplessBTreeMap<i32, i32, 2> = HeaplessBTreeMap::new();
        m.insert(1, 10).unwrap();
        m.insert(2, 20).unwrap();
        assert!(m.is_full());
        // A brand-new key on a full map is handed back for a heap spill.
        assert_eq!(m.insert(3, 30), Err((3, 30)));
    }

    #[test]
    fn test_heapless_btree_map_stack_ops_get_mut() {
        let mut m: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m.insert(1, 10).unwrap();
        let slot = m.get_mut(&1).expect("key 1 was just inserted");
        *slot = 99;
        assert_eq!(m.get(&1), Some(&99));
    }

    #[test]
    fn test_heapless_btree_map_traits_clone_default() {
        let original: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::default();
        let mut copy = original.clone();
        copy.insert(1, 1).unwrap();
        // The clone is independent: mutating it leaves the source untouched.
        assert_eq!(original.len(), 0);
        assert_eq!(copy.len(), 1);
    }

    #[test]
    fn test_heapless_btree_map_traits_entry_ord() {
        let a = Entry(1i32, 10i32);
        let b = Entry(1i32, 20i32);
        let c = Entry(2i32, 10i32);
        // Equality and ordering look only at the key; values differ in `b`.
        assert_eq!(a, b);
        assert!(a < c);
        assert_eq!(a.cmp(&b), Ordering::Equal);
        assert_eq!(a.partial_cmp(&c), Some(Ordering::Less));
    }
}
349
#[cfg(test)]
mod heapless_btree_map_coverage_tests {
    use super::*;

    /// `is_empty` must flip to `false` after the first insertion.
    #[test]
    fn test_is_empty_false() {
        let mut map: HeaplessBTreeMap<i32, i32, 2> = HeaplessBTreeMap::new();
        map.insert(1, 10).unwrap();
        assert!(!map.is_empty());
    }

    /// `get_mut` on an absent key returns `None` (miss path of the binary search).
    #[test]
    fn test_get_mut_missing() {
        let mut map: HeaplessBTreeMap<i32, i32, 2> = HeaplessBTreeMap::new();
        map.insert(1, 10).unwrap();
        assert_eq!(map.get_mut(&2), None);
    }

    /// `into_vec` hands back the underlying buffer, still in sorted key order.
    #[test]
    fn test_into_vec() {
        let mut map: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        map.insert(2, 20).unwrap();
        map.insert(1, 10).unwrap();

        // Stored in sorted order: (1, 10), (2, 20)
        let vec = map.into_vec();
        assert_eq!(vec.len(), 2);
        assert_eq!(vec[0].0, 1);
        assert_eq!(vec[1].0, 2);
    }

    /// Map equality compares lengths, keys, and values — unlike `Entry`'s
    /// key-only equality.
    #[test]
    fn test_traits_partial_eq() {
        let mut m1: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m1.insert(1, 10).unwrap();

        let mut m2: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m2.insert(1, 10).unwrap();

        let mut m3: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m3.insert(1, 10).unwrap();
        m3.insert(2, 20).unwrap();

        let mut m4: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m4.insert(1, 99).unwrap(); // Same key, diff val

        assert_eq!(m1, m2);
        assert_ne!(m1, m3); // len differing
        assert_ne!(m1, m4); // vals differing
    }

    /// Map ordering is lexicographic over (key, value) pairs; a map is equal
    /// to itself.
    #[test]
    fn test_traits_ord_partial_ord() {
        let mut m1: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m1.insert(1, 10).unwrap();
        m1.insert(2, 20).unwrap();

        let mut m2: HeaplessBTreeMap<i32, i32, 4> = HeaplessBTreeMap::new();
        m2.insert(1, 10).unwrap();
        m2.insert(3, 30).unwrap();

        assert!(m1 < m2);
        assert_eq!(m1.cmp(&m2), Ordering::Less);
        assert_eq!(m1.partial_cmp(&m2), Some(Ordering::Less));

        // Identity
        assert_eq!(m1.cmp(&m1), Ordering::Equal);
    }
}