// small_collections/maps/map.rs

1//! Hash map that lives on the stack and spills to the heap.
2//!
3//! Provides [`SmallMap`] — backed by a `heapless::FnvIndexMap` on the stack (power-of-two
4//! capacity `N`) and a `hashbrown::HashMap` on the heap.  Both sides use FNV hashing for
5//! consistent, low-latency performance.
6//!
7//! [`AnyMap`] is an object-safe trait abstracting over `SmallMap`, `HashMap`, and `OrderMap`.
8//! [`SmallMapEntry`] mirrors the standard `Entry` API for in-place mutation.
9
10use core::mem::ManuallyDrop;
11use core::ptr;
12use std::borrow::Borrow;
13use std::fmt::{self, Debug};
14use std::hash::{BuildHasher, Hash, Hasher};
15use std::iter::FromIterator;
16use std::ops::{Index, IndexMut};
17
/// A trait for abstraction over different map types (Stack, Heap, Small).
///
/// Implemented in this file for `std::collections::HashMap`,
/// `hashbrown::HashMap`, and [`SmallMap`], so callers can be generic over the
/// storage strategy.
pub trait AnyMap<K, V> {
    /// Returns the number of elements.
    fn len(&self) -> usize;
    /// Returns `true` if the collection is empty.
    ///
    /// Default implementation derived from [`AnyMap::len`].
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Inserts the given key-value pair, returning the previously stored value
    /// for this key, if any.
    fn insert(&mut self, key: K, value: V) -> Option<V>;
    /// Returns a reference to the value corresponding to the key.
    fn get<Q>(&self, key: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized;
    /// Returns a mutable reference to the value corresponding to the key.
    fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized;
    /// Removes the key, returning the value that was stored under it, if any.
    fn remove<Q>(&mut self, key: &Q) -> Option<V>
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized;
    /// Returns `true` if the collection contains the key.
    fn contains_key<Q>(&self, key: &Q) -> bool
    where
        K: Borrow<Q>,
        Q: Hash + Eq + ?Sized;
    /// Clears all elements from the collection.
    fn clear(&mut self);
}
51
52impl<K: Eq + Hash, V, S: BuildHasher> AnyMap<K, V> for std::collections::HashMap<K, V, S> {
53    fn len(&self) -> usize {
54        self.len()
55    }
56    fn insert(&mut self, key: K, value: V) -> Option<V> {
57        self.insert(key, value)
58    }
59    fn get<Q>(&self, key: &Q) -> Option<&V>
60    where
61        K: Borrow<Q>,
62        Q: Hash + Eq + ?Sized,
63    {
64        self.get(key)
65    }
66    fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
67    where
68        K: Borrow<Q>,
69        Q: Hash + Eq + ?Sized,
70    {
71        self.get_mut(key)
72    }
73    fn remove<Q>(&mut self, key: &Q) -> Option<V>
74    where
75        K: Borrow<Q>,
76        Q: Hash + Eq + ?Sized,
77    {
78        self.remove(key)
79    }
80    fn contains_key<Q>(&self, key: &Q) -> bool
81    where
82        K: Borrow<Q>,
83        Q: Hash + Eq + ?Sized,
84    {
85        self.contains_key(key)
86    }
87    fn clear(&mut self) {
88        self.clear();
89    }
90}
91
92impl<K: Eq + Hash, V, S: BuildHasher> AnyMap<K, V> for hashbrown::HashMap<K, V, S> {
93    fn len(&self) -> usize {
94        self.len()
95    }
96    fn insert(&mut self, key: K, value: V) -> Option<V> {
97        self.insert(key, value)
98    }
99    fn get<Q>(&self, key: &Q) -> Option<&V>
100    where
101        K: Borrow<Q>,
102        Q: Hash + Eq + ?Sized,
103    {
104        self.get(key)
105    }
106    fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
107    where
108        K: Borrow<Q>,
109        Q: Hash + Eq + ?Sized,
110    {
111        self.get_mut(key)
112    }
113    fn remove<Q>(&mut self, key: &Q) -> Option<V>
114    where
115        K: Borrow<Q>,
116        Q: Hash + Eq + ?Sized,
117    {
118        self.remove(key)
119    }
120    fn contains_key<Q>(&self, key: &Q) -> bool
121    where
122        K: Borrow<Q>,
123        Q: Hash + Eq + ?Sized,
124    {
125        self.contains_key(key)
126    }
127    fn clear(&mut self) {
128        self.clear();
129    }
130}
131
132// Use 'hashbrown' directly for the Raw Entry API (allows preventing double-hashing during spill)
133use hashbrown::HashMap;
134// Use 'fnv' to match heapless's internal hasher for consistent performance
135use fnv::FnvBuildHasher;
136// Use 'heapless' for the stack storage
137use heapless::index_map::FnvIndexMap;
138
/// A map that lives on the stack for `N` items, then automatically spills to the heap.
///
/// # Overview
/// * **Stack State:** Zero allocations. Extremely fast FNV hashing. Data is stored inline.
/// * **Heap State:** Standard `HashMap` performance. Data is stored on the heap.
/// * **Spill:** Occurs automatically when the stack capacity `N` is exceeded. This is a
///   "Zero-Allocation Move"—keys/values are moved, not cloned. Once spilled, the map
///   never migrates back to the stack (see `clear`).
///
/// # Capacity Constraints (`N`)
/// Due to the underlying `heapless` implementation constraints:
/// * `N` must be a **power of two** (e.g., 2, 4, 8, 16, 32).
/// * `N` must be **greater than 1**.
///
/// **Compilation will fail if these constraints are not met.**
pub struct SmallMap<K, V, const N: usize> {
    /// Tracks whether data is currently in `data.stack` or `data.heap`.
    /// This acts as the "tag" for our manual tagged union; every `unsafe`
    /// access to `data` is guarded by reading this flag first.
    on_stack: bool,

    /// The storage union. Only one field is active at a time; which one is
    /// determined solely by `on_stack`.
    data: MapData<K, V, N>,
}
160
161impl<K: Eq + Hash, V, const N: usize> AnyMap<K, V> for SmallMap<K, V, N> {
162    fn len(&self) -> usize {
163        self.len()
164    }
165    fn insert(&mut self, key: K, value: V) -> Option<V> {
166        self.insert(key, value)
167    }
168    fn get<Q>(&self, key: &Q) -> Option<&V>
169    where
170        K: Borrow<Q>,
171        Q: Hash + Eq + ?Sized,
172    {
173        self.get(key)
174    }
175    fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
176    where
177        K: Borrow<Q>,
178        Q: Hash + Eq + ?Sized,
179    {
180        self.get_mut(key)
181    }
182    fn remove<Q>(&mut self, key: &Q) -> Option<V>
183    where
184        K: Borrow<Q>,
185        Q: Hash + Eq + ?Sized,
186    {
187        self.remove(key)
188    }
189    fn contains_key<Q>(&self, key: &Q) -> bool
190    where
191        K: Borrow<Q>,
192        Q: Hash + Eq + ?Sized,
193    {
194        self.contains_key(key)
195    }
196    fn clear(&mut self) {
197        self.clear();
198    }
199}
200
/// Internal storage union.
///
/// We use `ManuallyDrop` because the compiler cannot know which field is active based on `on_stack`
/// and therefore cannot automatically drop the correct one. We must handle this manually in `impl Drop`.
///
/// Invariant: exactly one field holds a live value at any time, and
/// `SmallMap::on_stack` records which one. Reading the wrong field is UB.
union MapData<K, V, const N: usize> {
    /// Active while `on_stack == true`: inline FNV-hashed index map of capacity `N`.
    stack: ManuallyDrop<FnvIndexMap<K, V, N>>,
    /// Active while `on_stack == false`: heap-allocated map (FNV hashing kept for consistency).
    heap: ManuallyDrop<HashMap<K, V, FnvBuildHasher>>,
}
209
210// --- 1. Core Implementation ---
211
212impl<K, V, const N: usize> SmallMap<K, V, N>
213where
214    K: Eq + Hash,
215{
216    /// The maximum allowed stack size in bytes (16 KB).
217    ///
218    /// Because `SmallMap` stores data inline, a large `N` or large `Key`/`Value` types
219    /// can easily exceed the thread stack size. This limit prevents that.
220    pub const MAX_STACK_SIZE: usize = 16 * 1024;
221
222    /// Creates a new empty map on the stack.
223    ///
224    /// # Compile-Time Safety Check
225    /// This function enforces a strict size limit of **16 KB** (`MAX_STACK_SIZE`).
226    ///
227    /// Because the map size is known at compile time, the compiler will **fail to build**
228    /// if the total size of `SmallMap<K, V, N>` exceeds this limit. This prevents
229    /// accidental Stack Overflows (Segfaults).
230    ///
231    /// # How to fix the build error
232    /// If your code fails to compile pointing to this assertion, you have two options:
233    /// 1. **Reduce `N`:** If you don't need that many items on the stack.
234    /// 2. **Box the Value:** Change `SmallMap<K, V, N>` to `SmallMap<K, Box<V>, N>`.
235    ///    This moves the bulk of the data to the heap immediately, keeping the stack footprint small.
236    pub fn new() -> Self {
237        const {
238            assert!(
239                std::mem::size_of::<Self>() <= Self::MAX_STACK_SIZE,
240                "SmallMap is too large! The struct size exceeds the 16KB limit. Reduce N."
241            );
242        }
243        Self {
244            on_stack: true,
245            data: MapData {
246                stack: ManuallyDrop::new(FnvIndexMap::new()),
247            },
248        }
249    }
250
251    /// Returns `true` if the map is currently storing data on the stack.
252    /// Returns `false` if it has spilled to the heap.
253    #[inline]
254    pub fn is_on_stack(&self) -> bool {
255        self.on_stack
256    }
257
258    /// Returns the number of elements in the map.
259    pub fn len(&self) -> usize {
260        unsafe {
261            // Safety: We check the `on_stack` tag to access the active union field.
262            if self.on_stack {
263                self.data.stack.len()
264            } else {
265                self.data.heap.len()
266            }
267        }
268    }
269
270    /// Returns `true` if the map contains no elements.
271    pub fn is_empty(&self) -> bool {
272        self.len() == 0
273    }
274
275    /// Clears the map, removing all key-value pairs.
276    ///
277    /// * **Stack:** Resets the index to 0.
278    /// * **Heap:** Clears the map but keeps the allocated memory for reuse.
279    pub fn clear(&mut self) {
280        unsafe {
281            if self.on_stack {
282                (*self.data.stack).clear();
283            } else {
284                (*self.data.heap).clear();
285            }
286        }
287    }
288
289    /// Inserts a key-value pair into the map.
290    ///
291    /// If the map is on the stack and full, this triggers a **Spill to Heap**.
292    /// This implementation explicitly checks capacity constraints before insertion,
293    /// ensuring a clean separation between the "Spill Decision" and the "Insert Action".
294    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
295        unsafe {
296            if self.on_stack {
297                let stack_map = &mut *self.data.stack;
298
299                // Check 1: Is the map full?
300                if stack_map.len() == N {
301                    // Check 2: Is this a NEW key? (If it's an update, we fit!)
302                    // Note: We only pay the hashing cost of 'contains_key' if we are at the limit.
303                    if !stack_map.contains_key(&key) {
304                        self.spill_to_heap();
305                        // Fall through to Heap Logic below...
306                    } else {
307                        // Map is full, but we are updating an existing key. Safe to proceed on stack.
308                        return match stack_map.insert(key, value) {
309                            Ok(old_val) => old_val,
310                            Err(_) => unreachable!("Logic Error: Key exists, update must succeed"),
311                        };
312                    }
313                } else {
314                    // Map is not full. Safe to insert.
315                    return match stack_map.insert(key, value) {
316                        Ok(old_val) => old_val,
317                        Err(_) => {
318                            unreachable!("Logic Error: Capacity available, insert must succeed")
319                        }
320                    };
321                }
322            }
323
324            // Heap Logic (Standard Insert)
325            (*self.data.heap).insert(key, value)
326        }
327    }
328
329    /// Retrieves a reference to the value corresponding to the key.
330    ///
331    /// This method is generic over the key type `Q`. This allows you to lookup
332    /// values using a reference (like `&str`) without allocating a new owned
333    /// key (like `String`).
334    ///
335    /// # How it works (The `Borrow` Trait)
336    /// If the map stores keys of type `K` (e.g., `String`), you can pass a
337    /// query key of type `Q` (e.g., `str`) as long as:
338    /// 1. `K` implements `Borrow<Q>` (meaning `String` can be viewed as `str`).
339    /// 2. `Q` implements `Hash` and `Eq`.
340    /// 3. The hash of the borrowed `K` is identical to the hash of `Q`.
341    ///
342    /// # Example
343    /// ```rust
344    /// // Assuming generic import for doc test
345    /// use small_collections::SmallMap;
346    ///
347    /// let mut map = SmallMap::<String, i32, 4>::new();
348    /// map.insert("Apple".to_string(), 10);
349    ///
350    /// // Works efficiently without allocating a new String for the lookup:
351    /// assert_eq!(map.get("Apple"), Some(&10));
352    /// ```
353    pub fn get<Q>(&self, key: &Q) -> Option<&V>
354    where
355        K: Borrow<Q>,          // The Key (String) can be borrowed as Q (str)
356        Q: Hash + Eq + ?Sized, // Q is hashable and comparable (Unsized allows str)
357    {
358        unsafe {
359            if self.on_stack {
360                self.data.stack.get(key)
361            } else {
362                self.data.heap.get(key)
363            }
364        }
365    }
366
367    /// Retrieves a mutable reference to the value corresponding to the key.
368    pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
369    where
370        K: Borrow<Q>,
371        Q: Hash + Eq + ?Sized,
372    {
373        unsafe {
374            if self.on_stack {
375                (*self.data.stack).get_mut(key)
376            } else {
377                (*self.data.heap).get_mut(key)
378            }
379        }
380    }
381
382    /// Removes a key from the map, returning the value at the key if the key was previously in the map.
383    pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
384    where
385        K: Borrow<Q>,
386        Q: Hash + Eq + ?Sized,
387    {
388        unsafe {
389            if self.on_stack {
390                (*self.data.stack).remove(key)
391            } else {
392                (*self.data.heap).remove(key)
393            }
394        }
395    }
396
397    /// Returns `true` if the map contains a value for the specified key.
398    pub fn contains_key<Q>(&self, key: &Q) -> bool
399    where
400        K: Borrow<Q>,
401        Q: Hash + Eq + ?Sized,
402    {
403        unsafe {
404            if self.on_stack {
405                self.data.stack.contains_key(key)
406            } else {
407                self.data.heap.contains_key(key)
408            }
409        }
410    }
411
412    /// **The Logic Core: Stack -> Heap Migration**
413    ///
414    /// This method performs a "Zero-Cost Move" of items from Stack to Heap.
415    /// It avoids cloning keys/values and avoids re-hashing where possible.
416    ///
417    /// # Safety
418    /// This function is marked `unsafe` because it manually manages raw pointers
419    /// and union state. The caller must ensure the map is currently on the stack.
420    #[inline(never)] // Optimization: Keep this cold path out of the hot 'insert' loop
421    unsafe fn spill_to_heap(&mut self) {
422        // We explicitly wrap the body in `unsafe` to satisfy `unsafe_op_in_unsafe_fn` lint
423        unsafe {
424            // 1. "Steal" the Stack Map
425            // `ptr::read` does a bitwise copy of the map struct.
426            // We effectively own the items now. The original memory in `self.data.stack`
427            // is now considered "moved from" and should not be dropped.
428            let stack_map = ptr::read(&*self.data.stack);
429
430            // 2. Allocate Heap Map
431            // We use capacity * 2 to give breathing room after the spill.
432            // We use FnvBuildHasher to maintain consistent hashing behavior with the stack map.
433            let mut new_heap =
434                HashMap::with_capacity_and_hasher(stack_map.len() * 2, FnvBuildHasher::default());
435
436            // Cache the hasher builder to avoid cloning it in the loop
437            let hasher_builder = new_heap.hasher().clone();
438
439            // 3. Migrate Items (The Efficient Way)
440            // `into_iter()` consumes `stack_map`. Since we own it (via ptr::read),
441            // this moves the Keys and Values directly. NO CLONES occur here.
442            for (key, value) in stack_map.into_iter() {
443                // A. Re-calculate hash using the Heap Map's hasher
444                let mut hasher = hasher_builder.build_hasher();
445                key.hash(&mut hasher);
446                let hash = hasher.finish();
447
448                // B. Insert using Raw Entry API
449                // We skip the collision check and probe sequence because we know
450                // we are inserting into a fresh, empty map.
451                new_heap
452                    .raw_entry_mut()
453                    .from_key_hashed_nocheck(hash, &key)
454                    .insert(key, value);
455            }
456
457            // 4. Overwrite Union Memory
458            // CRITICAL: We use `ptr::write` to overwrite the union field.
459            // If we used simple assignment (`self.data.heap = ...`), Rust would try to
460            // Drop the *old* value at that memory address.
461            // But the old value is the `stack` map bits! Treating stack bits as a
462            // heap map pointer would cause a segfault (freeing invalid memory).
463            // `ptr::write` overwrites blindly without dropping the old garbage.
464            ptr::write(&mut self.data.heap, ManuallyDrop::new(new_heap));
465
466            // 5. Flip the Switch
467            self.on_stack = false;
468        }
469    }
470}
471
472/// Allows read access using `map[&key]`.
473///
474/// # Panics
475/// Panics if the key is not present in the map.
476impl<K, V, Q, const N: usize> Index<&Q> for SmallMap<K, V, N>
477where
478    K: Eq + Hash + Borrow<Q>,
479    Q: Eq + Hash + ?Sized,
480{
481    type Output = V;
482
483    fn index(&self, key: &Q) -> &Self::Output {
484        self.get(key).expect("no entry found for key")
485    }
486}
487
488/// Allows mutable access using `map[&key] = new_value`.
489///
490/// # Panics
491/// Panics if the key is not present in the map.
492impl<K, V, Q, const N: usize> IndexMut<&Q> for SmallMap<K, V, N>
493where
494    K: Eq + Hash + Borrow<Q>,
495    Q: Eq + Hash + ?Sized,
496{
497    fn index_mut(&mut self, key: &Q) -> &mut Self::Output {
498        self.get_mut(key).expect("no entry found for key")
499    }
500}
501
// --- Clone Support ---
503
504// Manual implementation of Clone.
505// We must check `on_stack` to know which field to clone.
impl<K, V, const N: usize> Clone for SmallMap<K, V, N>
where
    K: Eq + Hash + Clone,
    V: Clone,
{
    /// Deep-clones the map, preserving its current storage state:
    /// a stack-resident map clones to the stack, a spilled map to the heap.
    fn clone(&self) -> Self {
        // SAFETY: `on_stack` tags the active union field; we read and clone
        // only that field and construct the clone in the matching state.
        unsafe {
            if self.on_stack {
                // Clone the Stack Map
                let stack_clone = (*self.data.stack).clone();
                SmallMap {
                    on_stack: true,
                    data: MapData {
                        stack: ManuallyDrop::new(stack_clone),
                    },
                }
            } else {
                // Clone the Heap Map
                let heap_clone = (*self.data.heap).clone();
                SmallMap {
                    on_stack: false,
                    data: MapData {
                        heap: ManuallyDrop::new(heap_clone),
                    },
                }
            }
        }
    }
}
535
536// --- 2. Entry API Support ---
537
impl<K, V, const N: usize> SmallMap<K, V, N>
where
    K: Eq + Hash,
{
    /// Gets the given key's corresponding entry in the map for in-place manipulation.
    ///
    /// May spill the map to the heap *before* returning: if the stack map is
    /// full and `key` is new, any later `or_insert` on a stack entry would make
    /// `heapless` fail, so the spill is performed eagerly here.
    pub fn entry(&mut self, key: K) -> SmallMapEntry<'_, K, V, N> {
        // SAFETY: `on_stack` tags the active union field; after `spill_to_heap`
        // the heap field is the active one.
        unsafe {
            if self.on_stack {
                // Safety Pre-Check:
                // If we return a Stack Entry, user might call `.or_insert()`.
                // If the map is full, `heapless` panics on `or_insert`.
                // We must predict this: If full AND key is new, we spill NOW.
                let needs_spill = {
                    let stack_map = &mut self.data.stack;
                    stack_map.len() == N && !stack_map.contains_key(&key)
                };

                if needs_spill {
                    self.spill_to_heap();
                    // Fall through to Heap logic below
                } else {
                    // Safe to return Stack Entry: either there is spare capacity
                    // or the key already exists (an update always fits).
                    return SmallMapEntry::Stack((*self.data.stack).entry(key));
                }
            }
            // Heap Logic
            SmallMapEntry::Heap((*self.data.heap).entry(key))
        }
    }
}
568
/// A wrapper enum that unifies Stack and Heap entries.
///
/// Obtained from [`SmallMap::entry`]; mirrors the standard `Entry` API.
pub enum SmallMapEntry<'a, K, V, const N: usize> {
    /// Entry into the inline `heapless` map (map has not spilled yet).
    Stack(heapless::index_map::Entry<'a, K, V, N>),
    /// Entry into the heap-allocated `hashbrown` map (map has spilled).
    Heap(hashbrown::hash_map::Entry<'a, K, V, FnvBuildHasher>),
}
576
impl<'a, K, V, const N: usize> SmallMapEntry<'a, K, V, N>
where
    K: Eq + Hash,
{
    /// Ensures a value is in the entry by inserting `default` if vacant, and
    /// returns a mutable reference to the value.
    pub fn or_insert(self, default: V) -> &'a mut V {
        match self {
            // `SmallMap::entry()` only hands out a Stack variant after
            // verifying capacity exists for this key, so the fallible
            // heapless insert below cannot actually fail.
            SmallMapEntry::Stack(e) => {
                // We handle the Result manually to avoid `unwrap()`/`expect()`.
                // This removes the need for V: Debug.
                match e.or_insert(default) {
                    Ok(v) => v,
                    // We ignore the error payload (_) so we don't need to print it.
                    Err(_) => {
                        unreachable!("Logic Error: Stack map capacity check failed in entry()")
                    }
                }
            }
            SmallMapEntry::Heap(e) => e.or_insert(default),
        }
    }

    /// Ensures a value is in the entry by inserting the result of `default()`
    /// if vacant, and returns a mutable reference to the value.
    pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
        match self {
            SmallMapEntry::Stack(e) => match e.or_insert_with(default) {
                Ok(v) => v,
                Err(_) => unreachable!("Logic Error: Stack map capacity check failed in entry()"),
            },
            SmallMapEntry::Heap(e) => e.or_insert_with(default),
        }
    }

    /// Provides in-place mutable access to an occupied entry before any
    /// potential insert; a vacant entry is passed through unchanged.
    pub fn and_modify<F: FnOnce(&mut V)>(self, f: F) -> Self {
        match self {
            SmallMapEntry::Stack(e) => SmallMapEntry::Stack(e.and_modify(f)),
            SmallMapEntry::Heap(e) => SmallMapEntry::Heap(e.and_modify(f)),
        }
    }

    /// Returns a reference to this entry's key.
    pub fn key(&self) -> &K {
        match self {
            SmallMapEntry::Stack(e) => e.key(),
            SmallMapEntry::Heap(e) => e.key(),
        }
    }
}
628
629// --- 3. Iterator Support ---
630
631impl<K, V, const N: usize> SmallMap<K, V, N>
632where
633    K: Eq + Hash,
634{
635    /// Returns an iterator over the map.
636    pub fn iter(&self) -> SmallMapIter<'_, K, V> {
637        unsafe {
638            if self.on_stack {
639                SmallMapIter::Stack(self.data.stack.iter())
640            } else {
641                SmallMapIter::Heap(self.data.heap.iter())
642            }
643        }
644    }
645}
646
/// Wrapper for iterators to hide the underlying type difference.
///
/// Yields `(&K, &V)` pairs; produced by [`SmallMap::iter`].
pub enum SmallMapIter<'a, K, V> {
    /// Iterator over the inline `heapless` map (pre-spill).
    Stack(heapless::index_map::Iter<'a, K, V>),
    /// Iterator over the heap-allocated `hashbrown` map (post-spill).
    Heap(hashbrown::hash_map::Iter<'a, K, V>),
}
654
655impl<'a, K, V> Iterator for SmallMapIter<'a, K, V> {
656    type Item = (&'a K, &'a V);
657    fn next(&mut self) -> Option<Self::Item> {
658        match self {
659            SmallMapIter::Stack(i) => i.next(),
660            SmallMapIter::Heap(i) => i.next(),
661        }
662    }
663}
664
665// --- 4. Trait Implementations ---
666
// Safety: ManuallyDrop fields inside Union are NOT dropped automatically.
// We must check `on_stack` and drop the correct field manually.
impl<K, V, const N: usize> Drop for SmallMap<K, V, N> {
    fn drop(&mut self) {
        // SAFETY: `on_stack` tags the active union field; exactly one of the
        // two fields is live, and we drop only that one (exactly once).
        unsafe {
            if self.on_stack {
                ManuallyDrop::drop(&mut self.data.stack);
            } else {
                ManuallyDrop::drop(&mut self.data.heap);
            }
        }
    }
}
680
681// Default (Allows SmallMap::default())
682impl<K: Eq + Hash, V, const N: usize> Default for SmallMap<K, V, N> {
683    fn default() -> Self {
684        Self::new()
685    }
686}
687
688// Debug (Allows println!("{:?}", map))
689// Note: We require V: Debug here to print values
690impl<K: Debug + Eq + Hash, V: Debug, const N: usize> Debug for SmallMap<K, V, N> {
691    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
692        f.debug_map().entries(self.iter()).finish()
693    }
694}
695
696impl<K, V, const N: usize, M> PartialEq<M> for SmallMap<K, V, N>
697where
698    K: Eq + Hash,
699    V: PartialEq,
700    M: AnyMap<K, V>,
701{
702    fn eq(&self, other: &M) -> bool {
703        if self.len() != other.len() {
704            return false;
705        }
706        self.iter().all(|(k, v)| other.get(k) == Some(v))
707    }
708}
709
// Eq: sound because when `V: Eq`, the `PartialEq<Self>` comparison above is
// reflexive, symmetric, and transitive.
impl<K, V, const N: usize> Eq for SmallMap<K, V, N>
where
    K: Eq + Hash,
    V: Eq,
{
}
716
717// FromIterator (Allows .collect())
718impl<K, V, const N: usize> FromIterator<(K, V)> for SmallMap<K, V, N>
719where
720    K: Eq + Hash,
721{
722    fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
723        let mut map = SmallMap::new();
724        for (k, v) in iter {
725            map.insert(k, v);
726        }
727        map
728    }
729}
730
// IntoIterator (Allows 'for (k,v) in map')
impl<K: Eq + Hash, V, const N: usize> IntoIterator for SmallMap<K, V, N> {
    type Item = (K, V);
    type IntoIter = SmallMapIntoIter<K, V, N>;

    /// Consumes the map, yielding owned `(K, V)` pairs.
    fn into_iter(self) -> Self::IntoIter {
        // We need to move out of self.
        // Wrapping `self` in ManuallyDrop suppresses `SmallMap::drop`, so the
        // union field we move out of below is not dropped a second time.
        let this = ManuallyDrop::new(self);
        // SAFETY: `on_stack` tags the active union field. `ptr::read` moves the
        // live map out bitwise; the stale bits are never touched again because
        // `this` goes out of scope without running Drop.
        unsafe {
            if this.on_stack {
                // ptr::read copies the stack map out, then we iterate it
                SmallMapIntoIter::Stack(ptr::read(&*this.data.stack).into_iter())
            } else {
                // ptr::read copies the heap map out
                SmallMapIntoIter::Heap(ptr::read(&*this.data.heap).into_iter())
            }
        }
    }
}
751
/// Wrapper for owning iterators.
///
/// Yields owned `(K, V)` pairs; produced by `SmallMap::into_iter`.
pub enum SmallMapIntoIter<K, V, const N: usize> {
    /// Owning iterator over the inline `heapless` map (pre-spill).
    Stack(heapless::index_map::IntoIter<K, V, N>),
    /// Owning iterator over the heap-allocated `hashbrown` map (post-spill).
    Heap(hashbrown::hash_map::IntoIter<K, V>),
}
759
760impl<K, V, const N: usize> Iterator for SmallMapIntoIter<K, V, N> {
761    type Item = (K, V);
762    fn next(&mut self) -> Option<Self::Item> {
763        match self {
764            SmallMapIntoIter::Stack(i) => i.next(),
765            SmallMapIntoIter::Heap(i) => i.next(),
766        }
767    }
768}
769
770// --- 5. Test Suite ---
771
772#[cfg(test)]
773mod tests {
774    use super::*;
775
776    // --- Basic Stack Operations ---
777    #[test]
778    fn test_map_stack_ops_basic() {
779        let mut map: SmallMap<i32, i32, 4> = SmallMap::new();
780
781        assert!(map.is_empty());
782        assert!(map.is_on_stack());
783
784        map.insert(1, 10);
785        map.insert(2, 20);
786
787        assert_eq!(map.len(), 2);
788        assert_eq!(map.get(&1), Some(&10));
789        assert_eq!(map.get(&2), Some(&20));
790        assert_eq!(map.get(&99), None);
791
792        // Ensure we haven't spilled yet
793        assert!(map.is_on_stack());
794    }
795
796    // --- The Critical Spill Test ---
797    #[test]
798    fn test_map_spill_trigger_on_insert() {
799        // Capacity is strictly 2
800        let mut map: SmallMap<String, String, 2> = SmallMap::new();
801
802        map.insert("Key1".into(), "Val1".into());
803        map.insert("Key2".into(), "Val2".into());
804
805        // Still on stack (2/2)
806        assert!(map.is_on_stack());
807
808        // TRIGGER SPILL: Insert 3rd item
809        // This should trigger: ptr::read(stack) -> alloc heap -> move items -> ptr::write(union)
810        map.insert("Key3".into(), "Val3".into());
811
812        // 1. Check State Change
813        assert!(!map.is_on_stack(), "Map should have spilled to heap");
814
815        // 2. Check Data Integrity (Did previous items survive?)
816        assert_eq!(map.get("Key1"), Some(&"Val1".to_string()));
817        assert_eq!(map.get("Key2"), Some(&"Val2".to_string()));
818        assert_eq!(map.get("Key3"), Some(&"Val3".to_string()));
819        assert_eq!(map.len(), 3);
820    }
821
822    // --- Heap Operations (Post-Spill) ---
823    #[test]
824    fn test_map_any_storage_heap_ops() {
825        let mut map: SmallMap<i32, i32, 2> = SmallMap::new();
826
827        // Fill and Spill
828        map.insert(1, 10);
829        map.insert(2, 20);
830        map.insert(3, 30); // Spilled
831
832        // Continue working on Heap
833        map.insert(4, 40);
834        map.remove(&1); // Remove from Heap
835
836        assert_eq!(map.len(), 3);
837        assert_eq!(map.get(&1), None);
838        assert_eq!(map.get(&4), Some(&40));
839        assert!(!map.is_on_stack());
840    }
841
842    // --- Overwriting Values ---
843    #[test]
844    fn test_map_any_storage_overwrite() {
845        let mut map: SmallMap<i32, i32, 2> = SmallMap::new();
846
847        // Stack Overwrite
848        map.insert(1, 10);
849        map.insert(1, 99);
850        assert_eq!(map.get(&1), Some(&99));
851
852        // Spill
853        map.insert(2, 20);
854        map.insert(3, 30);
855
856        // Heap Overwrite
857        map.insert(1, 1000);
858        assert_eq!(map.get(&1), Some(&1000));
859    }
860
861    // --- Entry API: Basic & Modify ---
862    #[test]
863    fn test_map_any_storage_entry_api_basic() {
864        let mut map: SmallMap<&str, i32, 4> = SmallMap::new();
865
866        // or_insert (New Key)
867        map.entry("A").or_insert(1);
868        assert_eq!(map.get("A"), Some(&1));
869
870        // or_insert (Existing Key)
871        map.entry("A").or_insert(999);
872        assert_eq!(map.get("A"), Some(&1)); // Should not change
873
874        // and_modify
875        map.entry("A").and_modify(|v| *v += 10);
876        assert_eq!(map.get("A"), Some(&11));
877    }
878
879    // --- Entry API: Spill Edge Case ---
880    #[test]
881    fn test_map_spill_trigger_on_entry() {
882        let mut map: SmallMap<&str, i32, 2> = SmallMap::new();
883        map.insert("A", 1);
884        map.insert("B", 2);
885
886        // Map is full (2/2).
887        // Calling .entry("C") checks capacity -> finds full -> spills -> returns Heap Entry
888        // If we didn't handle this, heapless would panic inside or_insert.
889        let val = map.entry("C").or_insert(3);
890        *val += 10;
891
892        assert!(!map.is_on_stack());
893        assert_eq!(map.get("C"), Some(&13));
894    }
895
896    // --- Iterators & Traits ---
897    #[test]
898    fn test_map_traits_iterators() {
899        let mut map: SmallMap<i32, i32, 2> = SmallMap::new();
900        map.insert(1, 10);
901        map.insert(2, 20);
902        map.insert(3, 30); // Spill
903
904        // Test Reference Iterator
905        let mut sum = 0;
906        for (k, v) in map.iter() {
907            sum += k + v;
908        }
909        assert_eq!(sum, (1 + 10) + (2 + 20) + (3 + 30));
910
911        // Test FromIterator (Collect)
912        let collected: SmallMap<i32, i32, 2> = vec![(1, 1), (2, 2), (3, 3)].into_iter().collect();
913        assert_eq!(collected.len(), 3);
914        assert!(!collected.is_on_stack());
915
916        // Test Debug (ensure V: Debug logic works)
917        let debug_str = format!("{:?}", collected);
918        assert!(debug_str.contains("1: 1"));
919
920        // Test IntoIterator (Consuming)
921        let vec: Vec<(i32, i32)> = collected.into_iter().collect();
922        assert_eq!(vec.len(), 3);
923    }
924
925    // --- Minimum valid size is 2 Edge Case ---
926    #[test]
927    fn test_map_stack_minimum_capacity() {
928        // heapless requires N >= 2 and Power of Two
929        // This test ensures the library handles the smallest possible stack size correctly
930        let mut map: SmallMap<i32, i32, 2> = SmallMap::new();
931
932        map.insert(1, 1);
933        map.insert(2, 2);
934        assert!(map.is_on_stack()); // Holds 2 items
935
936        map.insert(3, 3);
937        assert!(!map.is_on_stack()); // Spills on 3rd
938    }
939
940    // --- Clear Operation ---
941    #[test]
942    fn test_map_any_storage_clear() {
943        let mut map: SmallMap<i32, i32, 2> = SmallMap::new();
944
945        // Clear Stack
946        map.insert(1, 1);
947        map.clear();
948        assert!(map.is_empty());
949        assert!(map.is_on_stack());
950
951        // Clear Heap
952        map.insert(1, 1);
953        map.insert(2, 2);
954        map.insert(3, 3); // Spill
955        map.clear();
956        assert!(map.is_empty());
957        assert!(!map.is_on_stack()); // Remains on heap, just empty
958    }
959
    #[test]
    fn test_map_static_size_guard() {
        // 1. Small Map (Standard use case)
        let _small: SmallMap<i32, i32, 4> = SmallMap::new();

        // 2. Medium Map (Pushing the limit but safe)
        // Struct = 100 bytes. N = 32. Total ~3.2 KB on the stack.
        // This is well under the guard limit, so it must compile and run.
        #[allow(dead_code)]
        struct MediumStruct([u8; 100]);

        // We verify that this instantiation does NOT panic/fail to build.
        let _medium: SmallMap<i32, MediumStruct, 32> = SmallMap::new();
    }
974
975    #[test]
976    fn test_map_traits_index_read() {
977        let mut map: SmallMap<i32, i32, 4> = SmallMap::new();
978        map.insert(1, 10);
979        map.insert(2, 20);
980
981        // Read using Index syntax
982        assert_eq!(map[&1], 10);
983        assert_eq!(map[&2], 20);
984    }
985
986    #[test]
987    fn test_map_traits_index_assign() {
988        let mut map: SmallMap<&str, i32, 4> = SmallMap::new();
989        map.insert("A", 10);
990
991        // Modify existing value using IndexMut syntax
992        map[&"A"] = 999;
993
994        assert_eq!(map.get("A"), Some(&999));
995        assert_eq!(map[&"A"], 999);
996    }
997
998    #[test]
999    fn test_map_traits_index_borrowing() {
1000        // Demonstrate using &str to index a Map<String, _>
1001        let mut map: SmallMap<String, i32, 4> = SmallMap::new();
1002        map.insert("Apple".to_string(), 100);
1003
1004        // We can use string literal "Apple" directly
1005        assert_eq!(map["Apple"], 100);
1006
1007        // Mutate
1008        map["Apple"] = 200;
1009        assert_eq!(map["Apple"], 200);
1010    }
1011
1012    #[test]
1013    #[should_panic(expected = "no entry found for key")]
1014    fn test_map_traits_index_panic() {
1015        let map: SmallMap<i32, i32, 4> = SmallMap::new();
1016        // This should panic
1017        let _val = map[&999];
1018    }
1019
1020    #[test]
1021    fn test_map_any_storage_heap_manipulation() {
1022        let mut map: SmallMap<i32, i32, 2> = vec![(1, 10), (2, 20), (3, 30)].into_iter().collect();
1023        assert!(!map.is_on_stack());
1024
1025        // heap get_mut
1026        if let Some(v) = map.get_mut(&1) {
1027            *v = 11;
1028        }
1029        assert_eq!(map[&1], 11);
1030
1031        // heap remove
1032        assert_eq!(map.remove(&2), Some(20));
1033        assert_eq!(map.len(), 2);
1034    }
1035
1036    #[test]
1037    fn test_map_any_storage_clone_heap() {
1038        let map: SmallMap<i32, i32, 2> = vec![(1, 10), (2, 20), (3, 30)].into_iter().collect();
1039        let cloned = map.clone();
1040        assert_eq!(cloned.len(), 3);
1041        assert!(!cloned.is_on_stack());
1042    }
1043
1044    #[test]
1045    fn test_map_traits_debug_display() {
1046        let map: SmallMap<i32, i32, 2> = vec![(1, 11)].into_iter().collect();
1047        let debug = format!("{:?}", map);
1048        assert!(debug.contains("1: 11"));
1049    }
1050
1051    #[test]
1052    fn test_map_traits_entry_or_insert_with() {
1053        let mut map2: SmallMap<i32, i32, 4> = SmallMap::new();
1054        map2.entry(1).or_insert_with(|| 100);
1055        assert_eq!(map2[&1], 100);
1056
1057        // heap entry or_insert_with
1058        let mut map_h: SmallMap<i32, i32, 2> = vec![(1, 1), (2, 2), (3, 3)].into_iter().collect();
1059        map_h.entry(4).or_insert_with(|| 400);
1060        assert_eq!(map_h[&4], 400);
1061    }
1062
1063    #[test]
1064    fn test_map_traits_entry_key() {
1065        let mut map3: SmallMap<i32, i32, 4> = SmallMap::new();
1066        let entry = map3.entry(1);
1067        assert_eq!(entry.key(), &1);
1068        entry.or_insert(10);
1069
1070        let mut map4: SmallMap<i32, i32, 2> = vec![(1, 1), (2, 2), (3, 3)].into_iter().collect();
1071        let entry_h = map4.entry(5);
1072        assert_eq!(entry_h.key(), &5);
1073    }
1074
1075    #[test]
1076    fn test_map_traits_equality_interop() {
1077        let mut map: SmallMap<i32, i32, 2> = SmallMap::new();
1078        map.insert(1, 10);
1079        map.insert(2, 20);
1080
1081        // Compare with std::collections::HashMap
1082        let mut std_map = std::collections::HashMap::new();
1083        std_map.insert(1, 10);
1084        std_map.insert(2, 20);
1085
1086        assert_eq!(map, std_map);
1087    }
1088}
1089
#[cfg(test)]
mod map_coverage_tests {
    use super::*;

    /// Drives every method of the [`AnyMap`] trait against one implementor.
    fn run_any_map_test<M: AnyMap<i32, i32>>(map: &mut M) {
        assert_eq!(map.len(), 0);
        assert!(map.is_empty());
        map.insert(1, 10);
        assert_eq!(map.get(&1), Some(&10));
        assert_eq!(map.get_mut(&1), Some(&mut 10));
        assert!(map.contains_key(&1));
        assert_eq!(map.remove(&1), Some(10));
        map.insert(2, 20);
        map.clear();
        assert_eq!(map.len(), 0);
    }

    #[test]
    fn test_any_map_trait_impls() {
        // Run the shared driver over every AnyMap implementor.
        let mut std_backed = std::collections::HashMap::<i32, i32>::new();
        run_any_map_test(&mut std_backed);

        let mut hashbrown_backed = hashbrown::HashMap::<i32, i32>::new();
        run_any_map_test(&mut hashbrown_backed);

        let mut small_backed: SmallMap<i32, i32, 2> = SmallMap::new();
        run_any_map_test(&mut small_backed);
    }

    #[test]
    fn test_small_map_insert_heap_branch() {
        let mut m: SmallMap<i32, i32, 2> = SmallMap::new();
        m.insert(1, 10);
        m.insert(2, 20);
        m.insert(3, 30); // forces the spill

        // Re-inserting an existing key exercises the heap insert branch and
        // must return the displaced value.
        assert_eq!(m.insert(3, 300), Some(30));
    }

    #[test]
    fn test_small_map_partial_eq_length() {
        let mut shorter: SmallMap<i32, i32, 2> = SmallMap::new();
        shorter.insert(1, 10);

        let mut longer: SmallMap<i32, i32, 2> = SmallMap::new();
        longer.insert(1, 10);
        longer.insert(2, 20);

        // Maps of different lengths can never compare equal.
        assert_ne!(shorter, longer);
    }

    #[test]
    fn test_small_map_spill_duplicate_insert() {
        let mut m: SmallMap<i32, i32, 2> = SmallMap::new();
        m.insert(1, 10);
        m.insert(2, 20);
        // Updating an existing key at full capacity must not trigger a spill.
        m.insert(2, 200);
        assert_eq!(m.len(), 2);
        assert!(m.is_on_stack());
    }

    #[test]
    fn test_small_map_index_traits() {
        let mut m: SmallMap<i32, i32, 2> = SmallMap::new();
        m.insert(1, 10);
        assert_eq!(m[&1], 10);
        m[&1] = 100;
        assert_eq!(m.get(&1), Some(&100));
    }
}