robinxx_map 0.1.0

High-performance, thread-safe open-addressing hash map using Robin Hood displacement & xxHash3.
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
//! Low-level `RawTable` implementation: probing, insert, erase, resize, and `DfH` logic.
//!
//! Contains all `unsafe` block invariants for index masking, prefetching, and
//! contiguous `SoA` memory manipulation. Public API surface is strictly safe and
//! confined to this module's internal boundaries.
//!
//! # Memory Layout
//! ```text
//! [ Meta(u8) | Padding... | Keys(K) | Padding... | Values(V) | Padding... ]
//! ^ 64B aligned
//! ```
//! Each section is padded to 64-byte boundaries to guarantee SIMD-friendly stride
//! and cache-line alignment for auto-vectorized probing loops.
//!
//! # Robin Hood Displacement
//! Entries with higher Distance from Home (`DfH`) values are prioritized during probing.
//! When inserting, if a slot with lower `DfH` is encountered, the new entry displaces it
//! and continues probing with the displaced entry. This bounds worst-case probe chains.
//!
//! # Safety Guarantees
//! - All public methods are safe; `unsafe` is confined to allocation, pointer arithmetic,
//!   and memory initialization with documented invariants.
//! - Keys/Values are only accessed where `meta[i] > 0`, guaranteeing initialized memory.
//! - Drop semantics respect Rust move semantics: initialized slots are dropped before
//!   deallocation.

use crate::hash::RobinHoodKey;
use crate::memory;
use core::marker::PhantomData;
use core::mem;
use core::ptr::{self, NonNull};

/// Raw, unguarded open-addressing hash table using Robin Hood displacement.
///
/// # Summary
/// Low-level hash table with Robin Hood probing, `SoA` layout, and manual memory management.
///
/// # Description
/// `RawTable` implements an open-addressing hash table with linear probing and
/// Distance from Home (`DfH`) tracking. Keys and values are stored in parallel
/// 64-byte aligned arrays (Structure of Arrays) for cache-friendly access and
/// auto-vectorization potential.
///
/// Insertion uses Robin Hood displacement: entries with higher `DfH` values are
/// prioritized, and new entries displace those with lower `DfH` to bound probe chains.
/// Deletion uses backward shift to eliminate tombstones and maintain probe continuity.
///
/// # Invariants
/// - `capacity` is a power of two and `> 0`.
/// - `mask == capacity - 1` for bitwise index masking.
/// - `base_ptr`, `meta_ptr`, `keys_ptr`, `vals_ptr` point to valid, aligned, SoA-allocated memory.
/// - `meta[i] == 0` denotes an empty slot. `meta[i] > 0` denotes occupied with `DfH = meta[i] - 1`.
/// - `size <= capacity * 0.8` before `insert` triggers resize.
/// - Keys/Values are only initialized where `meta[i] > 0`.
///
/// # Examples
/// ```
/// use robinxx_map::hash::RobinHoodKey;
/// use robinxx_map::table::RawTable;
///
/// // RawTable requires K: RobinHoodKey + PartialEq
/// let mut table: RawTable<u64, String> = RawTable::with_capacity(8);
/// assert!(table.insert(42u64, String::from("answer")));
/// assert_eq!(table.get(&42u64), Some(&String::from("answer")));
/// assert!(!table.insert(42u64, String::from("duplicate"))); // Key exists
/// assert!(table.remove(&42u64));
/// assert_eq!(table.get(&42u64), None);
/// ```
///
/// # Panics
/// - `with_capacity` panics if allocation fails (OOM) via `handle_alloc_error`.
/// - `insert` panics if resize allocation fails.
/// - All methods rely on a law-abiding `K: PartialEq` implementation for key
///   comparison; an inconsistent `PartialEq` can cause lost or duplicated entries,
///   but not memory unsafety (the bound is enforced on the `impl` block).
///
/// # Errors
/// Not applicable. Public methods return `bool` or `Option` for infallible semantics.
/// Allocation failures panic via `handle_alloc_error` per `no_std` conventions.
///
/// # Safety
/// - `unsafe impl Send/Sync` is sound because `RawTable` owns all data and uses
///   global allocator; thread-safety is enforced at the `RobinHoodMap` layer.
/// - `Drop` iterates only where `meta[i] > 0`, guaranteeing initialized memory for
///   `ptr::drop_in_place`.
/// - Pointer arithmetic is bounded by `mask` and `capacity`; indices are always
///   valid due to power-of-two capacity and bitwise masking.
///
/// # See Also
/// - [`RobinHoodMap`](crate::map::RobinHoodMap) for the safe, public API wrapper.
/// - [`memory`] for `SoA` allocation primitives.
/// - [`hash::RobinHoodKey`](crate::hash::RobinHoodKey) for the hashing trait.
pub struct RawTable<K, V> {
    /// Base pointer to the allocated `SoA` memory block (owns the allocation;
    /// `meta_ptr`/`keys_ptr`/`vals_ptr` are derived offsets into it).
    base_ptr: NonNull<u8>,
    /// Pointer to the metadata array. Encoding: `0` = empty slot;
    /// `n > 0` = occupied with Distance-from-Home `DfH = n - 1`.
    pub(crate) meta_ptr: *mut u8,
    /// Pointer to the keys array; slot `i` is initialized iff `meta[i] > 0`.
    pub(crate) keys_ptr: *mut K,
    /// Pointer to the values array; slot `i` is initialized iff `meta[i] > 0`.
    pub(crate) vals_ptr: *mut V,
    /// Total allocated slot count (always a power of two, never zero).
    pub(crate) capacity: usize,
    /// Number of occupied slots.
    size: usize,
    /// Bitmask for index wrapping: `mask == capacity - 1`.
    mask: usize,
    /// Byte offset from `base_ptr` to the keys array (cached from `calc_layout`).
    keys_offset: usize,
    /// Byte offset from `base_ptr` to the values array (cached from `calc_layout`).
    vals_offset: usize,
    /// Phantom marker: `RawTable` logically owns `K` and `V` (variance + drop-check).
    _marker: PhantomData<(K, V)>,
}

// SAFETY: `RawTable` manually manages `Send`/`Sync` via owned data and global allocator.
// No interior mutability; all mutation requires `&mut self`. Thread-safety is enforced
// at the `RobinHoodMap` layer via `SpinMutex`.
unsafe impl<K: Send, V: Send> Send for RawTable<K, V> {}
unsafe impl<K: Sync, V: Sync> Sync for RawTable<K, V> {}

// Trait bounds added here for methods that require key comparison and hashing.
// Struct itself remains unbounded to allow `Drop` and basic operations without constraints.
impl<K: RobinHoodKey + PartialEq, V> RawTable<K, V> {
    /// Allocates an empty `RawTable` with the default capacity of 16 slots.
    ///
    /// # Summary
    /// Convenience constructor; equivalent to `with_capacity(16)`.
    ///
    /// # Description
    /// Delegates to [`with_capacity`](Self::with_capacity) with a default of 16
    /// slots. Suitable for small tables or when the final size is unknown upfront.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let table: RawTable<u32, String> = RawTable::new();
    /// assert_eq!(table.capacity(), 16);
    /// assert!(table.is_empty());
    /// ```
    ///
    /// # Panics
    /// Panics if allocation fails (OOM) via `handle_alloc_error`.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable. Public safe constructor.
    ///
    /// # See Also
    /// - [`with_capacity`](Self::with_capacity) for custom initial capacity.
    #[inline]
    #[must_use]
    pub fn new() -> Self {
        /// Initial slot count used when no capacity hint is given.
        const DEFAULT_CAPACITY: usize = 16;
        Self::with_capacity(DEFAULT_CAPACITY)
    }

    /// Allocates an empty `RawTable` with at least the specified capacity.
    ///
    /// # Summary
    /// Creates a new table whose capacity is `capacity` rounded up to a power of two.
    ///
    /// # Description
    /// Allocates one contiguous `SoA` block sized for the rounded capacity. The
    /// metadata region is zero-initialized (all slots empty); the keys/values
    /// regions stay uninitialized until slots are written by `insert`.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let table: RawTable<&str, i32> = RawTable::with_capacity(100);
    /// assert_eq!(table.capacity(), 128); // Rounded up to power of two
    /// ```
    ///
    /// # Panics
    /// Panics if allocation fails (OOM) via `handle_alloc_error`.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable at the call site; the returned table is valid for all safe
    /// operations defined on this `impl` block.
    ///
    /// # See Also
    /// - [`new`](Self::new) for default capacity.
    /// - [`reserve`](Self::reserve) for post-construction capacity adjustment.
    #[must_use]
    pub fn with_capacity(capacity: usize) -> Self {
        // Zero is bumped to one before rounding so the mask math below is valid.
        let cap = capacity.max(1).next_power_of_two();
        let key_bytes = mem::size_of::<K>();
        let val_bytes = mem::size_of::<V>();
        // SAFETY: `cap` is a nonzero power of two; element sizes are compile-time
        // constants. `allocate_block` yields a valid, aligned block whose metadata
        // region is zero-initialized.
        let base = unsafe { memory::allocate_block(cap, key_bytes, val_bytes) };
        let (_, k_off, v_off) = memory::calc_layout(cap, key_bytes, val_bytes);
        // SAFETY: the offsets come from the exact layout computation used for the
        // allocation above, so all derived pointers stay inside the block.
        let (meta_ptr, keys_ptr, vals_ptr) =
            unsafe { memory::get_array_ptrs::<K, V>(base, k_off, v_off) };
        Self {
            base_ptr: base,
            meta_ptr,
            keys_ptr,
            vals_ptr,
            capacity: cap,
            size: 0,
            mask: cap - 1,
            keys_offset: k_off,
            vals_offset: v_off,
            _marker: PhantomData,
        }
    }

    /// Returns the number of occupied slots.
    ///
    /// # Summary
    /// O(1) element count, read from the maintained counter.
    ///
    /// # Description
    /// The `size` field is kept up to date by every insert/remove, so no table
    /// traversal is needed.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<u64, bool> = RawTable::new();
    /// assert_eq!(table.size(), 0);
    /// table.insert(1u64, true);
    /// assert_eq!(table.size(), 1);
    /// ```
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable; plain field read.
    ///
    /// # See Also
    /// - [`capacity`](Self::capacity) for total slot count.
    /// - [`is_empty`](Self::is_empty) for boolean emptiness check.
    #[inline]
    pub(crate) fn size(&self) -> usize {
        self.size
    }

    /// Returns the total allocated capacity.
    ///
    /// # Summary
    /// O(1) read of the total slot count; always a power of two.
    ///
    /// # Description
    /// Constructors and `resize` round capacities via `next_power_of_two`, so this
    /// value is guaranteed to be a power of two and never zero.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let table: RawTable<String, Vec<u8>> = RawTable::with_capacity(50);
    /// assert_eq!(table.capacity(), 64);
    /// ```
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable; plain field read.
    ///
    /// # See Also
    /// - [`size`](Self::size) for occupied slot count.
    /// - [`reserve`](Self::reserve) to ensure minimum capacity.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        self.capacity
    }

    /// Checks whether the table holds no elements.
    ///
    /// # Summary
    /// Returns `true` exactly when `size() == 0`.
    ///
    /// # Description
    /// Thin convenience wrapper over [`size`](Self::size) for readable
    /// conditional logic.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<i32, f64> = RawTable::new();
    /// assert!(table.is_empty());
    /// table.insert(42, 3.14);
    /// assert!(!table.is_empty());
    /// ```
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable.
    ///
    /// # See Also
    /// - [`size`](Self::size) for the exact element count.
    #[inline]
    pub(crate) fn is_empty(&self) -> bool {
        self.size() == 0
    }

    /// Inserts a key-value pair using Robin Hood displacement.
    ///
    /// # Summary
    /// Stores a key-value pair; returns `true` if newly inserted, `false` if duplicate.
    ///
    /// # Description
    /// Computes the key's xxHash3 digest, probes linearly with `DfH` tracking, and
    /// displaces entries with lower `DfH` to bound probe chains. Triggers resize at
    /// 80% load factor to maintain amortized O(1) performance.
    ///
    /// Duplicate keys are not updated; the existing value is preserved and `false`
    /// is returned. To update, use `get_mut` followed by in-place mutation.
    ///
    /// # Examples
    /// ```
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<&str, i32> = RawTable::new();
    /// assert!(table.insert("key", 100));      // Insert new
    /// assert!(!table.insert("key", 200));     // Duplicate, no update
    /// assert_eq!(table.get(&"key"), Some(&100)); // Original value preserved
    /// ```
    ///
    /// # Panics
    /// - Panics if allocation fails during resize (OOM) via `handle_alloc_error`.
    /// - Panics if a probe chain exceeds the maximum representable `DfH` (254),
    ///   indicating a hash distribution problem or adversarial input. The bound is
    ///   254 (not 255) because metadata stores `DfH + 1` in a `u8`.
    ///
    /// # Errors
    /// Not applicable. Returns `bool` for infallible semantics.
    ///
    /// # Safety
    /// Relies on a law-abiding `K: PartialEq` for duplicate detection. The table
    /// takes ownership of inserted keys/values; dropping the table drops all
    /// stored elements.
    ///
    /// # See Also
    /// - [`get`](Self::get) for immutable lookup.
    /// - [`get_mut`](Self::get_mut) for mutable lookup and in-place update.
    /// - [`remove`](Self::remove) for deletion.
    /// - [`reserve`](Self::reserve) to pre-allocate capacity and avoid mid-insert resize.
    #[inline]
    pub fn insert(&mut self, key: K, value: V) -> bool {
        // Grow at 80% load factor (size/capacity >= 4/5).
        if self.size * 5 >= self.capacity * 4 {
            self.resize();
        }

        let hash = key.hash_xxh3();
        // Mask in the u64 domain before narrowing: `usize::try_from(hash)` would
        // panic on 32-bit targets for most hashes. The mask always fits in usize.
        let mut idx = (hash & self.mask as u64) as usize;
        let mut dfh: u8 = 0;
        let mut current_key = key;
        let mut current_val = value;

        // SAFETY: indices are always masked into [0, capacity); keys/values are
        // read only where meta > 0 and written via ptr::write/replace, so no
        // uninitialized reads or double drops occur.
        unsafe {
            loop {
                // Metadata the in-flight entry would record at this slot (DfH + 1).
                // Computing it with checked_add (rather than writing `dfh + 1`
                // later) prevents a u8 overflow at dfh == 255, which in release
                // builds would wrap to 0 and mark an occupied slot as empty.
                let incoming = dfh.checked_add(1).expect(
                    "DfH overflow: probe chain exceeded 255. \
                    This indicates a hash distribution problem or adversarial input.",
                );

                let meta_val = *self.meta_ptr.add(idx);
                if meta_val == 0 {
                    // Empty slot: claim it for the in-flight entry.
                    ptr::write(self.keys_ptr.add(idx), current_key);
                    ptr::write(self.vals_ptr.add(idx), current_val);
                    *self.meta_ptr.add(idx) = incoming;
                    self.size += 1;
                    return true;
                }

                if meta_val < incoming {
                    // Robin Hood: the resident is "richer" (lower DfH). Swap it out
                    // and keep probing with the displaced entry.
                    current_key = ptr::replace(self.keys_ptr.add(idx), current_key);
                    current_val = ptr::replace(self.vals_ptr.add(idx), current_val);
                    *self.meta_ptr.add(idx) = incoming;
                    // Displaced entry had DfH = meta_val - 1 here; after the
                    // increment below, dfh == meta_val at the next slot. ✓
                    dfh = meta_val - 1;
                } else if meta_val == incoming && *self.keys_ptr.add(idx) == current_key {
                    // Duplicate key (same home, same distance): keep the existing
                    // value and drop the rejected pair.
                    drop(current_key);
                    drop(current_val);
                    return false;
                }

                idx = (idx + 1) & self.mask;
                // Safe: the checked_add above guarantees dfh <= 254 here, and a
                // displacement sets dfh <= 254 (meta_val <= 255).
                dfh += 1;
            }
        }
    }

    /// Finds an immutable reference to the value associated with `key`.
    ///
    /// # Summary
    /// Returns `Some(&V)` if key exists, `None` otherwise.
    ///
    /// # Description
    /// Probes linearly using the key's xxHash3 digest and `DfH` bounds. Stops at the
    /// first matching key, at an empty slot, at a `DfH` violation, or when the probe
    /// distance exceeds the maximum any stored entry can have.
    ///
    /// # Examples
    /// ```
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<u64, String> = RawTable::new();
    /// table.insert(42u64, String::from("answer"));
    /// assert_eq!(table.get(&42u64), Some(&String::from("answer")));
    /// assert_eq!(table.get(&99u64), None);
    /// ```
    ///
    /// # Panics
    /// Never panics. Exhausting the `u8` probe distance returns `None` (the key
    /// cannot be stored that far out), and the hash-to-index conversion masks in
    /// the `u64` domain so it cannot fail on 32-bit targets.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Returned reference is valid for the lifetime of `&self`; aliasing rules are
    /// enforced by the borrow checker.
    ///
    /// # See Also
    /// - [`get_mut`](Self::get_mut) for mutable access.
    /// - [`insert`](Self::insert) to add new entries.
    #[inline]
    pub fn get(&self, key: &K) -> Option<&V> {
        let hash = key.hash_xxh3();
        // Mask before narrowing: `usize::try_from(hash)` would panic on 32-bit
        // targets for most hashes. The mask always fits in usize.
        let mut idx = (hash & self.mask as u64) as usize;
        let mut dfh: u8 = 0;
        // SAFETY: probing loop bounded by empty slot or DfH < current dfh.
        // Reads only initialized keys/values where meta[i] > 0.
        unsafe {
            loop {
                let meta_val = *self.meta_ptr.add(idx);
                if meta_val == 0 || meta_val < dfh.saturating_add(1) {
                    return None;
                }
                if *self.keys_ptr.add(idx) == *key {
                    return Some(&*self.vals_ptr.add(idx));
                }
                idx = (idx + 1) & self.mask;
                // Stored meta is DfH + 1 in a u8, so no entry sits at DfH > 254.
                // Exhausting the probe distance therefore proves absence; return
                // None instead of panicking to honor the "never panics" contract.
                match dfh.checked_add(1) {
                    Some(next) => dfh = next,
                    None => return None,
                }
            }
        }
    }

    /// Finds a mutable reference to the value associated with `key`.
    ///
    /// # Summary
    /// Returns `Some(&mut V)` if key exists, `None` otherwise.
    ///
    /// # Description
    /// Identical probe to `get`, but yields a mutable reference for in-place
    /// modification. Useful for updating values without re-insertion.
    ///
    /// # Examples
    /// ```
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<String, Vec<u8>> = RawTable::new();
    /// table.insert(String::from("data"), vec![1, 2, 3]);
    /// if let Some(v) = table.get_mut(&String::from("data")) {
    ///     v.push(4);
    /// }
    /// assert_eq!(table.get(&String::from("data")), Some(&vec![1, 2, 3, 4]));
    /// ```
    ///
    /// # Panics
    /// Never panics. Exhausting the `u8` probe distance returns `None`, and the
    /// hash-to-index conversion masks in the `u64` domain so it cannot fail on
    /// 32-bit targets.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Returned mutable reference is valid for the lifetime of `&mut self`;
    /// exclusivity is enforced by the borrow checker.
    ///
    /// # See Also
    /// - [`get`](Self::get) for immutable access.
    /// - [`insert`](Self::insert) to add new entries.
    #[inline]
    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
        let hash = key.hash_xxh3();
        // Mask before narrowing: `usize::try_from(hash)` would panic on 32-bit
        // targets for most hashes. The mask always fits in usize.
        let mut idx = (hash & self.mask as u64) as usize;
        let mut dfh: u8 = 0;
        // SAFETY: identical to `get`, but yields `&mut V` for in-place mutation.
        unsafe {
            loop {
                let meta_val = *self.meta_ptr.add(idx);
                if meta_val == 0 || meta_val < dfh.saturating_add(1) {
                    return None;
                }
                if *self.keys_ptr.add(idx) == *key {
                    return Some(&mut *self.vals_ptr.add(idx));
                }
                idx = (idx + 1) & self.mask;
                // No stored entry exceeds DfH 254 (meta = DfH + 1 in a u8), so an
                // exhausted probe distance proves the key is absent: return None
                // instead of panicking, matching the documented contract.
                match dfh.checked_add(1) {
                    Some(next) => dfh = next,
                    None => return None,
                }
            }
        }
    }

    /// Removes a key-value pair using `DfH` backward shift deletion.
    ///
    /// # Summary
    /// Deletes a key if present; returns `true` on success, `false` if absent.
    ///
    /// # Description
    /// Probes to locate the key, then shifts subsequent entries backward to fill
    /// the gap, decrementing their `DfH` values. This eliminates tombstones and
    /// maintains probe continuity for future lookups.
    ///
    /// # Examples
    /// ```
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<i32, bool> = RawTable::new();
    /// table.insert(10, true);
    /// assert!(table.remove(&10));   // Success
    /// assert!(!table.remove(&10));  // Already removed
    /// assert_eq!(table.get(&10), None);
    /// ```
    ///
    /// # Panics
    /// Never panics. Exhausting the `u8` probe distance returns `false`, and the
    /// hash-to-index conversion masks in the `u64` domain so it cannot fail on
    /// 32-bit targets.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Relies on a law-abiding `K: PartialEq` for key matching. Removed keys/values
    /// are dropped via `ptr::drop_in_place`; `meta[i]` tracking prevents double-drop.
    ///
    /// # See Also
    /// - [`insert`](Self::insert) to add entries.
    /// - [`clear`](Self::clear) to remove all entries at once.
    #[inline]
    pub fn remove(&mut self, key: &K) -> bool {
        let hash = key.hash_xxh3();
        // Mask before narrowing: `usize::try_from(hash)` would panic on 32-bit
        // targets for most hashes. The mask always fits in usize.
        let mut idx = (hash & self.mask as u64) as usize;
        let mut dfh: u8 = 0;
        // SAFETY: probing finds occupied slot or terminates early.
        unsafe {
            loop {
                let meta_val = *self.meta_ptr.add(idx);
                // Stop at an empty slot or once the resident's DfH (meta - 1)
                // drops below the probe distance — the same bound `get`/`get_mut`
                // use. (The previous `meta_val < dfh` bound probed one slot
                // further than necessary.)
                if meta_val == 0 || meta_val < dfh.saturating_add(1) {
                    return false;
                }
                if *self.keys_ptr.add(idx) == *key {
                    break;
                }
                idx = (idx + 1) & self.mask;
                // No stored entry exceeds DfH 254 (meta = DfH + 1 in a u8), so
                // running out of probe distance proves the key is absent.
                match dfh.checked_add(1) {
                    Some(next) => dfh = next,
                    None => return false,
                }
            }
            // Drop the element being removed before shifting over it.
            ptr::drop_in_place(self.keys_ptr.add(idx));
            ptr::drop_in_place(self.vals_ptr.add(idx));
            // Backward shift loop: pull later cluster entries one slot back.
            let mut shift_idx = idx;
            loop {
                let next = (shift_idx + 1) & self.mask;
                let next_meta = *self.meta_ptr.add(next);
                if next_meta <= 1 {
                    // next is empty (0) or at its home slot (DfH=0); nothing left to shift.
                    break;
                }
                // Shift next into current; ownership moves — no extra drop needed.
                ptr::copy(self.keys_ptr.add(next), self.keys_ptr.add(shift_idx), 1);
                ptr::copy(self.vals_ptr.add(next), self.vals_ptr.add(shift_idx), 1);
                *self.meta_ptr.add(shift_idx) = next_meta - 1;
                shift_idx = next;
            }
            *self.meta_ptr.add(shift_idx) = 0;
            self.size -= 1;
            true
        }
    }

    /// Clears all elements, dropping keys/values but retaining capacity.
    ///
    /// # Summary
    /// Drops every stored entry and resets size to zero; the allocation is kept.
    ///
    /// # Description
    /// Walks every slot; where `meta[i] > 0` the key/value pair is dropped in place
    /// and the metadata byte reset to zero. The backing allocation and capacity are
    /// untouched, so subsequent inserts reuse the existing block.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<u64, String> = RawTable::with_capacity(32);
    /// table.insert(1, String::from("one"));
    /// table.insert(2, String::from("two"));
    /// assert_eq!(table.size(), 2);
    /// table.clear();
    /// assert_eq!(table.size(), 0);
    /// assert_eq!(table.capacity(), 32); // Capacity unchanged
    /// ```
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Only slots with nonzero metadata are dropped, so `drop_in_place` sees only
    /// initialized memory; zeroing the metadata afterwards prevents double-drop.
    ///
    /// # See Also
    /// - [`remove`](Self::remove) for selective deletion.
    /// - [`reserve`](Self::reserve) to adjust capacity post-clear.
    #[inline]
    pub fn clear(&mut self) {
        // SAFETY: keys/values are initialized exactly where meta > 0; each such
        // pair is dropped once and its slot marked empty.
        unsafe {
            for slot in 0..self.capacity {
                let meta = self.meta_ptr.add(slot);
                if *meta == 0 {
                    continue;
                }
                ptr::drop_in_place(self.keys_ptr.add(slot));
                ptr::drop_in_place(self.vals_ptr.add(slot));
                *meta = 0;
            }
        }
        self.size = 0;
    }

    /// Reserves additional capacity to accommodate future inserts.
    ///
    /// # Summary
    /// Ensures the table can hold at least `size + additional` elements without resize.
    ///
    /// # Description
    /// If the target element count would breach the 80% load factor, rounds the
    /// required capacity up to the nearest power of two and resizes. No-op if
    /// sufficient capacity already exists. All intermediate arithmetic saturates,
    /// so pathological `additional` values cannot wrap in release builds and
    /// silently skip (or under-size) the resize.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let mut table: RawTable<&str, i32> = RawTable::new();
    /// table.reserve(100);
    /// assert!(table.capacity() >= 100);
    /// ```
    ///
    /// # Panics
    /// Panics if allocation fails during resize (OOM).
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Call before bulk inserts to avoid mid-operation resize. Any existing
    /// references into the table are invalidated if a resize occurs.
    ///
    /// # See Also
    /// - [`with_capacity`](Self::with_capacity) for initial capacity planning.
    /// - [`insert`](Self::insert) which auto-resizes at 80% load factor.
    pub fn reserve(&mut self, additional: usize) {
        let target = self.size.saturating_add(additional);
        // Grow when target/capacity >= 4/5. `saturating_mul` mirrors the
        // saturating_add above: huge requests clamp instead of wrapping.
        let scaled = target.saturating_mul(5);
        if scaled >= self.capacity * 4 {
            let new_cap = (scaled / 4).next_power_of_two();
            if new_cap > self.capacity {
                self.resize_to(new_cap);
            }
        }
    }

    /// Doubles capacity and rehashes all elements.
    ///
    /// # Summary
    /// Internal helper that grows the table by a factor of two.
    ///
    /// # Description
    /// Delegates to [`resize_to`](Self::resize_to) with twice the current
    /// capacity. Since capacity is always a power of two, the doubled value is
    /// also a power of two, satisfying `resize_to`'s contract.
    ///
    /// # Panics
    /// Panics if allocation fails (OOM).
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Migration safety is handled entirely by `resize_to`.
    ///
    /// # See Also
    /// - [`resize_to`](Self::resize_to) for exact capacity resize.
    /// - [`insert`](Self::insert) which triggers this at 80% load factor.
    fn resize(&mut self) {
        let doubled = self.capacity * 2;
        self.resize_to(doubled);
    }

    /// Resizes to exact power-of-two capacity.
    ///
    /// # Summary
    /// Internal helper to resize to a specific power-of-two capacity.
    ///
    /// # Description
    /// Allocates a new block, rehashes all elements, deallocates the old block,
    /// and updates internal pointers. Used by `resize` and `reserve`.
    ///
    /// Metadata encoding reminder: `meta[i] == 0` means "empty slot";
    /// `meta[i] == d + 1` means "occupied, distance-from-home (DfH) is `d`".
    ///
    /// # Panics
    /// Panics if allocation fails (OOM), or if a probe chain exceeds 255 slots
    /// (DfH counter overflow; see the `checked_add` below).
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Caller must ensure `new_capacity` is a power of two and `> current capacity`.
    /// All keys/values are moved safely; old block is deallocated after migration.
    ///
    /// # See Also
    /// - [`resize`](Self::resize) for doubling capacity.
    /// - [`reserve`](Self::reserve) for capacity planning.
    fn resize_to(&mut self, new_capacity: usize) {
        // Snapshot old-block state before any pointers are overwritten; the old
        // block is still read from (and finally freed) after the new one is built.
        let old_cap = self.capacity;
        let old_base = self.base_ptr;
        let old_meta = self.meta_ptr;
        let old_keys = self.keys_ptr;
        let old_vals = self.vals_ptr;
        let k_size = mem::size_of::<K>();
        let v_size = mem::size_of::<V>();

        // Fresh SoA block: one allocation holding meta bytes, keys, and values
        // at layout-computed offsets.
        let new_base = unsafe { memory::allocate_block(new_capacity, k_size, v_size) };
        let (new_keys_offset, new_vals_offset) = {
            let (_, k_off, v_off) = memory::calc_layout(new_capacity, k_size, v_size);
            (k_off, v_off)
        };
        let (new_meta, new_keys, new_vals) =
            unsafe { memory::get_array_ptrs::<K, V>(new_base, new_keys_offset, new_vals_offset) };

        let mut new_size = 0;
        // Power-of-two capacity lets `& mask` replace `% capacity`.
        let new_mask = new_capacity - 1;

        // SAFETY: reads old block, writes new block. No overlap.
        unsafe {
            for i in 0..old_cap {
                // meta != 0 marks an initialized (occupied) slot in the old table.
                if *old_meta.add(i) != 0 {
                    // Move ownership of K/V out of the old block; these slots are
                    // never touched again before the block is freed, so no double-drop.
                    let k = ptr::read(old_keys.add(i));
                    let v = ptr::read(old_vals.add(i));
                    // Re-hash: the home slot depends on the (new) mask.
                    let hash = k.hash_xxh3();
                    // NOTE(review): if `hash_xxh3()` yields u64, `usize::try_from(hash)`
                    // panics on 32-bit targets for hashes > u32::MAX — confirm the hash
                    // type; masking in u64 before converting would be portable.
                    let mut idx = usize::try_from(hash).unwrap() & new_mask;
                    let mut dfh: u8 = 0;
                    let mut cur_k = k;
                    let mut cur_v = v;

                    // Robin Hood migration to empty table (no duplicate check needed)
                    loop {
                        let meta_val = *new_meta.add(idx);
                        if meta_val == 0 {
                            // Empty slot: place the in-flight entry and stamp its DfH.
                            ptr::write(new_keys.add(idx), cur_k);
                            ptr::write(new_vals.add(idx), cur_v);
                            *new_meta.add(idx) = dfh + 1;
                            new_size += 1;
                            break;
                        }

                        if meta_val < dfh + 1 {
                            // Displace entry with lower DfH
                            // (classic Robin Hood: the "richer" resident yields its slot).
                            let tk = ptr::replace(new_keys.add(idx), cur_k);
                            let tv = ptr::replace(new_vals.add(idx), cur_v);
                            cur_k = tk;
                            cur_v = tv;
                            // Record cur_k's DfH at this slot so future probes see the right value.
                            *new_meta.add(idx) = dfh + 1;
                            // Displaced item had DfH = meta_val - 1.
                            // We move to next slot, so its DfH becomes (meta_val - 1) + 1 = meta_val.
                            // We set dfh here to meta_val - 1, then increment at loop end.
                            dfh = meta_val - 1;
                        }

                        idx = (idx + 1) & new_mask;
                        // NOTE(review): this guard lets dfh reach exactly 255; on the next
                        // iteration `dfh + 1` overflows u8 (debug panic; release wraps to 0,
                        // which would stamp a slot as "empty"). Unreachable in practice at
                        // an 80% load factor, but worth tightening — TODO confirm.
                        dfh = dfh.checked_add(1).expect(
                            "DfH overflow: probe chain exceeded 255. \
                            This indicates a hash distribution problem or adversarial input.",
                        );
                    }
                }
            }
        }

        // Debug invariant: compare against self.size (element count), not old_cap (slot count).
        debug_assert_eq!(new_size, self.size, "Migration lost elements!");

        // SAFETY: every occupied slot's K/V was moved out above via ptr::read,
        // so freeing the old block drops nothing and frees exactly once.
        unsafe {
            memory::deallocate_block(old_base, old_cap, k_size, v_size);
        }

        // Commit the new block: from here on, all accessors see the resized table.
        self.base_ptr = new_base;
        self.meta_ptr = new_meta;
        self.keys_ptr = new_keys;
        self.vals_ptr = new_vals;
        self.capacity = new_capacity;
        self.mask = new_mask;
        self.size = new_size;
        self.keys_offset = new_keys_offset;
        self.vals_offset = new_vals_offset;
    }

    /// Decrements the internal size counter by 1, clamping at zero.
    ///
    /// # Summary
    /// Keeps `size` in sync while `Drain` extracts elements.
    ///
    /// # Description
    /// Called by the `Drain` iterator each time it removes an entry and zeroes
    /// its metadata byte. Clamped subtraction means a spurious call can never
    /// underflow the counter.
    ///
    /// # Examples
    /// Internal use only; not exposed in public API.
    ///
    /// # Panics
    /// Never panics.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Caller must ensure that an element was actually removed.
    ///
    /// # See Also
    /// - [`Drain`](crate::iter::Drain)
    #[inline]
    pub(crate) fn decrement_size(&mut self) {
        // Equivalent to saturating_sub(1): stays at 0 rather than underflowing.
        self.size = self.size.checked_sub(1).unwrap_or(0);
    }
}

impl<K: RobinHoodKey + PartialEq, V> Default for RawTable<K, V> {
    /// Creates an empty `RawTable` with default capacity.
    ///
    /// # Summary
    /// Implements the `Default` trait for ergonomic construction.
    ///
    /// # Description
    /// Delegates to `RawTable::new()`, providing a default capacity of 16 slots.
    ///
    /// # Examples
    /// ```ignore
    /// use robinxx_map::table::RawTable;
    ///
    /// let table: RawTable<u32, String> = RawTable::default();
    /// assert_eq!(table.capacity(), 16);
    /// ```
    ///
    /// # Panics
    /// Panics if allocation fails (OOM).
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// Not applicable.
    ///
    /// # See Also
    /// - [`new`](Self::new) for explicit construction.
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

// Drop impl: deliberately unbounded over K/V — ptr::drop_in_place is unsafe
// anyway, and initialization is tracked manually through meta[i] > 0.
impl<K, V> Drop for RawTable<K, V> {
    /// Drops all initialized keys/values and deallocates the `SoA` memory block.
    ///
    /// # Summary
    /// Final cleanup when the table goes out of scope.
    ///
    /// # Description
    /// Walks every slot; wherever `meta[i] > 0` (slot occupied) the key and value
    /// are dropped in place. Afterwards the whole `SoA` block is released with a
    /// single call to `memory::deallocate_block`.
    ///
    /// # Examples
    /// Invoked automatically; never called by hand.
    ///
    /// # Panics
    /// Never panics. Drop must not unwind.
    ///
    /// # Errors
    /// Not applicable.
    ///
    /// # Safety
    /// - A slot is dropped only when `meta[i] > 0`, so only initialized memory is touched.
    /// - `base_ptr` is valid and exclusively owned; the block is freed exactly once.
    /// - No references into the table may outlive this call.
    ///
    /// # See Also
    /// - [`clear`](Self::clear) for manual cleanup without deallocation.
    /// - [`memory::deallocate_block`] for low-level deallocation.
    fn drop(&mut self) {
        let key_size = mem::size_of::<K>();
        let val_size = mem::size_of::<V>();

        // SAFETY: every initialized slot is dropped before deallocation; the
        // meta[i] > 0 check guarantees we never drop uninitialized memory.
        unsafe {
            for slot in 0..self.capacity {
                let occupied = *self.meta_ptr.add(slot) != 0;
                if occupied {
                    ptr::drop_in_place(self.keys_ptr.add(slot));
                    ptr::drop_in_place(self.vals_ptr.add(slot));
                }
            }
            // base_ptr is already NonNull<u8>, so it can be handed over directly.
            memory::deallocate_block(self.base_ptr, self.capacity, key_size, val_size);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use alloc::string::String;

    /// Insert / lookup / duplicate-reject / remove round-trip on a fresh table.
    #[test]
    fn test_raw_table_basic_ops() {
        let mut t: RawTable<u32, String> = RawTable::new();
        assert!(t.is_empty());

        // First insert succeeds and is observable via get().
        assert!(t.insert(1, String::from("one")));
        assert_eq!(t.size(), 1);
        assert_eq!(t.get(&1), Some(&String::from("one")));

        // Inserting the same key again reports a duplicate.
        assert!(!t.insert(1, String::from("updated"))); // Duplicate

        // Removal empties the table and the key is gone.
        assert!(t.remove(&1));
        assert_eq!(t.get(&1), None);
        assert!(t.is_empty());
    }

    /// Repeated inserts past the load factor must grow the table and keep
    /// every previously inserted pair reachable.
    #[test]
    fn test_raw_table_resize_chain() {
        let mut t: RawTable<usize, usize> = RawTable::with_capacity(4);
        for key in 0..20 {
            assert!(t.insert(key, key * 10));
        }
        // 20 elements at an 80% load factor need at least 25 slots.
        assert!(t.capacity() >= 25);
        for key in 0..20 {
            assert_eq!(t.get(&key), Some(&(key * 10)));
        }
    }

    /// clear() drops the contents but must not shrink the allocation.
    #[test]
    fn test_raw_table_clear_retains_capacity() {
        let mut t: RawTable<u64, u64> = RawTable::with_capacity(32);
        for k in 0..10 {
            t.insert(k, k);
        }
        let capacity_snapshot = t.capacity();
        t.clear();
        assert_eq!(t.size(), 0);
        assert_eq!(t.capacity(), capacity_snapshot);
    }
}