// const_lru — src/lib.rs

1#![no_std]
2#![doc = include_str!("../README.md")]
3
4use core::borrow::Borrow;
5use core::cmp::Ordering;
6use core::mem::MaybeUninit;
7use core::ptr::{self, addr_of_mut};
8use num_traits::{PrimInt, Unsigned};
9
10mod entry;
11mod errs;
12mod iters;
13
14pub use entry::*;
15pub use errs::*;
16pub use iters::into_iter::IntoIter;
17pub use iters::iter::Iter;
18pub use iters::iter_key_order::IterKeyOrder;
19pub use iters::iter_key_order_mut::IterKeyOrderMut;
20pub use iters::iter_mut::IterMut;
21
22use iters::iter_key_order::IterIndexed;
23use iters::iter_maybe_uninit::IterMaybeUninit;
24
/// Constant capacity key-addressed LRU cache.
///
/// Generics:
/// - `K`. Type of key. `Ord` is used for lookup and to address entries.
/// - `V`. Type of value.
/// - `CAP`. Capacity of the cache.
/// - `I`. Type of the index used. Must be an unsigned primitive type with bitwidth <= `usize`'s bitwidth.
///
/// Internally this is an intrusive doubly-linked list (MRU at `head`, LRU at
/// `tail`) threaded through fixed arrays, plus a sorted index (`bs_index`)
/// for O(log n) key lookup. The value `CAP` is used as a "null" sentinel in
/// the link arrays.
#[derive(Debug)]
pub struct ConstLru<K, V, const CAP: usize, I: PrimInt + Unsigned = usize> {
    /// number of occupied entries; always <= CAP
    len: I,

    /// head is index of most recently used
    ///
    /// can be any value if cache is empty
    head: I,

    /// tail is index of least recently used
    ///
    /// if cache is empty, tail is the first slot of unallocated memory / "free-list"
    /// else, next of the tail is the first slot of unallocated memory / "free-list"
    ///
    /// tail is always < CAP
    tail: I,

    /// binary search index: the first `len` slots hold indices into `keys`,
    /// sorted ascending by key (see `get_index_of`)
    bs_index: [I; CAP],

    /// link towards the LRU end; disregard if value == CAP
    nexts: [I; CAP],

    /// link towards the MRU end; disregard if value == CAP
    prevs: [I; CAP],

    /// key slot `i` is initialized iff node `i` is part of the used list
    keys: [MaybeUninit<K>; CAP],

    /// value slot `i` is initialized iff node `i` is part of the used list
    values: [MaybeUninit<V>; CAP],
}
62
impl<K, V, const CAP: usize, I: PrimInt + Unsigned> ConstLru<K, V, CAP, I> {
    /// Creates a new empty `ConstLru` on the stack
    ///
    /// panics if
    /// - `CAP > I::MAX`
    /// - `I::MAX > usize::MAX`
    ///
    /// WARNING: this might result in runtime stack overflow errors for large `CAP`.
    /// Use [`Self::init_at_alloc`] to initialize larger variants at preallocated memory
    pub fn new() -> Self {
        let mut res: MaybeUninit<Self> = MaybeUninit::uninit();
        unsafe {
            // SAFETY: `res` is uninitialized memory, which is exactly what
            // init_at_alloc requires for its destination pointer
            Self::init_at_alloc(res.as_mut_ptr());
            res.assume_init()
        }
    }

    /// Initializes the ConstLru at a region of allocated memory
    ///
    /// # Safety
    /// `ptr` must point to uninitialized memory, since init()
    /// overwrites the data at `ptr`
    ///
    /// panics if
    /// - `CAP > I::MAX`
    /// - `I::MAX > usize::MAX`
    ///
    /// Example:
    ///
    /// ```
    /// use const_lru::ConstLru;
    /// use std::alloc::{alloc, Layout};
    ///
    /// let layout = Layout::new::<ConstLru<u32, u16, 1_000, u16>>();
    /// let container: Box<ConstLru<u32, u16, 1_000, u16>> = unsafe {
    ///     let ptr = alloc(layout) as *mut ConstLru<u32, u16, 1_000, u16>;
    ///     ConstLru::init_at_alloc(ptr);
    ///     Box::from_raw(ptr)
    /// };
    /// ```
    pub unsafe fn init_at_alloc(ptr: *mut Self) {
        // using as_mut_ptr from MaybeUninit is UB,
        // initialize fields using addr_of_mut!()

        // validate the I <-> usize conversions once up front so the
        // to_usize()/I::from() unwraps throughout the impl cannot fail
        let i_max = I::max_value()
            .to_usize()
            .unwrap_or_else(|| panic!("I::MAX > usize::MAX"));
        if CAP > i_max {
            panic!("CAP > I::MAX");
        }

        let cap = I::from(CAP).unwrap();

        // empty cache: len = 0, head = CAP sentinel (no MRU yet),
        // tail = 0 = first slot of the free-list
        addr_of_mut!((*ptr).len).write(I::zero());
        addr_of_mut!((*ptr).head).write(cap);
        addr_of_mut!((*ptr).tail).write(I::zero());

        // nexts = [1, 2, ..., cap-1, cap]
        for i in 0..CAP {
            addr_of_mut!((*ptr).nexts[i]).write(I::from(i + 1).unwrap());
        }

        // prevs = [cap, 0, 1, ..., cap-2]
        if CAP > 0 {
            addr_of_mut!((*ptr).prevs[0]).write(cap);
            for i in 1..CAP {
                addr_of_mut!((*ptr).prevs[i]).write(I::from(i - 1).unwrap());
            }
        }

        // bs_index = [cap, ..., cap]
        // UB if not initialized
        for i in 0..CAP {
            addr_of_mut!((*ptr).bs_index[i]).write(cap);
        }

        // keys and values should remain uninitialized
    }

    /// private helper fn.
    ///
    /// Unlinks the node at `index` from the doubly-linked list,
    /// patching its previous and next nodes, as well as self.head and self.tail if required.
    ///
    /// Can be used on both valid and invalid nodes.
    ///
    /// When this fn returns, `index`'s next and prev should be treated as invalid
    ///
    /// `self.head` and `self.tail` are not modified if only 1 elem in list
    ///
    /// Requirements:
    /// - index < CAP
    fn unlink_node(&mut self, index: I) {
        let i = index.to_usize().unwrap();
        let next = self.nexts[i];
        let prev = self.prevs[i];

        // index.next.prev = index.prev
        // (skipped when next is the CAP sentinel, i.e. there is no next node)
        if next != self.cap() {
            self.prevs[next.to_usize().unwrap()] = prev;
        }

        // index.prev.next = index.next
        if prev != self.cap() {
            self.nexts[prev.to_usize().unwrap()] = next;
        }

        let is_one_elem_list = self.head == self.tail;

        if self.head == index && !is_one_elem_list {
            self.head = next;
        }

        if self.tail == index && !is_one_elem_list {
            self.tail = prev;
        }
    }

    /// private helper fn.
    ///
    /// Moves the element at index to the most-recently-used position.
    ///
    /// Requirements:
    /// - !self.is_empty()
    /// - index must be that of a valid node
    fn move_to_head(&mut self, index: I) {
        // already the MRU: nothing to do
        if self.head == index {
            return;
        }

        self.unlink_node(index);
        let i = index.to_usize().unwrap();

        // since self.head != index
        // and index is valid,
        // head must be valid
        let head = self.head;
        // splice index in front of the old head
        self.prevs[i] = self.cap();
        self.nexts[i] = head;

        self.prevs[head.to_usize().unwrap()] = index;

        self.head = index;
    }

    /// Cleanup for drop impl. Drops keys and values.
    /// Other fields should be all primitive types
    fn drop_cleanup(&mut self) {
        // NOTE(review): relies on IterMaybeUninit yielding only slots of
        // occupied nodes, so no uninitialized slot is ever dropped —
        // confirm in iters::iter_maybe_uninit
        for (k, v) in IterMaybeUninit::new(self) {
            unsafe {
                k.assume_init_drop();
                v.assume_init_drop();
            }
        }
    }

    /// Creates an iterator that iterates through the keys and values of the `ConstLru` from most-recently-used to least-recently-used
    ///
    /// Does not change the LRU order of the elements.
    ///
    /// Double-ended: reversing iterates from least-recently-used to most-recently-used
    pub fn iter(&self) -> Iter<K, V, CAP, I> {
        Iter::new(self)
    }

    /// Creates an iterator that iterates through the keys and mutable values of the `ConstLru` from most-recently-used to least-recently-used
    ///
    /// Does not change the LRU order of the elements, even if mutated.
    ///
    /// Double-ended: reversing iterates from least-recently-used to most-recently-used
    pub fn iter_mut(&mut self) -> IterMut<K, V, CAP, I> {
        IterMut::new(self)
    }

    /// Creates an iterator that iterates through the keys and values of the `ConstLru` in the order of its keys
    ///
    /// Does not change the LRU order of the elements.
    ///
    /// Double-ended: reversing iterates from descending order of its keys
    pub fn iter_key_order(&self) -> IterKeyOrder<K, V, CAP, I> {
        IterKeyOrder::new(self)
    }

    /// Creates an iterator that iterates through the keys and mutable values of the `ConstLru` in the order of its keys
    ///
    /// Does not change the LRU order of the elements, even if mutated.
    ///
    /// Double-ended: reversing iterates from descending order of its keys
    pub fn iter_key_order_mut(&mut self) -> IterKeyOrderMut<K, V, CAP, I> {
        IterKeyOrderMut::new(self)
    }

    /// Clears the `ConstLru`, removing all key-value pairs.
    pub fn clear(&mut self) {
        // drop all live keys/values, then re-run in-place initialization
        self.drop_cleanup();
        let ptr_to_self: *mut Self = self;
        // SAFETY: after drop_cleanup no initialized K/V remain, so
        // overwriting *self with a fresh empty state leaks nothing
        unsafe { Self::init_at_alloc(ptr_to_self) }
    }

    /// Returns the maximum number of elements this `ConstLru` can hold
    pub fn cap(&self) -> I {
        I::from(CAP).unwrap()
    }

    /// Returns `true` if the `ConstLru` contains no elements.
    pub fn is_empty(&self) -> bool {
        self.len() == I::zero()
    }

    /// Returns `true` if the `ConstLru` has reached max capacity.
    pub fn is_full(&self) -> bool {
        self.len() == self.cap()
    }

    /// Returns the number of elements in the `ConstLru`.
    pub fn len(&self) -> I {
        self.len
    }

    /// Assumes `index` is of a valid node
    /// Moves `index` to MRU position
    ///
    /// Returns the value previously stored at `index`
    fn insert_replace_value(&mut self, index: I, replacement: V) -> V {
        let old_v = unsafe { self.values[index.to_usize().unwrap()].assume_init_mut() };
        let old_v_out = core::mem::replace(old_v, replacement);
        self.move_to_head(index);
        old_v_out
    }

    // Assumes N > 0 and self is not full
    // Moves newly inserted elem to MRU position
    // Returns index entry was inserted into
    fn insert_alloc_new(&mut self, insert_bs_i: I, k: K, v: V) -> I {
        // pop the first slot off the free-list:
        // empty => tail itself is the first free slot (also becomes head),
        // else  => the slot after tail is the first free slot
        let free_index = if self.is_empty() {
            self.head = self.tail;
            self.tail
        } else {
            self.nexts[self.tail.to_usize().unwrap()]
        };
        // temporarily make the new node the tail; move_to_head() below
        // restores the real tail via unlink_node()
        self.tail = free_index;
        let f = free_index.to_usize().unwrap();
        self.keys[f].write(k);
        self.values[f].write(v);

        if insert_bs_i < self.len {
            // shift everything between [bs_i, len) right
            unsafe {
                let insert_bs_i_ptr = self
                    .bs_index
                    .as_mut_ptr()
                    .add(insert_bs_i.to_usize().unwrap());
                // overlapping ranges: ptr::copy (memmove), not copy_nonoverlapping
                ptr::copy(
                    insert_bs_i_ptr,
                    insert_bs_i_ptr.add(1),
                    (self.len - insert_bs_i).to_usize().unwrap(),
                );
            }
        }
        self.bs_index[insert_bs_i.to_usize().unwrap()] = free_index;

        self.len = self.len + I::one();

        self.move_to_head(self.tail);
        free_index
    }

    // Assumes index tuple is of a valid node. Should be result of Ok returned by self.get_index_of()
    fn remove_by_index(&mut self, (index, bs_i): (I, I)) -> (K, V) {
        let i = index.to_usize().unwrap();

        // move key and value out; slot i counts as uninitialized from here on
        let key = unsafe { self.keys[i].assume_init_read() };
        let val = unsafe { self.values[i].assume_init_read() };

        // if len == 1, correct links are already in place
        if self.len() > I::one() {
            // len > 1
            // move to front of free list
            self.unlink_node(index);
            let t = self.tail.to_usize().unwrap();
            let first_free = self.nexts[t];

            if first_free < self.cap() {
                self.prevs[first_free.to_usize().unwrap()] = index;
            }
            self.nexts[i] = first_free;

            self.prevs[i] = self.tail;
            self.nexts[t] = index;
        }

        unsafe {
            let bs_i_ptr = self.bs_index.as_mut_ptr().add(bs_i.to_usize().unwrap());
            // shift everything left to fill bs_i
            ptr::copy(
                bs_i_ptr.add(1),
                bs_i_ptr,
                (self.len - bs_i - I::one()).to_usize().unwrap(),
            );
        }

        self.len = self.len - I::one();
        (key, val)
    }

    /// Assumes index is valid
    fn get_by_index(&self, index: I) -> &V {
        unsafe { self.values[index.to_usize().unwrap()].assume_init_ref() }
    }

    /// Assumes index is valid
    fn get_mut_by_index(&mut self, index: I) -> &mut V {
        unsafe { self.values[index.to_usize().unwrap()].assume_init_mut() }
    }
}
376
impl<K: Ord, V, const CAP: usize, I: PrimInt + Unsigned> ConstLru<K, V, CAP, I> {
    /// Inserts a key-value pair into the map. The entry is moved to the most-recently-used slot
    ///
    /// If `CAP == 0`, `None` is returned.
    ///
    /// If the map did not have this key present and is not full, `None` is returned.
    ///
    /// If the map did have this key present, the value is updated, and the old value is returned in a [`InsertReplaced::OldValue`].
    /// The key is not updated, though; this matters for types that can be `==` without being identical.
    ///
    /// If the map is full, the least-recently used key-value pair is evicted and returned in a [`InsertReplaced::LruEvicted`].
    pub fn insert(&mut self, k: K, v: V) -> Option<InsertReplaced<K, V>> {
        if CAP == 0 {
            return None;
        }
        // Ok  => key already present: replace the value in place.
        // Err => key absent: the bs_index position it should be inserted at
        let insert_bs_i = match self.get_index_of(&k) {
            Ok((existing_index, _)) => {
                return Some(InsertReplaced::OldValue(
                    self.insert_replace_value(existing_index, v),
                ))
            }
            Err(i) => i,
        };
        if self.is_full() {
            let (_, (old_k, old_v)) = self.insert_evict_lru(insert_bs_i, k, v);
            Some(InsertReplaced::LruEvicted(old_k, old_v))
        } else {
            self.insert_alloc_new(insert_bs_i, k, v);
            None
        }
    }

    /// Assumes N > 0 and self is full
    /// Moves newly inserted elem to MRU position
    ///
    /// Returns (index entry was inserted into, evicted entry)
    fn insert_evict_lru(&mut self, insert_bs_i: I, k: K, v: V) -> (I, (K, V)) {
        // N > 0, tail must be valid
        let i = self.tail;
        let t = i.to_usize().unwrap();
        // move the LRU entry out, then reuse its slot for the new entry
        let evicted_k = unsafe { self.keys[t].assume_init_read() };
        let evicted_v = unsafe { self.values[t].assume_init_read() };
        // keys[t] still holds the evicted key's bytes at this point,
        // so the binary search over all len entries is still sound
        let Ok((_should_be_t, evicted_bs_i)) = self.get_index_of(&evicted_k) else { unreachable!() };
        self.keys[t].write(k);
        self.values[t].write(v);

        // patch bs_index: the evicted key's slot disappears and the new key's
        // slot appears, so rotate the span between the two positions by one
        match insert_bs_i.cmp(&evicted_bs_i) {
            // nothing to be done, bs_index[insert_bs_i] already == tail
            Ordering::Equal => (),
            Ordering::Less => {
                // shift everything between [insert_bs_i, evicted_bs_i) right
                // then insert at insert_bs_i
                let b = insert_bs_i.to_usize().unwrap();
                unsafe {
                    let bs_i_ptr = self.bs_index.as_mut_ptr().add(b);
                    ptr::copy(
                        bs_i_ptr,
                        bs_i_ptr.add(1),
                        (evicted_bs_i - insert_bs_i).to_usize().unwrap(),
                    );
                }
                self.bs_index[b] = self.tail;
            }
            Ordering::Greater => {
                // shift everything between (evicted_bs_i, bs_i - 1] left
                // then insert at bs_i - 1

                // safety: greater, so bs_i must be > 0
                let inser_bs_i_sub_1 = insert_bs_i - I::one();
                unsafe {
                    let evicted_bs_i_ptr = self
                        .bs_index
                        .as_mut_ptr()
                        .add(evicted_bs_i.to_usize().unwrap());
                    ptr::copy(
                        evicted_bs_i_ptr.add(1),
                        evicted_bs_i_ptr,
                        (inser_bs_i_sub_1 - evicted_bs_i).to_usize().unwrap(),
                    );
                }
                self.bs_index[inser_bs_i_sub_1.to_usize().unwrap()] = self.tail;
            }
        }

        self.move_to_head(self.tail);
        (i, (evicted_k, evicted_v))
    }

    /// Removes a key from the `ConstLru`, returning the value at the key if the key was previously in the `ConstLru`.
    pub fn remove<Q: Ord + ?Sized>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
    {
        let tup = self.get_index_of(k).ok()?;
        Some(self.remove_by_index(tup).1)
    }

    /// Returns a reference to the value corresponding to the key and moves entry to most-recently-used slot.
    ///
    /// To not update to most-recently-used, use [`Self::get_untouched`]
    pub fn get<Q: Ord + ?Sized>(&mut self, k: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
    {
        let (index, _) = self.get_index_of(k).ok()?;
        self.move_to_head(index);
        Some(self.get_by_index(index))
    }

    /// Returns a mutable reference to the value corresponding to the key and moves entry to most-recently-used slot.
    ///
    /// To not update to most-recently-used, use [`Self::get_mut_untouched`]
    pub fn get_mut<Q: Ord + ?Sized>(&mut self, k: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
    {
        let (index, _) = self.get_index_of(k).ok()?;
        self.move_to_head(index);
        Some(self.get_mut_by_index(index))
    }

    /// Binary-searches `bs_index` for `k`.
    ///
    /// Ok(kv_i, bs_index_i)
    ///
    /// Err(bs_index_i)
    fn get_index_of<Q: Ord + ?Sized>(&self, k: &Q) -> Result<(I, I), I>
    where
        K: Borrow<Q>,
    {
        // only the first `len` bs_index slots point at initialized keys
        let l = self.len().to_usize().unwrap();
        let valid_bs_index = &self.bs_index[0..l];
        valid_bs_index
            .binary_search_by(|probe_index| {
                let p = probe_index.to_usize().unwrap();
                let probe = unsafe { self.keys[p].assume_init_ref() };
                probe.borrow().cmp(k)
            })
            .map(|bs_i| (self.bs_index[bs_i], I::from(bs_i).unwrap()))
            .map_err(|new_bsi| I::from(new_bsi).unwrap())
    }

    /// Returns a reference to the value corresponding to the key without updating the entry to most-recently-used slot
    ///
    /// To update to most-recently-used, use [`Self::get`]
    pub fn get_untouched<Q: Ord + ?Sized>(&self, k: &Q) -> Option<&V>
    where
        K: Borrow<Q>,
    {
        let (index, _) = self.get_index_of(k).ok()?;
        Some(self.get_by_index(index))
    }

    /// Returns a mutable reference to the value corresponding to the key without updating the entry to most-recently-used slot
    ///
    /// To update to most-recently-used, use [`Self::get_mut`]
    pub fn get_mut_untouched<Q: Ord + ?Sized>(&mut self, k: &Q) -> Option<&mut V>
    where
        K: Borrow<Q>,
    {
        let (index, _) = self.get_index_of(k).ok()?;
        Some(self.get_mut_by_index(index))
    }

    /// Gets the given key’s corresponding entry in the map for in-place manipulation.
    ///
    /// **panics** if CAP == 0
    pub fn entry(&mut self, k: K) -> Entry<'_, K, V, CAP, I> {
        Entry::new(self, k)
    }
}
546
impl<K: Clone, V: Clone, const CAP: usize, I: PrimInt + Unsigned> ConstLru<K, V, CAP, I> {
    /// Clones the ConstLru to a region of allocated memory
    ///
    /// # Safety
    /// `dst` must point to uninitialized memory, since this
    /// overwrites the data at `dst`
    pub unsafe fn clone_to_alloc(&self, dst: *mut Self) {
        // primitive bookkeeping fields are plain copies
        addr_of_mut!((*dst).len).write(self.len);
        addr_of_mut!((*dst).head).write(self.head);
        addr_of_mut!((*dst).tail).write(self.tail);

        // .write(self.nexts) result in stack overflow for large CAP, so use raw memmove
        ptr::copy(
            self.nexts.as_ptr(),
            addr_of_mut!((*dst).nexts) as *mut I,
            CAP,
        );
        ptr::copy(
            self.prevs.as_ptr(),
            addr_of_mut!((*dst).prevs) as *mut I,
            CAP,
        );
        ptr::copy(
            self.bs_index.as_ptr(),
            addr_of_mut!((*dst).bs_index) as *mut I,
            CAP,
        );

        // deep-clone keys/values into the same slot indices.
        // NOTE(review): relies on IterIndexed yielding every occupied slot
        // exactly once (uninitialized slots stay uninitialized in dst) —
        // confirm in iters::iter_key_order
        for (index, k, v) in IterIndexed::new(self) {
            let i = index.to_usize().unwrap();
            addr_of_mut!((*dst).keys[i]).write(MaybeUninit::new(k.clone()));
            addr_of_mut!((*dst).values[i]).write(MaybeUninit::new(v.clone()));
        }
    }
}
582
583/// WARNING: this might result in runtime stack overflow errors for large `CAP`.
584/// To clone a large `ConstLru`, use [`ConstLru::clone_to_alloc`]
585impl<K: Clone, V: Clone, const CAP: usize, I: PrimInt + Unsigned> Clone for ConstLru<K, V, CAP, I> {
586    fn clone(&self) -> Self {
587        let mut res: MaybeUninit<Self> = MaybeUninit::uninit();
588        unsafe {
589            self.clone_to_alloc(res.as_mut_ptr());
590            res.assume_init()
591        }
592    }
593}
594
595impl<K, V, const CAP: usize, I: PrimInt + Unsigned> Default for ConstLru<K, V, CAP, I> {
596    fn default() -> Self {
597        Self::new()
598    }
599}
600
601impl<K, V, const CAP: usize, I: PrimInt + Unsigned> Drop for ConstLru<K, V, CAP, I> {
602    fn drop(&mut self) {
603        self.drop_cleanup();
604    }
605}
606
/// Consumes the `ConstLru` and iterates over its owned key-value pairs.
impl<K, V, const CAP: usize, I: PrimInt + Unsigned> IntoIterator for ConstLru<K, V, CAP, I> {
    // item type is whatever IntoIter yields (defined in iters::into_iter)
    type Item = <IntoIter<K, V, CAP, I> as Iterator>::Item;

    type IntoIter = IntoIter<K, V, CAP, I>;

    fn into_iter(self) -> Self::IntoIter {
        IntoIter::new(self)
    }
}
616
/// Optional return type of [`ConstLru::insert`]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum InsertReplaced<K, V> {
    /// The cache was full: this least-recently-used entry was evicted
    /// to make room for the newly inserted one
    LruEvicted(K, V),
    /// The key was already present: this is the value it previously
    /// mapped to (the stored key is left unchanged)
    OldValue(V),
}
623
/// Creates a full ConstLru cache from an `entries` array.
///
/// Assumes `entries` is in MRU -> LRU order.
///
/// Returns error if duplicate keys found.
///
/// WARNING: this might result in runtime stack overflow errors for large `CAP`.
impl<K: Ord, V, const CAP: usize, I: PrimInt + Unsigned> TryFrom<[(K, V); CAP]>
    for ConstLru<K, V, CAP, I>
{
    type Error = DuplicateKeysError<K>;

    fn try_from(entries: [(K, V); CAP]) -> Result<Self, Self::Error> {
        // entries need to fit on the stack too, so Self::new() shouldn't stack overflow
        let mut res = Self::new();
        // full cache with array entry i occupying slot i and entry 0 as MRU.
        // new() already initialized nexts/prevs to the in-order chain
        // 0 -> 1 -> ... -> CAP-1, which matches head = 0, tail = CAP - 1
        res.len = res.cap();
        res.head = I::zero();
        res.tail = if CAP > 0 {
            res.len - I::one()
        } else {
            I::zero()
        };

        for (i, (k, v)) in entries.into_iter().enumerate() {
            res.keys[i].write(k);
            res.values[i].write(v);
        }

        // build the binary-search index: fill with [0, 1, ..., CAP-1],
        // then sort the indices by the keys they point at
        for (i, val) in res.bs_index.iter_mut().enumerate() {
            *val = I::from(i).unwrap();
        }
        res.bs_index.sort_unstable_by(|a, b| {
            let k_a = unsafe { res.keys[a.to_usize().unwrap()].assume_init_ref() };
            let k_b = unsafe { res.keys[b.to_usize().unwrap()].assume_init_ref() };
            k_a.cmp(k_b)
        });

        // after sorting, duplicate keys (if any) are adjacent in bs_index
        if CAP > 1 {
            for w in res.bs_index.windows(2) {
                let index_1 = w[0];
                let i1 = index_1.to_usize().unwrap();
                let i2 = w[1].to_usize().unwrap();
                let k1 = unsafe { res.keys[i1].assume_init_ref() };
                let k2 = unsafe { res.keys[i2].assume_init_ref() };
                if k1 == k2 {
                    // remove from list so no double free
                    // when `res` is dropped by the early return below
                    res.unlink_node(index_1);
                    res.len = res.len - I::one();

                    // cleanup value
                    unsafe { res.values[i1].assume_init_drop() };
                    // move the duplicate key out to report it; its slot is
                    // now uninitialized and excluded from res's cleanup
                    let k_copied_out = unsafe { res.keys[i1].assume_init_read() };
                    return Err(DuplicateKeysError(k_copied_out));
                }
            }
        }

        Ok(res)
    }
}