compressed_intvec/fixed/atomic/mod.rs

//! A thread-safe, compressed vector of integers with fixed-width encoding.
//!
//! This module provides [`AtomicFixedVec`], a data structure that behaves like
//! [`FixedVec`] but allows for concurrent access and
//! modification from multiple threads. It is designed for scenarios where a
//! large collection of integers must be shared and mutated in a parallel
//! environment.
//!
//! All operations that modify the vector's contents are implemented using
//! atomic instructions (e.g., compare-and-swap loops), ensuring thread safety
//! without requiring a global lock.
//!
//! # Atomicity Guarantees and Locking
//!
//! The atomicity of operations depends on the configured `bit_width`.
//!
//! - **Power-of-Two `bit_width`**: When the `bit_width` is a power of two
//!   (e.g., 2, 4, 8, 16, 32), it evenly divides the 64-bit word size, so
//!   most operations can be performed with lock-free atomic instructions:
//!   each element is guaranteed to be fully contained within a single
//!   [`AtomicU64`] word.
//!
//! - **Non-Power-of-Two `bit_width`**: When the `bit_width` is not a power of
//!   two, an element's value may span the boundary between two [`AtomicU64`]
//!   words. Modifying such an element requires updating two words at once,
//!   which cannot be done with a single atomic hardware instruction.
//!
//! To handle this case, [`AtomicFixedVec`] uses a technique called _lock striping_.
//! It maintains a pool of [`parking_lot::Mutex`] locks. When an operation needs
//! to modify a value that spans two words, it acquires a lock for that specific
//! memory region. This ensures that the two-word update is itself atomic with
//! respect to other threads, while still allowing concurrent operations on
//! different parts of the vector. This approach avoids a single global lock,
//! preserving a high degree of parallelism.
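//!
//! As a rough sketch of the idea (using a plain [`std::sync::Mutex`] here
//! rather than the crate's `parking_lot` locks, and a hypothetical pool size;
//! the real lock count is chosen by an internal heuristic), the stripe for a
//! spanning update can be selected by masking the index of the first word the
//! element touches with a power-of-two pool size:
//!
//! ```
//! use std::sync::Mutex;
//!
//! // Hypothetical stripe pool: eight locks, a power of two.
//! let locks: Vec<Mutex<()>> = (0..8).map(|_| Mutex::new(())).collect();
//!
//! // An element that starts in `word_index` and spills into `word_index + 1`
//! // locks the stripe that owns `word_index`; because the pool size is a
//! // power of two, a bitmask stands in for the modulo.
//! let word_index = 13usize;
//! let stripe = word_index & (locks.len() - 1);
//! let _guard = locks[stripe].lock().unwrap();
//! // ... read and rewrite both words while the guard is held ...
//! ```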
//!
//! > A future version may introduce a more sophisticated locking strategy.
//!
//! ## Performance Considerations
//!
//! The trade-off is between memory compactness and performance. While a
//! non-power-of-two `bit_width` provides the most space-efficient storage,
//! it may incur a performance overhead for write operations that span word
//! boundaries due to locking.
//!
//! For write-heavy, performance-critical workloads, choosing a power-of-two
//! `bit_width` (e.g., by using [`BitWidth::PowerOfTwo`]) is recommended to
//! ensure all operations remain lock-free.
//!
//! # Examples
//!
//! ## Basic Usage
//!
//! ```
//! use compressed_intvec::prelude::*;
//! use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec};
//! use std::sync::Arc;
//! use std::thread;
//! use std::sync::atomic::Ordering;
//!
//! // Create from a slice using the builder.
//! let initial_data: Vec<u32> = vec![10, 20, 30, 40];
//! let atomic_vec: Arc<UAtomicFixedVec<u32>> = Arc::new(
//!     AtomicFixedVec::builder()
//!         .build(&initial_data)
//!         .unwrap()
//! );
//!
//! // Share the vector across threads.
//! let mut handles = vec![];
//! for i in 0..4 {
//!     let vec_clone = Arc::clone(&atomic_vec);
//!     handles.push(thread::spawn(move || {
//!         // Each thread atomically updates its own slot.
//!         vec_clone.store(i, 63, Ordering::SeqCst);
//!     }));
//! }
//! for handle in handles {
//!     handle.join().unwrap();
//! }
//! assert_eq!(atomic_vec.load(3, Ordering::SeqCst), 63);
//! ```
//!
//! ## Storing Signed Integers
//!
//! [`AtomicFixedVec`] can also store signed integers. The underlying [`Storable`]
//! trait uses zig-zag encoding to store signed values efficiently, so that
//! small negative numbers require few bits, just like small positive numbers.
//!
//! ```
//! use compressed_intvec::prelude::*;
//! use compressed_intvec::fixed::{AtomicFixedVec, SAtomicFixedVec};
//! use std::sync::Arc;
//! use std::sync::atomic::Ordering;
//!
//! // The values range from -2 to 1. To also store -3 later, we need 3 bits.
//! let initial_data: Vec<i16> = vec![-2, -1, 0, 1];
//! let atomic_vec: Arc<SAtomicFixedVec<i16>> = Arc::new(
//!     AtomicFixedVec::builder()
//!         .bit_width(BitWidth::Explicit(3)) // Explicitly set bit width
//!         .build(&initial_data)
//!         .unwrap()
//! );
//!
//! assert_eq!(atomic_vec.bit_width(), 3);
//! assert_eq!(atomic_vec.load(0, Ordering::SeqCst), -2);
//!
//! // Atomically update a value.
//! atomic_vec.store(0, -3, Ordering::SeqCst);
//! assert_eq!(atomic_vec.load(0, Ordering::SeqCst), -3);
//! ```
//!
//! ## Parallel Iteration
//!
//! When the `parallel` feature is enabled (it is enabled by default), you can use
//! [`par_iter`](AtomicFixedVec::par_iter) to process the vector's elements concurrently.
//!
//! ```
//! # #[cfg(feature = "parallel")] {
//! use compressed_intvec::prelude::*;
//! use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec};
//! use rayon::prelude::*;
//!
//! let data: Vec<u32> = (0..10_000).collect();
//! let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
//!     .build(&data)
//!     .unwrap();
//!
//! // Use the parallel iterator to find the sum of all even numbers.
//! let sum_of_evens: u32 = vec.par_iter()
//!     .filter(|&x| x % 2 == 0)
//!     .sum();
//!
//! let expected_sum: u32 = (0..10_000).filter(|&x| x % 2 == 0).sum();
//! assert_eq!(sum_of_evens, expected_sum);
//! # }
//! ```
//!
//! # Memory Ordering and Locking
//!
//! The memory [`Ordering`] specified in methods like [`load`](AtomicFixedVec::load), [`store`](AtomicFixedVec::store), or
//! [`fetch_add`](AtomicFixedVec::fetch_add) is always respected, but its interaction with the internal locking
//! mechanism is important to understand.
//!
//! -   **Lock-Free Path**: When an element is fully contained within a single
//!     [`u64`] word, the specified [`Ordering`] is applied directly to the underlying
//!     atomic instructions, providing the standard guarantees described in the
//!     Rust documentation.
//!
//! -   **Locked Path**: When an element spans two [`u64`] words, a fine-grained
//!     mutex is acquired. This lock ensures that the two-word operation is
//!     atomic with respect to other locked operations on the same memory region.
//!     The specified [`Ordering`] is then applied to the atomic writes performed
//!     within the locked critical section. This guarantees that the effects of
//!     the operation become visible to other threads according to the chosen
//!     ordering, but the visibility is still mediated by the mutual exclusion
//!     provided by the lock.
//!
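//! On the lock-free path, acquire/release pairs compose exactly as they do on
//! a plain [`AtomicU64`]. A minimal sketch (the explicit bit width below is
//! only there so the stored value is not truncated):
//!
//! ```
//! use compressed_intvec::prelude::*;
//! use std::sync::atomic::Ordering;
//!
//! let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
//!     .bit_width(BitWidth::Explicit(8))
//!     .build(&[0u32; 4])
//!     .unwrap();
//!
//! // A Release store paired with an Acquire load establishes the usual
//! // happens-before relationship for this element.
//! vec.store(0, 42, Ordering::Release);
//! assert_eq!(vec.load(0, Ordering::Acquire), 42);
//! ```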

#[macro_use]
pub mod macros;
pub mod builder;

use crate::fixed::traits::Storable;
use crate::fixed::{BitWidth, Error, FixedVec};
use mem_dbg::{DbgFlags, MemDbgImpl, MemSize, SizeFlags};
use num_traits::{Bounded, ToPrimitive, WrappingAdd, WrappingSub};
use parking_lot::Mutex;
use std::marker::PhantomData;
use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut};
use std::sync::atomic::{AtomicU64, Ordering};

#[cfg(feature = "parallel")]
use rayon::prelude::*;

/// A thread-safe `FixedVec` for unsigned integers.
pub type UAtomicFixedVec<T> = AtomicFixedVec<T>;
/// A thread-safe `FixedVec` for signed integers.
pub type SAtomicFixedVec<T> = AtomicFixedVec<T>;

/// The upper bound on the number of locks to prevent excessive memory usage.
const MAX_LOCKS: usize = 1024;
/// The minimum number of locks to create, ensuring some striping even for small vectors.
const MIN_LOCKS: usize = 2;
/// A heuristic to determine the stripe size: one lock per this many data words.
const WORDS_PER_LOCK: usize = 64;

/// A proxy object for mutable access to an element within an [`AtomicFixedVec`]
/// during parallel iteration.
///
/// This struct is returned by the [`par_iter_mut`](AtomicFixedVec::par_iter_mut)
/// parallel iterator. It holds a temporary copy of an element's value. When the
/// proxy is dropped, its `Drop` implementation atomically writes the (potentially
/// modified) value back into the parent vector.
#[cfg(feature = "parallel")]
pub struct AtomicMutProxy<'a, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    vec: &'a AtomicFixedVec<T>,
    index: usize,
    value: T,
}

#[cfg(feature = "parallel")]
impl<'a, T> AtomicMutProxy<'a, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Creates a new `AtomicMutProxy`.
    ///
    /// This is called by `par_iter_mut`. It reads the initial value
    /// from the vector.
    fn new(vec: &'a AtomicFixedVec<T>, index: usize) -> Self {
        let value = vec.load(index, Ordering::Relaxed);
        Self { vec, index, value }
    }

    /// Consumes the proxy, returning the current value without writing it back.
    ///
    /// This can be used to avoid the overhead of a write operation if the value
    /// was read but not modified.
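    ///
    /// # Examples
    ///
    /// A small sketch of reading values through `par_iter_mut` without
    /// writing anything back:
    ///
    /// ```
    /// # #[cfg(feature = "parallel")] {
    /// use compressed_intvec::prelude::*;
    /// use rayon::prelude::*;
    ///
    /// let data: Vec<u32> = (0..16).collect();
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder().build(&data).unwrap();
    ///
    /// // `into_inner` consumes each proxy, so no write-back happens on drop.
    /// let copied: Vec<u32> = vec.par_iter_mut().map(|p| p.into_inner()).collect();
    /// assert_eq!(copied, data);
    /// # }
    /// ```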
    pub fn into_inner(self) -> T {
        use std::mem;

        let value = self.value;
        mem::forget(self); // Prevent the proxy from writing back
        value
    }
}

#[cfg(feature = "parallel")]
impl<T> Deref for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.value
    }
}

#[cfg(feature = "parallel")]
impl<T> DerefMut for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}

#[cfg(feature = "parallel")]
impl<T> Drop for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Writes the potentially modified value back to the [`AtomicFixedVec`] when the
    /// proxy goes out of scope.
    fn drop(&mut self) {
        // The value is copied before being passed to store.
        // Relaxed ordering is sufficient here because the synchronization is
        // handled by Rayon's fork-join model. The writes will be visible after
        // the parallel block completes.
        self.vec.store(self.index, self.value, Ordering::Relaxed);
    }
}

/// A thread-safe, compressed, randomly accessible vector of integers with
/// fixed-width encoding, backed by [`u64`] atomic words.
#[derive(Debug)]
pub struct AtomicFixedVec<T>
where
    T: Storable<u64>,
{
    /// The underlying storage for the bit-packed data.
    pub(crate) storage: Vec<AtomicU64>,
    /// A pool of locks to protect spanning-word operations.
    locks: Vec<Mutex<()>>,
    bit_width: usize,
    mask: u64,
    len: usize,
    _phantom: PhantomData<T>,
}

// Public API implementation
impl<T> AtomicFixedVec<T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Creates a builder for constructing an [`AtomicFixedVec`] from a slice.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec, BitWidth};
    ///
    /// let data: &[i16] = &[-100, 0, 100, 200];
    /// let vec: UAtomicFixedVec<i16> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::PowerOfTwo) // Force 16 bits for signed values
    ///     .build(data)
    ///     .unwrap();
    ///
    /// assert_eq!(vec.len(), 4);
    /// assert_eq!(vec.bit_width(), 16);
    /// ```
    #[inline(always)]
    pub fn builder() -> builder::AtomicFixedVecBuilder<T> {
        builder::AtomicFixedVecBuilder::new()
    }

    /// Returns the number of elements in the vector.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the vector contains no elements.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of bits used to encode each element.
    #[inline(always)]
    pub fn bit_width(&self) -> usize {
        self.bit_width
    }

    /// Returns a read-only slice of the underlying atomic storage words.
    #[inline(always)]
    pub fn as_slice(&self) -> &[AtomicU64] {
        &self.storage
    }

    /// Atomically loads the value at `index`.
    ///
    /// [`load`](AtomicFixedVec::load) takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. For more information, see the [Rust documentation on
    /// memory ordering](https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html).
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
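    ///
    /// # Examples
    ///
    /// A minimal usage sketch, mirroring the builder examples above:
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .build(&[7u32, 8, 9])
    ///     .unwrap();
    ///
    /// assert_eq!(vec.load(1, Ordering::SeqCst), 8);
    /// ```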
    #[inline(always)]
    pub fn load(&self, index: usize, order: Ordering) -> T {
        assert!(index < self.len, "load index out of bounds");
        let loaded_word = self.atomic_load(index, order);
        T::from_word(loaded_word)
    }

    /// Atomically loads the value at `index` without bounds checking.
    ///
    /// [`load_unchecked`](AtomicFixedVec::load_unchecked) takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. For more information, see the [Rust documentation on
    /// memory ordering](https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html).
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn load_unchecked(&self, index: usize, order: Ordering) -> T {
        debug_assert!(index < self.len, "load_unchecked index out of bounds");
        let loaded_word = self.atomic_load(index, order);
        T::from_word(loaded_word)
    }

    /// Atomically stores `value` at `index`.
    ///
    /// The stored value is not checked against the configured `bit_width`;
    /// if it is too large, it is silently truncated.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
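    ///
    /// # Examples
    ///
    /// A minimal usage sketch, mirroring the builder examples above:
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(8))
    ///     .build(&[1u32, 2, 3])
    ///     .unwrap();
    ///
    /// vec.store(2, 200, Ordering::SeqCst);
    /// assert_eq!(vec.load(2, Ordering::SeqCst), 200);
    /// ```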
    #[inline(always)]
    pub fn store(&self, index: usize, value: T, order: Ordering) {
        assert!(index < self.len, "store index out of bounds");
        let value_w = T::into_word(value);
        self.atomic_store(index, value_w, order);
    }

    /// Atomically stores `value` at `index` without bounds checking.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    /// Note that the stored value is not checked for whether it fits in the
    /// configured `bit_width` and will be truncated if it is too large.
    #[inline(always)]
    pub unsafe fn store_unchecked(&self, index: usize, value: T, order: Ordering) {
        debug_assert!(index < self.len, "store_unchecked index out of bounds");
        let value_w = T::into_word(value);
        self.atomic_store(index, value_w, order);
    }

    /// Atomically swaps the value at `index` with `value`, returning the
    /// previous value.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
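    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(8))
    ///     .build(&[5u32, 6])
    ///     .unwrap();
    ///
    /// let previous = vec.swap(0, 9, Ordering::SeqCst);
    /// assert_eq!(previous, 5);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 9);
    /// ```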
    #[inline(always)]
    pub fn swap(&self, index: usize, value: T, order: Ordering) -> T {
        assert!(index < self.len, "swap index out of bounds");
        let value_w = T::into_word(value);
        let old_word = self.atomic_swap(index, value_w, order);
        T::from_word(old_word)
    }

    /// Atomically swaps the value at `index` with `value` without bounds checking.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn swap_unchecked(&self, index: usize, value: T, order: Ordering) -> T {
        debug_assert!(index < self.len, "swap_unchecked index out of bounds");
        let value_w = T::into_word(value);
        let old_word = self.atomic_swap(index, value_w, order);
        T::from_word(old_word)
    }

    /// Atomically compares the value at `index` with `current` and, if they are
    /// equal, replaces it with `new`.
    ///
    /// Returns `Ok` with the previous value on success, or `Err` with the
    /// actual value if the comparison fails. This is also known as a
    /// "compare-and-set" (CAS) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
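    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(8))
    ///     .build(&[1u32, 2])
    ///     .unwrap();
    ///
    /// // Succeeds because the current value at index 0 is 1.
    /// assert_eq!(vec.compare_exchange(0, 1, 10, Ordering::SeqCst, Ordering::SeqCst), Ok(1));
    /// // Fails and reports the actual value.
    /// assert_eq!(vec.compare_exchange(0, 1, 20, Ordering::SeqCst, Ordering::SeqCst), Err(10));
    /// ```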
    #[inline(always)]
    pub fn compare_exchange(
        &self,
        index: usize,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        assert!(index < self.len, "compare_exchange index out of bounds");
        let current_w = T::into_word(current);
        let new_w = T::into_word(new);
        match self.atomic_compare_exchange(index, current_w, new_w, success, failure) {
            Ok(w) => Ok(T::from_word(w)),
            Err(w) => Err(T::from_word(w)),
        }
    }

    /// Atomically compares the value at `index` with `current` and, if they are
    /// equal, replaces it with `new`, without bounds checking.
    ///
    /// Returns `Ok` with the previous value on success, or `Err` with the
    /// actual value if the comparison fails. This is also known as a
    /// "compare-and-set" (CAS) operation.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn compare_exchange_unchecked(
        &self,
        index: usize,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        debug_assert!(
            index < self.len,
            "compare_exchange_unchecked index out of bounds"
        );
        let current_w = T::into_word(current);
        let new_w = T::into_word(new);
        match self.atomic_compare_exchange(index, current_w, new_w, success, failure) {
            Ok(w) => Ok(T::from_word(w)),
            Err(w) => Err(T::from_word(w)),
        }
    }

    /// Returns the element at `index`, or `None` if out of bounds.
    ///
    /// This is an ergonomic wrapper around [`load`](AtomicFixedVec::load) that uses [`Ordering::SeqCst`].
    #[inline(always)]
    pub fn get(&self, index: usize) -> Option<T> {
        if index >= self.len {
            return None;
        }
        Some(self.load(index, Ordering::SeqCst))
    }

    /// Returns the element at `index` without bounds checking.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn get_unchecked(&self, index: usize) -> T {
        self.load_unchecked(index, Ordering::SeqCst)
    }

    /// Returns an iterator over the elements of the vector.
    ///
    /// The iterator atomically loads each element using [`Ordering::SeqCst`].
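    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    ///
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .build(&[1u32, 2, 3])
    ///     .unwrap();
    ///
    /// let collected: Vec<u32> = vec.iter().collect();
    /// assert_eq!(collected, vec![1, 2, 3]);
    /// ```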
    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
        AtomicFixedVecIter {
            vec: self,
            current_index: 0,
        }
    }

    /// Returns a parallel iterator over the elements of the vector.
    ///
    /// The iterator atomically loads each element using [`Ordering::Relaxed`].
    /// This operation is highly parallelizable as each element can be loaded
    /// independently.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(feature = "parallel")] {
    /// use compressed_intvec::prelude::*;
    /// use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec, BitWidth};
    /// use rayon::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let data: Vec<u32> = (0..1000).collect();
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // Sum the elements in parallel.
    /// let sum: u32 = vec.par_iter().sum();
    /// assert_eq!(sum, (0..1000).sum());
    /// # }
    /// ```
    #[cfg(feature = "parallel")]
    pub fn par_iter(&self) -> impl ParallelIterator<Item = T> + '_
    where
        T: Send + Sync,
    {
        (0..self.len())
            .into_par_iter()
            .map(move |i| self.load(i, Ordering::Relaxed))
    }

    /// Returns a parallel iterator that allows modifying elements of the vector in place.
    ///
    /// Each element is accessed via an [`AtomicMutProxy`], which ensures that
    /// all modifications are written back atomically.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(feature = "parallel")] {
    /// use compressed_intvec::prelude::*;
    /// use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec, BitWidth};
    /// use rayon::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let data: Vec<u32> = (0..100).collect();
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(8)) // 2*99 = 198, needs 8 bits
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// vec.par_iter_mut().for_each(|mut proxy| {
    ///     *proxy *= 2;
    /// });
    ///
    /// assert_eq!(vec.load(50, Ordering::Relaxed), 100);
    /// # }
    /// ```
    #[cfg(feature = "parallel")]
    pub fn par_iter_mut(&self) -> impl ParallelIterator<Item = AtomicMutProxy<'_, T>>
    where
        T: Send + Sync,
    {
        (0..self.len())
            .into_par_iter()
            .map(move |i| AtomicMutProxy::new(self, i))
    }
}

// Extended atomic RMW operations
impl<T> AtomicFixedVec<T>
where
    T: Storable<u64> + Bounded + Copy + ToPrimitive,
{
    /// Atomically adds to the value at `index`, returning the previous value.
    ///
    /// This operation is a "read-modify-write" (RMW) operation. It atomically
    /// reads the value at `index`, adds `val` to it (with wrapping on overflow),
    /// and writes the result back.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // The data's maximum value is 20, so 5 bits are needed; the result 15 also fits.
    /// let data = vec![10u32, 20];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// let previous = vec.fetch_add(0, 5, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 15);
    /// ```
    #[inline(always)]
    pub fn fetch_add(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: WrappingAdd,
    {
        self.atomic_rmw(index, val, order, |a, b| a.wrapping_add(&b))
    }

    /// Atomically subtracts from the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // The initial value is 10. The result will be 5, which fits.
    /// let data = vec![10u32, 20];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// let previous = vec.fetch_sub(0, 5, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 5);
    /// ```
    #[inline(always)]
    pub fn fetch_sub(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: WrappingSub,
    {
        self.atomic_rmw(index, val, order, |a, b| a.wrapping_sub(&b))
    }

    /// Atomically performs a bitwise AND on the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // 0b1100 = 12. Needs 4 bits.
    /// let data = vec![12u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(4))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // 0b1010 = 10
    /// let previous = vec.fetch_and(0, 10, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 12);
    /// // 0b1100 & 0b1010 = 0b1000 = 8
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 8);
    /// ```
    #[inline(always)]
    pub fn fetch_and(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: BitAnd<Output = T>,
    {
        self.atomic_rmw(index, val, order, |a, b| a & b)
    }

    /// Atomically performs a bitwise OR on the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // 0b1100 = 12. Needs 4 bits.
    /// let data = vec![12u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(4))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // 0b1010 = 10
    /// let previous = vec.fetch_or(0, 10, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 12);
    /// // 0b1100 | 0b1010 = 0b1110 = 14
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 14);
    /// ```
    #[inline(always)]
    pub fn fetch_or(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: BitOr<Output = T>,
    {
        self.atomic_rmw(index, val, order, |a, b| a | b)
    }

    /// Atomically performs a bitwise XOR on the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // 0b1100 = 12. Needs 4 bits.
    /// let data = vec![12u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(4))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // 0b1010 = 10
    /// let previous = vec.fetch_xor(0, 10, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 12);
    /// // 0b1100 ^ 0b1010 = 0b0110 = 6
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 6);
    /// ```
    #[inline(always)]
    pub fn fetch_xor(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: BitXor<Output = T>,
    {
        self.atomic_rmw(index, val, order, |a, b| a ^ b)
    }

    /// Atomically computes the maximum of the value at `index` and `val`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // Value 20 needs 6 bits with zig-zag encoding.
    /// let data = vec![10i32];
    /// let vec: SAtomicFixedVec<i32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(6))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // Attempt to store a larger value
    /// let previous = vec.fetch_max(0, 20, Ordering::SeqCst);
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20);
    ///
    /// // Attempt to store a smaller value
    /// let previous2 = vec.fetch_max(0, 5, Ordering::SeqCst);
    /// assert_eq!(previous2, 20);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20); // Value is unchanged
    /// ```
    #[inline(always)]
    pub fn fetch_max(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: Ord,
    {
        self.atomic_rmw(index, val, order, |a, b| a.max(b))
    }

    /// Atomically computes the minimum of the value at `index` and `val`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // Value 10 needs 5 bits with zig-zag encoding.
    /// let data = vec![10i32];
    /// let vec: SAtomicFixedVec<i32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // Attempt to store a smaller value
    /// let previous = vec.fetch_min(0, 5, Ordering::SeqCst);
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 5);
    ///
    /// // Attempt to store a larger value
    /// let previous2 = vec.fetch_min(0, 20, Ordering::SeqCst);
    /// assert_eq!(previous2, 5);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 5); // Value is unchanged
    /// ```
    #[inline(always)]
    pub fn fetch_min(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: Ord,
    {
        self.atomic_rmw(index, val, order, |a, b| a.min(b))
    }

    /// Atomically modifies the value at `index` using a closure.
    ///
    /// Reads the value, applies the function `f`, and attempts to write the
    /// new value back. If the value has been changed by another thread in the
    /// meantime, the function is re-evaluated with the new current value.
    ///
    /// The closure `f` can return `None` to abort the update.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // Value 20 needs 5 bits.
    /// let data = vec![10u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)
    ///     .unwrap();
    ///
    /// // Successfully update the value
    /// let result = vec.fetch_update(0, Ordering::SeqCst, Ordering::Relaxed, |val| {
    ///     Some(val * 2)
    /// });
    /// assert_eq!(result, Ok(10));
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20);
    ///
    /// // Abort the update
    /// let result_aborted = vec.fetch_update(0, Ordering::SeqCst, Ordering::Relaxed, |val| {
    ///     if val > 15 {
    ///         None // Abort if value is > 15
    ///     } else {
    ///         Some(val + 1)
    ///     }
    /// });
    /// assert_eq!(result_aborted, Err(20));
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20); // Value remains unchanged
    /// ```
    pub fn fetch_update<F>(
        &self,
        index: usize,
        success: Ordering,
        failure: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        let mut current = self.load(index, Ordering::Relaxed);
        loop {
            match f(current) {
                Some(new) => match self.compare_exchange(index, current, new, success, failure) {
                    Ok(old) => return Ok(old),
                    Err(actual) => current = actual,
                },
                None => return Err(current),
            }
        }
    }
}

// `TryFrom` implementation.
impl<T> TryFrom<&[T]> for AtomicFixedVec<T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    type Error = Error;

    /// Creates an `AtomicFixedVec<T>` from a slice using `BitWidth::Minimal`.
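    ///
    /// # Examples
    ///
    /// A minimal usage sketch:
    ///
    /// ```
    /// use compressed_intvec::fixed::AtomicFixedVec;
    /// use std::sync::atomic::Ordering;
    ///
    /// let data = vec![1u32, 2, 3];
    /// let vec = AtomicFixedVec::<u32>::try_from(data.as_slice()).unwrap();
    /// assert_eq!(vec.load(2, Ordering::SeqCst), 3);
    /// ```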
    fn try_from(slice: &[T]) -> Result<Self, Self::Error> {
        AtomicFixedVec::builder()
            .bit_width(BitWidth::Minimal)
            .build(slice)
    }
}

// Constructor (internal to the crate, used by the builder).
impl<T> AtomicFixedVec<T>
where
    T: Storable<u64>,
{
    /// Creates a new, zero-initialized `AtomicFixedVec`.
    pub(crate) fn new(bit_width: usize, len: usize) -> Result<Self, Error> {
        if bit_width > u64::BITS as usize {
            return Err(Error::InvalidParameters(format!(
                "bit_width ({}) cannot be greater than the word size ({})",
                bit_width,
                u64::BITS
            )));
        }

        let mask = if bit_width == u64::BITS as usize {
            u64::MAX
        } else {
            (1u64 << bit_width).wrapping_sub(1)
        };

        let total_bits = len.saturating_mul(bit_width);
        let num_words = total_bits.div_ceil(u64::BITS as usize);
        let buffer_len = if len == 0 { 0 } else { num_words + 1 }; // +1 for padding
        let storage = (0..buffer_len).map(|_| AtomicU64::new(0)).collect();

        // Heuristic to determine the number of locks for striping.
        let num_locks = if len == 0 {
            MIN_LOCKS
        } else {
            let num_cores = std::thread::available_parallelism().map_or(MIN_LOCKS, |n| n.get());
            let target_locks = (num_words / WORDS_PER_LOCK).max(1);
            (target_locks.max(num_cores) * 2)
                .next_power_of_two()
                .min(MAX_LOCKS)
        };

        let locks = (0..num_locks).map(|_| Mutex::new(())).collect();

        Ok(Self {
            storage,
            locks,
            bit_width,
            mask,
            len,
            _phantom: PhantomData,
        })
    }
}

// --- Private Implementation of Atomic Operations ---
impl<T> AtomicFixedVec<T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Loads a value that spans two [`u64`] words.
    ///
    /// The caller must already hold the stripe lock covering `word_index`.
    /// This helper deliberately does not lock, so that the spanning
    /// read-modify-write paths can combine a load and a store under a single
    /// lock acquisition (the stripe mutexes are not reentrant).
    #[inline(always)]
    fn load_spanning_under_lock(&self, word_index: usize, bit_offset: usize) -> u64 {
        let low_word = self.storage[word_index].load(Ordering::Relaxed);
        let high_word = self.storage[word_index + 1].load(Ordering::Relaxed);
        let combined = (low_word >> bit_offset) | (high_word << (u64::BITS as usize - bit_offset));
        combined & self.mask
    }

    /// Stores a value that spans two [`u64`] words.
    ///
    /// The caller must already hold the stripe lock covering `word_index`.
    /// Atomic operations are still used on the individual words to avoid
    /// racing with the lock-free path, which might be concurrently accessing
    /// one of these words.
    #[inline(always)]
    fn store_spanning_under_lock(
        &self,
        word_index: usize,
        bit_offset: usize,
        value: u64,
        order: Ordering,
    ) {
        let low_word_ref = &self.storage[word_index];
        let high_word_ref = &self.storage[word_index + 1];

        // Modify the lower word.
        low_word_ref
            .fetch_update(order, Ordering::Relaxed, |mut w| {
                w &= !(u64::MAX << bit_offset);
                w |= value << bit_offset;
                Some(w)
            })
            .unwrap(); // Should not fail under lock.

        // Modify the higher word.
        let bits_in_high = (bit_offset + self.bit_width) - u64::BITS as usize;
        let high_mask = (1u64 << bits_in_high).wrapping_sub(1);
        high_word_ref
            .fetch_update(order, Ordering::Relaxed, |mut w| {
                w &= !high_mask;
                w |= value >> (u64::BITS as usize - bit_offset);
                Some(w)
            })
            .unwrap(); // Should not fail under lock.
    }

    #[inline(always)]
    fn atomic_load(&self, index: usize, order: Ordering) -> u64 {
        let bit_pos = index * self.bit_width;
        let word_index = bit_pos / u64::BITS as usize;
        let bit_offset = bit_pos % u64::BITS as usize;

        if bit_offset + self.bit_width <= u64::BITS as usize {
            // Lock-free path for single-word values.
            let word = self.storage[word_index].load(order);
            (word >> bit_offset) & self.mask
        } else {
            // Locked path for spanning values.
            let lock_index = word_index & (self.locks.len() - 1);
            let _guard = self.locks[lock_index].lock();
            self.load_spanning_under_lock(word_index, bit_offset)
        }
    }

    #[inline(always)]
    fn atomic_store(&self, index: usize, value: u64, order: Ordering) {
        let bit_pos = index * self.bit_width;
        let word_index = bit_pos / u64::BITS as usize;
        let bit_offset = bit_pos % u64::BITS as usize;

        if bit_offset + self.bit_width <= u64::BITS as usize {
            // Lock-free path for single-word values.
            let atomic_word_ref = &self.storage[word_index];
            let store_mask = self.mask << bit_offset;
            let store_value = value << bit_offset;
            let mut old_word = atomic_word_ref.load(Ordering::Relaxed);
            loop {
                let new_word = (old_word & !store_mask) | store_value;
                match atomic_word_ref.compare_exchange_weak(
                    old_word,
                    new_word,
                    order,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => break,
                    Err(x) => old_word = x,
                }
            }
        } else {
            // Locked path for values spanning two words.
            let lock_index = word_index & (self.locks.len() - 1);
            let _guard = self.locks[lock_index].lock();
            self.store_spanning_under_lock(word_index, bit_offset, value, order);
        }
    }

    #[inline(always)]
    fn atomic_swap(&self, index: usize, value: u64, order: Ordering) -> u64 {
        let bit_pos = index * self.bit_width;
        let word_index = bit_pos / u64::BITS as usize;
        let bit_offset = bit_pos % u64::BITS as usize;

        if bit_offset + self.bit_width <= u64::BITS as usize {
            // Lock-free path for single-word values.
            let atomic_word_ref = &self.storage[word_index];
            let store_mask = self.mask << bit_offset;
            let store_value = value << bit_offset;
            let mut old_word = atomic_word_ref.load(Ordering::Relaxed);
            loop {
                let new_word = (old_word & !store_mask) | store_value;
                match atomic_word_ref.compare_exchange_weak(
                    old_word,
                    new_word,
                    order,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return (old_word >> bit_offset) & self.mask,
                    Err(x) => old_word = x,
                }
            }
        } else {
            // Locked path for spanning values. The under-lock helpers are used
            // here because re-entering `atomic_load`/`atomic_store` would try
            // to acquire the same (non-reentrant) stripe lock again and deadlock.
            let lock_index = word_index & (self.locks.len() - 1);
            let _guard = self.locks[lock_index].lock();
            let old_val = self.load_spanning_under_lock(word_index, bit_offset);
            self.store_spanning_under_lock(word_index, bit_offset, value, order);
            old_val
        }
    }

    #[inline(always)]
    fn atomic_compare_exchange(
        &self,
        index: usize,
        current: u64,
        new: u64,
        success: Ordering,
        failure: Ordering,
    ) -> Result<u64, u64> {
        let bit_pos = index * self.bit_width;
        let word_index = bit_pos / u64::BITS as usize;
        let bit_offset = bit_pos % u64::BITS as usize;

        if bit_offset + self.bit_width <= u64::BITS as usize {
            // Lock-free path for single-word values.
            let atomic_word_ref = &self.storage[word_index];
            let store_mask = self.mask << bit_offset;
            let new_value_shifted = new << bit_offset;
            let mut old_word = atomic_word_ref.load(failure);
            loop {
                let old_val_extracted = (old_word >> bit_offset) & self.mask;
                if old_val_extracted != current {
                    return Err(old_val_extracted);
                }
                let new_word = (old_word & !store_mask) | new_value_shifted;
                match atomic_word_ref.compare_exchange_weak(old_word, new_word, success, failure) {
                    Ok(_) => return Ok(current),
                    Err(x) => old_word = x,
                }
            }
        } else {
            // Locked path for spanning values (see `atomic_swap` for why the
            // under-lock helpers are used here).
            let lock_index = word_index & (self.locks.len() - 1);
            let _guard = self.locks[lock_index].lock();
            let old_val = self.load_spanning_under_lock(word_index, bit_offset);
            if old_val != current {
                return Err(old_val);
            }
            self.store_spanning_under_lock(word_index, bit_offset, new, success);
            Ok(current)
        }
    }

    /// Generic implementation for all Read-Modify-Write (RMW) operations.
    #[inline(always)]
    fn atomic_rmw(&self, index: usize, val: T, order: Ordering, op: impl Fn(T, T) -> T) -> T {
        // This RMW is implemented as a CAS loop on top of `compare_exchange`.
        let mut current = self.load(index, Ordering::Relaxed);
        loop {
            let new = op(current, val);
            match self.compare_exchange(index, current, new, order, Ordering::Relaxed) {
                Ok(old) => return old,
                Err(actual) => current = actual,
            }
        }
    }
}

// --- Conversions between AtomicFixedVec and FixedVec ---

impl<T, W, E> From<FixedVec<T, W, E, Vec<W>>> for AtomicFixedVec<T>
where
    T: Storable<W> + Storable<u64>,
    W: crate::fixed::traits::Word,
    E: dsi_bitstream::prelude::Endianness,
{
    /// Creates an `AtomicFixedVec` from an owned `FixedVec`.
    /// This is a zero-copy operation that re-uses the allocated buffer.
    fn from(fixed_vec: FixedVec<T, W, E, Vec<W>>) -> Self {
        // SAFETY: This transmutation is safe because [`AtomicU64`] and [`u64`] have
        // the same in-memory representation. We are taking ownership of the Vec,
        // ensuring no other references to the non-atomic data exist.
        let storage = unsafe {
            let mut md = std::mem::ManuallyDrop::new(fixed_vec.bits);
            Vec::from_raw_parts(md.as_mut_ptr() as *mut AtomicU64, md.len(), md.capacity())
        };

        let num_words = (fixed_vec.len * fixed_vec.bit_width).div_ceil(u64::BITS as usize);
        let num_locks = if fixed_vec.len == 0 {
            MIN_LOCKS
        } else {
            let num_cores = std::thread::available_parallelism().map_or(MIN_LOCKS, |n| n.get());
            let target_locks = (num_words / WORDS_PER_LOCK).max(1);
            (target_locks.max(num_cores) * 2)
                .next_power_of_two()
                .min(MAX_LOCKS)
        };
        let locks = (0..num_locks).map(|_| Mutex::new(())).collect();

        Self {
            storage,
            locks,
            bit_width: fixed_vec.bit_width,
            mask: fixed_vec.mask.to_u64().unwrap(),
            len: fixed_vec.len,
            _phantom: PhantomData,
        }
    }
}

impl<T> From<AtomicFixedVec<T>> for FixedVec<T, u64, dsi_bitstream::prelude::LE, Vec<u64>>
where
    T: Storable<u64>,
{
    /// Creates a `FixedVec` from an owned `AtomicFixedVec`.
    /// This is a zero-copy operation that re-uses the allocated buffer.
    fn from(atomic_vec: AtomicFixedVec<T>) -> Self {
        // SAFETY: This transmutation is safe because [`u64`] and [`AtomicU64`] have
        // the same in-memory representation. We are taking ownership of the Vec,
        // ensuring no other references to the atomic data exist.
        let bits = unsafe {
            let mut md = std::mem::ManuallyDrop::new(atomic_vec.storage);
            Vec::from_raw_parts(md.as_mut_ptr() as *mut u64, md.len(), md.capacity())
        };

        unsafe { FixedVec::new_unchecked(bits, atomic_vec.len, atomic_vec.bit_width) }
    }
}

// --- MemDbg and MemSize Implementations ---

impl<T> MemSize for AtomicFixedVec<T>
where
    T: Storable<u64>,
{
    fn mem_size(&self, flags: SizeFlags) -> usize {
        // Since `parking_lot::Mutex` does not implement `CopyType`, we must calculate
        // the size of the `locks` vector manually.
        let locks_size = if flags.contains(SizeFlags::CAPACITY) {
            self.locks.capacity() * core::mem::size_of::<Mutex<()>>()
        } else {
            self.locks.len() * core::mem::size_of::<Mutex<()>>()
        };

        core::mem::size_of::<Self>()
            + self.storage.mem_size(flags)
            + core::mem::size_of::<Vec<Mutex<()>>>()
            + locks_size
    }
}

impl<T: Storable<u64>> MemDbgImpl for AtomicFixedVec<T> {
    fn _mem_dbg_rec_on(
        &self,
        writer: &mut impl core::fmt::Write,
        total_size: usize,
        max_depth: usize,
        prefix: &mut String,
        _is_last: bool,
        flags: DbgFlags,
    ) -> core::fmt::Result {
        // Manual implementation to avoid trying to lock and inspect mutexes.
        self.bit_width
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags)?;
        self.len
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags)?;
        self.mask
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags)?;

        // Display the size of the lock vector, but do not recurse into it.
        let locks_size = core::mem::size_of::<Vec<Mutex<()>>>()
            + self.locks.capacity() * core::mem::size_of::<Mutex<()>>();
        locks_size._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags)?;

        self.storage
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, true, flags)?;
        Ok(())
    }
}

/// An iterator over the elements of a borrowed [`AtomicFixedVec`].
///
/// This struct is created by the [`iter`](AtomicFixedVec::iter) method. It
/// atomically loads each value on the fly.
pub struct AtomicFixedVecIter<'a, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    vec: &'a AtomicFixedVec<T>,
    current_index: usize,
}

impl<T> Iterator for AtomicFixedVecIter<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.current_index >= self.vec.len() {
            return None;
        }
        // Use the safe get method, which defaults to SeqCst ordering.
        let value = self.vec.get(self.current_index).unwrap();
        self.current_index += 1;
        Some(value)
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let remaining = self.vec.len().saturating_sub(self.current_index);
        (remaining, Some(remaining))
    }
}

impl<T> ExactSizeIterator for AtomicFixedVecIter<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    fn len(&self) -> usize {
        self.vec.len().saturating_sub(self.current_index)
    }
}

impl<'a, T> IntoIterator for &'a AtomicFixedVec<T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    type Item = T;
    type IntoIter = AtomicFixedVecIter<'a, T>;

    fn into_iter(self) -> Self::IntoIter {
        AtomicFixedVecIter {
            vec: self,
            current_index: 0,
        }
    }
}

impl<T> PartialEq for AtomicFixedVec<T>
where
    T: Storable<u64> + PartialEq + Copy + ToPrimitive,
{
    /// Checks for equality between two [`AtomicFixedVec`] instances.
    ///
    /// This comparison is performed by iterating over both vectors and comparing
    /// their elements one by one. The reads are done atomically but the overall
    /// comparison is not a single atomic operation.
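    ///
    /// Note that two vectors built with different bit widths compare as unequal
    /// even if they hold the same logical values. A minimal sketch:
    ///
    /// ```
    /// use compressed_intvec::prelude::*;
    ///
    /// let a: UAtomicFixedVec<u32> = AtomicFixedVec::builder().build(&[1u32, 2, 3]).unwrap();
    /// let b: UAtomicFixedVec<u32> = AtomicFixedVec::builder().build(&[1u32, 2, 3]).unwrap();
    /// assert_eq!(a, b);
    /// ```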
    fn eq(&self, other: &Self) -> bool {
        if self.len() != other.len() || self.bit_width() != other.bit_width() {
            return false;
        }
        // `iter` loads each element with `SeqCst`, giving a strong guarantee.
        self.iter().zip(other.iter()).all(|(a, b)| a == b)
    }
}

impl<T> Eq for AtomicFixedVec<T> where T: Storable<u64> + Eq + Copy + ToPrimitive {}