// compressed_intvec/fixed/atomic/mod.rs
1//! A thread-safe, compressed vector of integers with fixed-width encoding.
2//!
3//! This module provides [`AtomicFixedVec`], a data structure that behaves like
4//! [`FixedVec`] but allows for concurrent access and
5//! modification from multiple threads. It is designed for scenarios where a
6//! large collection of integers must be shared and mutated in a parallel
7//! environment.
8//!
9//! All operations that modify the vector's contents are implemented using
10//! atomic instructions (e.g., compare-and-swap loops), ensuring thread safety
11//! without requiring a global lock.
12//!
13//! # Atomicity Guarantees and Locking
14//!
15//! The atomicity of operations depends on the configured `bit_width`.
16//!
17//! - **Power-of-Two `bit_width`**: When the `bit_width` is a power of two
18//! (e.g., 2, 4, 8, 16, 32), and it evenly divides the 64-bit word size,
19//! most operations can be performed with lock-free atomic instructions.
20//! This is because each element is guaranteed to be fully contained within a
21//! single [`AtomicU64`] word.
22//!
23//! - **Non-Power-of-Two `bit_width`**: When the `bit_width` is not a power of
24//! two, an element's value may span across the boundary of two [`AtomicU64`]
25//! words. Modifying such an element requires updating two words simultaneously,
26//! which cannot be done in a single atomic hardware instruction.
27//!
28//! To handle this case, [`AtomicFixedVec`] uses a technique called _lock striping_.
29//! It maintains a pool of [`parking_lot::Mutex`] locks. When an operation needs
30//! to modify a value that spans two words, it acquires a lock for that specific
31//! memory region. This ensures that the two-word update is itself atomic with
32//! respect to other threads, while still allowing concurrent operations on
33//! different parts of the vector. This approach avoids a single global lock,
34//! preserving a high degree of parallelism.
35//!
//! > Future versions may introduce a more sophisticated locking strategy.
37//!
38//! ### Performance Considerations
39//!
40//! The trade-off is between memory compactness and performance. While a
41//! non-power-of-two `bit_width` provides the most space-efficient storage,
42//! it may incur a performance overhead for write operations that span word
43//! boundaries due to locking.
44//!
45//! For write-heavy, performance-critical workloads, choosing a power-of-two
46//! `bit_width` (e.g., by using [`BitWidth::PowerOfTwo`]) is recommended to
47//! ensure all operations remain lock-free.
48//!
49//! # Examples
50//!
51//! ## Basic Usage
52//!
53//! ```
54//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
55//! use compressed_intvec::prelude::*;
56//! use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec};
57//! use std::sync::Arc;
58//! use std::thread;
59//! use std::sync::atomic::Ordering;
60//!
61//! // Create from a slice using the builder.
62//! let initial_data: Vec<u32> = vec![10, 20, 30, 40];
63//! let atomic_vec: Arc<UAtomicFixedVec<u32>> = Arc::new(
64//! AtomicFixedVec::builder()
65//! .build(&initial_data)?
66//! );
67//!
68//! // Share the vector across threads.
69//! let mut handles = vec![];
70//! for i in 0..4 {
71//! let vec_clone = Arc::clone(&atomic_vec);
72//! handles.push(thread::spawn(move || {
73//! // Each thread atomically updates its own slot.
74//! vec_clone.store(i, 63, Ordering::SeqCst);
75//! }));
76//! }
77//! for handle in handles {
78//! handle.join().unwrap();
79//! }
80//! assert_eq!(atomic_vec.load(3, Ordering::SeqCst), 63);
81//! # Ok(())
82//! # }
83//! ```
84//!
85//! ## Storing Signed Integers
86//!
87//! [`AtomicFixedVec`] can also store signed integers. The underlying [`Storable`]
88//! trait uses zig-zag encoding to store signed values efficiently, so that
89//! small negative numbers require few bits, just like small positive numbers.
90//!
91//! ```
92//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
93//! use compressed_intvec::prelude::*;
94//! use compressed_intvec::fixed::{AtomicFixedVec, SAtomicFixedVec};
95//! use std::sync::Arc;
96//! use std::sync::atomic::Ordering;
97//!
98//! // The values range from -2 to 1. To also store -3 later, we need 3 bits.
99//! let initial_data: Vec<i16> = vec![-2, -1, 0, 1];
100//! let atomic_vec: Arc<SAtomicFixedVec<i16>> = Arc::new(
101//! AtomicFixedVec::builder()
102//! .bit_width(BitWidth::Explicit(3)) // Explicitly set bit width
103//! .build(&initial_data)?
104//! );
105//!
106//! assert_eq!(atomic_vec.bit_width(), 3);
107//! assert_eq!(atomic_vec.load(0, Ordering::SeqCst), -2);
108//!
109//! // Atomically update a value.
110//! atomic_vec.store(0, -3, Ordering::SeqCst);
111//! assert_eq!(atomic_vec.load(0, Ordering::SeqCst), -3);
112//! # Ok(())
113//! # }
114//! ```
115//!
//! ## Parallel Iteration
117//!
//! When the `parallel` feature is enabled (it is enabled by default), you can use [`par_iter`](AtomicFixedVec::par_iter) to process
119//! the vector's elements concurrently.
120//!
121//! ```
122//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
123//! # #[cfg(feature = "parallel")] {
124//! use compressed_intvec::prelude::*;
125//! use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec};
126//! use rayon::prelude::*;
127//!
128//! let data: Vec<u32> = (0..10_000).collect();
129//! let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
130//! .build(&data)?;
131//!
132//! // Use the parallel iterator to find the sum of all even numbers.
133//! let sum_of_evens: u32 = vec.par_iter()
134//! .filter(|&x| x % 2 == 0)
135//! .sum();
136//!
137//! let expected_sum: u32 = (0..10_000).filter(|&x| x % 2 == 0).sum();
138//! assert_eq!(sum_of_evens, expected_sum);
139//! # }
140//! # Ok(())
141//! # }
142//! ```
143//!
144//! ### Memory Ordering and Locking
145//!
146//! The memory [`Ordering`] specified in methods like [`load`](AtomicFixedVec::load), [`store`](AtomicFixedVec::store), or
147//! [`fetch_add`](AtomicFixedVec::fetch_add) is always respected, but its interaction with the internal locking
148//! mechanism is important to understand.
149//!
150//! - **Lock-Free Path**: When an element is fully contained within a single
151//! [`u64`] word, the specified [`Ordering`] is applied directly to the underlying
152//! atomic instructions, providing the standard guarantees described in the
153//! Rust documentation.
154//!
155//! - **Locked Path**: When an element spans two [`u64`] words, a fine-grained
156//! mutex is acquired. This lock ensures that the two-word operation is
157//! atomic with respect to other locked operations on the same memory region.
158//! The specified [`Ordering`] is then applied to the atomic writes performed
159//! within the locked critical section. This guarantees that the effects of
160//! the operation become visible to other threads according to the chosen
161//! ordering, but the visibility is still mediated by the mutual exclusion
162//! provided by the lock.
163//!
164
165#[macro_use]
166pub mod macros;
167pub mod builder;
168
169use crate::fixed::traits::Storable;
170use crate::fixed::{BitWidth, Error, FixedVec};
171use mem_dbg::{DbgFlags, MemDbgImpl, MemSize, SizeFlags};
172use num_traits::{Bounded, ToPrimitive, WrappingAdd, WrappingSub};
173use parking_lot::Mutex;
174use std::fmt;
175use std::marker::PhantomData;
176use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut};
177use std::sync::atomic::{AtomicU64, Ordering};
178
179#[cfg(feature = "parallel")]
180use rayon::prelude::*;
181
/// A thread-safe [`FixedVec`] for unsigned integers.
///
/// Currently an alias of [`AtomicFixedVec`]; the distinct name mirrors the
/// unsigned/signed alias pair used elsewhere in the crate's fixed-width API.
pub type UAtomicFixedVec<T> = AtomicFixedVec<T>;
/// A thread-safe [`FixedVec`] for signed integers.
///
/// Currently an alias of [`AtomicFixedVec`], identical to [`UAtomicFixedVec`];
/// signedness is handled by the element type's [`Storable`] encoding.
pub type SAtomicFixedVec<T> = AtomicFixedVec<T>;

/// The upper bound on the number of locks to prevent excessive memory usage.
const MAX_LOCKS: usize = 1024;
/// The minimum number of locks to create, ensuring some striping even for small vectors.
const MIN_LOCKS: usize = 2;
/// A heuristic to determine the stripe size: one lock per this many data words.
const WORDS_PER_LOCK: usize = 64;
193
/// A proxy object for mutable access to an element within an [`AtomicFixedVec`]
/// during parallel iteration.
///
/// This struct is returned by the [`par_iter_mut`](AtomicFixedVec::par_iter_mut)
/// parallel iterator. It holds a temporary copy of an element's value. When the
/// proxy is dropped, its `Drop` implementation atomically writes the (potentially
/// modified) value back into the parent vector.
#[cfg(feature = "parallel")]
pub struct AtomicMutProxy<'a, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    // The vector the element belongs to; target of the write-back on drop.
    vec: &'a AtomicFixedVec<T>,
    // Position of the proxied element within `vec`.
    index: usize,
    // Local working copy of the element, exposed via Deref/DerefMut.
    value: T,
}
210
#[cfg(feature = "parallel")]
impl<T> fmt::Debug for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive + fmt::Debug,
{
    /// Formats the proxy, showing only the cached element value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("AtomicMutProxy");
        dbg.field("value", &self.value);
        dbg.finish()
    }
}
222
#[cfg(feature = "parallel")]
impl<'a, T> AtomicMutProxy<'a, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Builds a proxy for the element at `index`, caching its current value.
    ///
    /// Called by `par_iter_mut`. The initial value is read with a relaxed
    /// load; synchronization is provided by Rayon's fork-join model.
    fn new(vec: &'a AtomicFixedVec<T>, index: usize) -> Self {
        Self {
            vec,
            index,
            value: vec.load(index, Ordering::Relaxed),
        }
    }

    /// Consumes the proxy, returning the current value without writing it back.
    ///
    /// This can be used to avoid the overhead of a write operation if the value
    /// was read but not modified.
    pub fn into_inner(self) -> T {
        let value = self.value;
        // Suppress `Drop` so that no write-back to the vector occurs.
        std::mem::forget(self);
        value
    }
}
249
#[cfg(feature = "parallel")]
impl<T> Deref for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    type Target = T;

    /// Grants read access to the proxy's cached copy of the element.
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
261
#[cfg(feature = "parallel")]
impl<T> DerefMut for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Grants mutable access to the cached copy; the modified value is
    /// written back to the vector when the proxy is dropped.
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.value
    }
}
271
#[cfg(feature = "parallel")]
impl<T> Drop for AtomicMutProxy<'_, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Writes the potentially modified value back to the [`AtomicFixedVec`] when the
    /// proxy goes out of scope.
    ///
    /// Skipped entirely when the proxy is consumed via `into_inner`, which
    /// forgets `self` before this runs.
    fn drop(&mut self) {
        // The value is copied before being passed to store.
        // Relaxed ordering is sufficient here because the synchronization is
        // handled by Rayon's fork-join model. The writes will be visible after
        // the parallel block completes.
        self.vec.store(self.index, self.value, Ordering::Relaxed);
    }
}
287
/// A thread-safe, compressed, randomly accessible vector of integers with
/// fixed-width encoding, backed by [`u64`] atomic words.
#[derive(Debug)]
pub struct AtomicFixedVec<T>
where
    T: Storable<u64>,
{
    /// The underlying storage for the bit-packed data.
    pub(crate) storage: Vec<AtomicU64>,
    /// A pool of locks to protect spanning-word operations.
    locks: Vec<Mutex<()>>,
    // Number of bits used to encode each element.
    bit_width: usize,
    // Mask with the low `bit_width` bits set (all bits when bit_width == 64);
    // isolates a single element within a word.
    mask: u64,
    // Number of logical elements stored (not the number of words).
    len: usize,
    // Marks the element type `T` without storing any `T` values.
    _phantom: PhantomData<T>,
}
304
// Public API implementation
impl<T> AtomicFixedVec<T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    /// Creates a builder for constructing an [`AtomicFixedVec`] from a slice.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec, BitWidth};
    ///
    /// let data: &[i16] = &[-100, 0, 100, 200];
    /// let vec: UAtomicFixedVec<i16> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::PowerOfTwo) // Force 16 bits for signed values
    ///     .build(data)?;
    ///
    /// assert_eq!(vec.len(), 4);
    /// assert_eq!(vec.bit_width(), 16);
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn builder() -> builder::AtomicFixedVecBuilder<T> {
        builder::AtomicFixedVecBuilder::new()
    }

    /// Returns the number of elements in the vector.
    #[inline(always)]
    pub fn len(&self) -> usize {
        self.len
    }

    /// Returns `true` if the vector contains no elements.
    #[inline(always)]
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Returns the number of bits used to encode each element.
    #[inline(always)]
    pub fn bit_width(&self) -> usize {
        self.bit_width
    }

    /// Returns a read-only slice of the underlying atomic storage words.
    ///
    /// Note that the storage may include a trailing padding word beyond the
    /// bits occupied by the elements.
    #[inline(always)]
    pub fn as_slice(&self) -> &[AtomicU64] {
        &self.storage
    }

    /// Atomically loads the value at `index`.
    ///
    /// [`load`](AtomicFixedVec::load) takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. For more information, see the [Rust documentation on
    /// memory ordering](https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html).
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    #[inline(always)]
    pub fn load(&self, index: usize, order: Ordering) -> T {
        assert!(index < self.len, "load index out of bounds");
        let loaded_word = self.atomic_load(index, order);
        // Decode the raw word back into T (e.g. zig-zag for signed types).
        T::from_word(loaded_word)
    }

    /// Atomically loads the value at `index` without bounds checking.
    ///
    /// [`load_unchecked`](AtomicFixedVec::load_unchecked) takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. For more information, see the [Rust documentation on
    /// memory ordering](https://doc.rust-lang.org/std/sync/atomic/enum.Ordering.html).
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn load_unchecked(&self, index: usize, order: Ordering) -> T {
        debug_assert!(index < self.len, "load_unchecked index out of bounds");
        let loaded_word = self.atomic_load(index, order);
        T::from_word(loaded_word)
    }

    /// Atomically stores `value` at `index`.
    ///
    /// The [`Ordering`] argument describes the memory ordering of this
    /// operation, as for [`load`](AtomicFixedVec::load).
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds. Note that the stored value is not
    /// checked for whether it fits in the configured `bit_width` and will be
    /// truncated if it is too large.
    #[inline(always)]
    pub fn store(&self, index: usize, value: T, order: Ordering) {
        assert!(index < self.len, "store index out of bounds");
        let value_w = T::into_word(value);
        self.atomic_store(index, value_w, order);
    }

    /// Atomically stores `value` at `index` without bounds checking.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    /// Note that the stored value is not checked for whether it fits in the
    /// configured `bit_width` and will be truncated if it is too large.
    #[inline(always)]
    pub unsafe fn store_unchecked(&self, index: usize, value: T, order: Ordering) {
        debug_assert!(index < self.len, "store_unchecked index out of bounds");
        let value_w = T::into_word(value);
        self.atomic_store(index, value_w, order);
    }

    /// Atomically swaps the value at `index` with `value`, returning the
    /// previous value.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    #[inline(always)]
    pub fn swap(&self, index: usize, value: T, order: Ordering) -> T {
        assert!(index < self.len, "swap index out of bounds");
        let value_w = T::into_word(value);
        let old_word = self.atomic_swap(index, value_w, order);
        T::from_word(old_word)
    }

    /// Atomically swaps the value at `index` with `value` without bounds checking.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn swap_unchecked(&self, index: usize, value: T, order: Ordering) -> T {
        debug_assert!(index < self.len, "swap_unchecked index out of bounds");
        let value_w = T::into_word(value);
        let old_word = self.atomic_swap(index, value_w, order);
        T::from_word(old_word)
    }

    /// Atomically compares the value at `index` with `current` and, if they are
    /// equal, replaces it with `new`.
    ///
    /// Returns `Ok` with the previous value on success, or `Err` with the
    /// actual value if the comparison fails. This is also known as a
    /// "compare-and-set" (CAS) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    #[inline(always)]
    pub fn compare_exchange(
        &self,
        index: usize,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        assert!(index < self.len, "compare_exchange index out of bounds");
        // The comparison happens on the encoded word representation.
        let current_w = T::into_word(current);
        let new_w = T::into_word(new);
        match self.atomic_compare_exchange(index, current_w, new_w, success, failure) {
            Ok(w) => Ok(T::from_word(w)),
            Err(w) => Err(T::from_word(w)),
        }
    }

    /// Atomically compares the value at `index` with `current` and, if they are
    /// equal, replaces it with `new`, without bounds checking.
    ///
    /// Returns `Ok` with the previous value on success, or `Err` with the
    /// actual value if the comparison fails. This is also known as a
    /// "compare-and-set" (CAS) operation.
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn compare_exchange_unchecked(
        &self,
        index: usize,
        current: T,
        new: T,
        success: Ordering,
        failure: Ordering,
    ) -> Result<T, T> {
        debug_assert!(
            index < self.len,
            "compare_exchange_unchecked index out of bounds"
        );
        let current_w = T::into_word(current);
        let new_w = T::into_word(new);
        match self.atomic_compare_exchange(index, current_w, new_w, success, failure) {
            Ok(w) => Ok(T::from_word(w)),
            Err(w) => Err(T::from_word(w)),
        }
    }

    /// Returns the element at `index`, or `None` if out of bounds.
    ///
    /// This is an ergonomic wrapper around [`load`](AtomicFixedVec::load) that uses [`Ordering::SeqCst`].
    #[inline(always)]
    pub fn get(&self, index: usize) -> Option<T> {
        if index >= self.len {
            return None;
        }
        Some(self.load(index, Ordering::SeqCst))
    }

    /// Returns the element at `index` without bounds checking.
    ///
    /// Uses [`Ordering::SeqCst`], like [`get`](AtomicFixedVec::get).
    ///
    /// # Safety
    ///
    /// Calling this method with an out-of-bounds `index` is undefined behavior.
    #[inline(always)]
    pub unsafe fn get_unchecked(&self, index: usize) -> T {
        unsafe { self.load_unchecked(index, Ordering::SeqCst) }
    }

    /// Returns an iterator over the elements of the vector.
    ///
    /// The iterator atomically loads each element using [`Ordering::SeqCst`].
    pub fn iter(&self) -> impl Iterator<Item = T> + '_ {
        AtomicFixedVecIter {
            vec: self,
            current_index: 0,
        }
    }

    /// Returns a parallel iterator over the elements of the vector.
    ///
    /// The iterator atomically loads each element using [`Ordering::Relaxed`].
    /// This operation is highly parallelizable as each element can be loaded
    /// independently.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// # #[cfg(feature = "parallel")] {
    /// use compressed_intvec::prelude::*;
    /// use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec, BitWidth};
    /// use rayon::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let data: Vec<u32> = (0..1000).collect();
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .build(&data)?;
    ///
    /// // Sum the elements in parallel.
    /// let sum: u32 = vec.par_iter().sum();
    /// assert_eq!(sum, (0..1000).sum());
    /// # }
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "parallel")]
    pub fn par_iter(&self) -> impl ParallelIterator<Item = T> + '_
    where
        T: Send + Sync,
    {
        (0..self.len())
            .into_par_iter()
            .map(move |i| self.load(i, Ordering::Relaxed))
    }

    /// Returns a parallel iterator that allows modifying elements of the vector in place.
    ///
    /// Each element is accessed via an [`AtomicMutProxy`], which ensures that
    /// all modifications are written back atomically.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// # #[cfg(feature = "parallel")] {
    /// use compressed_intvec::prelude::*;
    /// use compressed_intvec::fixed::{AtomicFixedVec, UAtomicFixedVec, BitWidth};
    /// use rayon::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// let data: Vec<u32> = (0..100).collect();
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(8)) // 2*99 = 198, needs 8 bits
    ///     .build(&data)?;
    ///
    /// vec.par_iter_mut().for_each(|mut proxy| {
    ///     *proxy *= 2;
    /// });
    ///
    /// assert_eq!(vec.load(50, Ordering::Relaxed), 100);
    /// # }
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "parallel")]
    pub fn par_iter_mut(&self) -> impl ParallelIterator<Item = AtomicMutProxy<'_, T>>
    where
        T: Send + Sync,
    {
        (0..self.len())
            .into_par_iter()
            .map(move |i| AtomicMutProxy::new(self, i))
    }
}
611
// Extended atomic RMW operations
impl<T> AtomicFixedVec<T>
where
    T: Storable<u64> + Bounded + Copy + ToPrimitive,
{
    /// Atomically adds to the value at `index`, returning the previous value.
    ///
    /// This operation is a "read-modify-write" (RMW) operation. It atomically
    /// reads the value at `index`, adds `val` to it (with wrapping on overflow),
    /// and writes the result back.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // The initial value is 10. The result will be 15, which needs 4 bits.
    /// let data = vec![10u32, 20];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)?;
    ///
    /// let previous = vec.fetch_add(0, 5, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 15);
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_add(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: WrappingAdd,
    {
        self.atomic_rmw(index, val, order, |a, b| a.wrapping_add(&b))
    }

    /// Atomically subtracts from the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation; the subtraction
    /// wraps on underflow.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // The initial value is 10. The result will be 5, which fits.
    /// let data = vec![10u32, 20];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)?;
    ///
    /// let previous = vec.fetch_sub(0, 5, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 5);
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_sub(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: WrappingSub,
    {
        self.atomic_rmw(index, val, order, |a, b| a.wrapping_sub(&b))
    }

    /// Atomically performs a bitwise AND on the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // 0b1100 = 12. Needs 4 bits.
    /// let data = vec![12u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(4))
    ///     .build(&data)?;
    ///
    /// // 0b1010 = 10
    /// let previous = vec.fetch_and(0, 10, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 12);
    /// // 0b1100 & 0b1010 = 0b1000 = 8
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 8);
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_and(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: BitAnd<Output = T>,
    {
        self.atomic_rmw(index, val, order, |a, b| a & b)
    }

    /// Atomically performs a bitwise OR on the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // 0b1100 = 12. Needs 4 bits.
    /// let data = vec![12u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(4))
    ///     .build(&data)?;
    ///
    /// // 0b1010 = 10
    /// let previous = vec.fetch_or(0, 10, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 12);
    /// // 0b1100 | 0b1010 = 0b1110 = 14
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 14);
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_or(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: BitOr<Output = T>,
    {
        self.atomic_rmw(index, val, order, |a, b| a | b)
    }

    /// Atomically performs a bitwise XOR on the value at `index`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // 0b1100 = 12. Needs 4 bits.
    /// let data = vec![12u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(4))
    ///     .build(&data)?;
    ///
    /// // 0b1010 = 10
    /// let previous = vec.fetch_xor(0, 10, Ordering::SeqCst);
    ///
    /// assert_eq!(previous, 12);
    /// // 0b1100 ^ 0b1010 = 0b0110 = 6
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 6);
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_xor(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: BitXor<Output = T>,
    {
        self.atomic_rmw(index, val, order, |a, b| a ^ b)
    }

    /// Atomically computes the maximum of the value at `index` and `val`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation. The comparison
    /// uses `T`'s own ordering (i.e. on the decoded value, not the encoding).
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // Value 20 needs 6 bits with zig-zag encoding.
    /// let data = vec![10i32];
    /// let vec: SAtomicFixedVec<i32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(6))
    ///     .build(&data)?;
    ///
    /// // Attempt to store a larger value
    /// let previous = vec.fetch_max(0, 20, Ordering::SeqCst);
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20);
    ///
    /// // Attempt to store a smaller value
    /// let previous2 = vec.fetch_max(0, 5, Ordering::SeqCst);
    /// assert_eq!(previous2, 20);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20); // Value is unchanged
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_max(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: Ord,
    {
        self.atomic_rmw(index, val, order, |a, b| a.max(b))
    }

    /// Atomically computes the minimum of the value at `index` and `val`, returning the previous value.
    ///
    /// This is an atomic "read-modify-write" (RMW) operation. The comparison
    /// uses `T`'s own ordering (i.e. on the decoded value, not the encoding).
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // Value 10 needs 5 bits with zig-zag encoding.
    /// let data = vec![10i32];
    /// let vec: SAtomicFixedVec<i32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)?;
    ///
    /// // Attempt to store a smaller value
    /// let previous = vec.fetch_min(0, 5, Ordering::SeqCst);
    /// assert_eq!(previous, 10);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 5);
    ///
    /// // Attempt to store a larger value
    /// let previous2 = vec.fetch_min(0, 20, Ordering::SeqCst);
    /// assert_eq!(previous2, 5);
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 5); // Value is unchanged
    /// # Ok(())
    /// # }
    /// ```
    #[inline(always)]
    pub fn fetch_min(&self, index: usize, val: T, order: Ordering) -> T
    where
        T: Ord,
    {
        self.atomic_rmw(index, val, order, |a, b| a.min(b))
    }

    /// Atomically modifies the value at `index` using a closure.
    ///
    /// Reads the value, applies the function `f`, and attempts to write the
    /// new value back. If the value has been changed by another thread in the
    /// meantime, the function is re-evaluated with the new current value.
    ///
    /// The closure `f` can return `None` to abort the update.
    ///
    /// # Panics
    ///
    /// Panics if `index` is out of bounds.
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
    /// use compressed_intvec::prelude::*;
    /// use std::sync::atomic::Ordering;
    ///
    /// // Value 20 needs 5 bits.
    /// let data = vec![10u32];
    /// let vec: UAtomicFixedVec<u32> = AtomicFixedVec::builder()
    ///     .bit_width(BitWidth::Explicit(5))
    ///     .build(&data)?;
    ///
    /// // Successfully update the value
    /// let result = vec.fetch_update(0, Ordering::SeqCst, Ordering::Relaxed, |val| {
    ///     Some(val * 2)
    /// });
    /// assert_eq!(result, Ok(10));
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20);
    ///
    /// // Abort the update
    /// let result_aborted = vec.fetch_update(0, Ordering::SeqCst, Ordering::Relaxed, |val| {
    ///     if val > 15 {
    ///         None // Abort if value is > 15
    ///     } else {
    ///         Some(val + 1)
    ///     }
    /// });
    /// assert_eq!(result_aborted, Err(20));
    /// assert_eq!(vec.load(0, Ordering::SeqCst), 20); // Value remains unchanged
    /// # Ok(())
    /// # }
    /// ```
    pub fn fetch_update<F>(
        &self,
        index: usize,
        success: Ordering,
        failure: Ordering,
        mut f: F,
    ) -> Result<T, T>
    where
        F: FnMut(T) -> Option<T>,
    {
        // The initial read is relaxed: a stale value merely causes the CAS
        // below to fail and the loop to retry with the actual value.
        let mut current = self.load(index, Ordering::Relaxed);
        loop {
            match f(current) {
                Some(new) => match self.compare_exchange(index, current, new, success, failure) {
                    Ok(old) => return Ok(old),
                    Err(actual) => current = actual,
                },
                None => return Err(current),
            }
        }
    }
}
954
955// `TryFrom` implementation.
956impl<T> TryFrom<&[T]> for AtomicFixedVec<T>
957where
958 T: Storable<u64> + Copy + ToPrimitive,
959{
960 type Error = Error;
961
962 /// Creates an `AtomicFixedVec<T>` from a slice using `BitWidth::Minimal`.
963 fn try_from(slice: &[T]) -> Result<Self, Self::Error> {
964 AtomicFixedVec::builder()
965 .bit_width(BitWidth::Minimal)
966 .build(slice)
967 }
968}
969
970// Constructor (internal to the crate, used by the builder).
971impl<T> AtomicFixedVec<T>
972where
973 T: Storable<u64>,
974{
975 /// Creates a new, zero-initialized `AtomicFixedVec`.
976 pub(crate) fn new(bit_width: usize, len: usize) -> Result<Self, Error> {
977 if bit_width > u64::BITS as usize {
978 return Err(Error::InvalidParameters(format!(
979 "bit_width ({}) cannot be greater than the word size ({})",
980 bit_width,
981 u64::BITS
982 )));
983 }
984
985 let mask = if bit_width == u64::BITS as usize {
986 u64::MAX
987 } else {
988 (1u64 << bit_width).wrapping_sub(1)
989 };
990
991 let total_bits = len.saturating_mul(bit_width);
992 let num_words = total_bits.div_ceil(u64::BITS as usize);
993 let buffer_len = if len == 0 { 0 } else { num_words + 1 }; // +1 for padding
994 let storage = (0..buffer_len).map(|_| AtomicU64::new(0)).collect();
995
996 // Heuristic to determine the number of locks for striping.
997 let num_locks = if len == 0 {
998 MIN_LOCKS
999 } else {
1000 let num_cores = std::thread::available_parallelism().map_or(MIN_LOCKS, |n| n.get());
1001 let target_locks = (num_words / WORDS_PER_LOCK).max(1);
1002 (target_locks.max(num_cores) * 2)
1003 .next_power_of_two()
1004 .min(MAX_LOCKS)
1005 };
1006
1007 let locks = (0..num_locks).map(|_| Mutex::new(())).collect();
1008
1009 Ok(Self {
1010 storage,
1011 locks,
1012 bit_width,
1013 mask,
1014 len,
1015 _phantom: PhantomData,
1016 })
1017 }
1018}
1019
1020// --- Private Implementation of Atomic Operations ---
1021impl<T> AtomicFixedVec<T>
1022where
1023 T: Storable<u64> + Copy + ToPrimitive,
1024{
1025 #[inline(always)]
1026 fn atomic_load(&self, index: usize, order: Ordering) -> u64 {
1027 let bit_pos = index * self.bit_width;
1028 let word_index = bit_pos / u64::BITS as usize;
1029 let bit_offset = bit_pos % u64::BITS as usize;
1030
1031 if bit_offset + self.bit_width <= u64::BITS as usize {
1032 // Lock-free path for single-word values.
1033 let word = self.storage[word_index].load(order);
1034 (word >> bit_offset) & self.mask
1035 } else {
1036 // Locked path for spanning values.
1037 let lock_index = word_index & (self.locks.len() - 1);
1038 let _guard = self.locks[lock_index].lock();
1039 let low_word = self.storage[word_index].load(Ordering::Relaxed);
1040 let high_word = self.storage[word_index + 1].load(Ordering::Relaxed);
1041 let combined =
1042 (low_word >> bit_offset) | (high_word << (u64::BITS as usize - bit_offset));
1043 combined & self.mask
1044 }
1045 }
1046
1047 #[inline(always)]
1048 fn atomic_store(&self, index: usize, value: u64, order: Ordering) {
1049 let bit_pos = index * self.bit_width;
1050 let word_index = bit_pos / u64::BITS as usize;
1051 let bit_offset = bit_pos % u64::BITS as usize;
1052
1053 if bit_offset + self.bit_width <= u64::BITS as usize {
1054 // Lock-free path for single-word values.
1055 let atomic_word_ref = &self.storage[word_index];
1056 let store_mask = self.mask << bit_offset;
1057 let store_value = value << bit_offset;
1058 let mut old_word = atomic_word_ref.load(Ordering::Relaxed);
1059 loop {
1060 let new_word = (old_word & !store_mask) | store_value;
1061 match atomic_word_ref.compare_exchange_weak(
1062 old_word,
1063 new_word,
1064 order,
1065 Ordering::Relaxed,
1066 ) {
1067 Ok(_) => break,
1068 Err(x) => old_word = x,
1069 }
1070 }
1071 } else {
1072 // Locked path for values spanning two words.
1073 let lock_index = word_index & (self.locks.len() - 1);
1074 let _guard = self.locks[lock_index].lock();
1075 // The lock guarantees exclusive access to this multi-word operation.
1076 // We still use atomic operations inside to prevent races with the
1077 // lock-free path, which might be concurrently accessing one of these words.
1078 let low_word_ref = &self.storage[word_index];
1079 let high_word_ref = &self.storage[word_index + 1];
1080
1081 // Modify the lower word.
1082 low_word_ref
1083 .fetch_update(order, Ordering::Relaxed, |mut w| {
1084 w &= !(u64::MAX << bit_offset);
1085 w |= value << bit_offset;
1086 Some(w)
1087 })
1088 .unwrap(); // Should not fail under lock.
1089
1090 // Modify the higher word.
1091 let bits_in_high = (bit_offset + self.bit_width) - u64::BITS as usize;
1092 let high_mask = (1u64 << bits_in_high).wrapping_sub(1);
1093 high_word_ref
1094 .fetch_update(order, Ordering::Relaxed, |mut w| {
1095 w &= !high_mask;
1096 w |= value >> (u64::BITS as usize - bit_offset);
1097 Some(w)
1098 })
1099 .unwrap(); // Should not fail under lock.
1100 }
1101 }
1102
1103 #[inline(always)]
1104 fn atomic_swap(&self, index: usize, value: u64, order: Ordering) -> u64 {
1105 let bit_pos = index * self.bit_width;
1106 let word_index = bit_pos / u64::BITS as usize;
1107 let bit_offset = bit_pos % u64::BITS as usize;
1108
1109 if bit_offset + self.bit_width <= u64::BITS as usize {
1110 // Lock-free path for single-word values.
1111 let atomic_word_ref = &self.storage[word_index];
1112 let store_mask = self.mask << bit_offset;
1113 let store_value = value << bit_offset;
1114 let mut old_word = atomic_word_ref.load(Ordering::Relaxed);
1115 loop {
1116 let new_word = (old_word & !store_mask) | store_value;
1117 match atomic_word_ref.compare_exchange_weak(
1118 old_word,
1119 new_word,
1120 order,
1121 Ordering::Relaxed,
1122 ) {
1123 Ok(_) => return (old_word >> bit_offset) & self.mask,
1124 Err(x) => old_word = x,
1125 }
1126 }
1127 } else {
1128 // Locked path for spanning values.
1129 let lock_index = word_index & (self.locks.len() - 1);
1130 let _guard = self.locks[lock_index].lock();
1131 let old_val = self.atomic_load(index, Ordering::Relaxed);
1132 self.atomic_store(index, value, order);
1133 old_val
1134 }
1135 }
1136
1137 #[inline(always)]
1138 fn atomic_compare_exchange(
1139 &self,
1140 index: usize,
1141 current: u64,
1142 new: u64,
1143 success: Ordering,
1144 failure: Ordering,
1145 ) -> Result<u64, u64> {
1146 let bit_pos = index * self.bit_width;
1147 let word_index = bit_pos / u64::BITS as usize;
1148 let bit_offset = bit_pos % u64::BITS as usize;
1149
1150 if bit_offset + self.bit_width <= u64::BITS as usize {
1151 // Lock-free path for single-word values.
1152 let atomic_word_ref = &self.storage[word_index];
1153 let store_mask = self.mask << bit_offset;
1154 let new_value_shifted = new << bit_offset;
1155 let mut old_word = atomic_word_ref.load(failure);
1156 loop {
1157 let old_val_extracted = (old_word >> bit_offset) & self.mask;
1158 if old_val_extracted != current {
1159 return Err(old_val_extracted);
1160 }
1161 let new_word = (old_word & !store_mask) | new_value_shifted;
1162 match atomic_word_ref.compare_exchange_weak(old_word, new_word, success, failure) {
1163 Ok(_) => return Ok(current),
1164 Err(x) => old_word = x,
1165 }
1166 }
1167 } else {
1168 // Locked path for spanning values.
1169 let lock_index = word_index & (self.locks.len() - 1);
1170 let _guard = self.locks[lock_index].lock();
1171 let old_val = self.atomic_load(index, failure);
1172 if old_val != current {
1173 return Err(old_val);
1174 }
1175 self.atomic_store(index, new, success);
1176 Ok(current)
1177 }
1178 }
1179
1180 /// Generic implementation for all Read-Modify-Write (RMW) operations.
1181 #[inline(always)]
1182 fn atomic_rmw(&self, index: usize, val: T, order: Ordering, op: impl Fn(T, T) -> T) -> T {
1183 // This RMW is implemented as a CAS loop on top of `compare_exchange`.
1184 let mut current = self.load(index, Ordering::Relaxed);
1185 loop {
1186 let new = op(current, val);
1187 match self.compare_exchange(index, current, new, order, Ordering::Relaxed) {
1188 Ok(old) => return old,
1189 Err(actual) => current = actual,
1190 }
1191 }
1192 }
1193}
1194
1195// --- Conversions between AtomicFixedVec and FixedVec ---
1196
impl<T, W, E> From<FixedVec<T, W, E, Vec<W>>> for AtomicFixedVec<T>
where
    T: Storable<W> + Storable<u64>,
    W: crate::fixed::traits::Word,
    E: dsi_bitstream::prelude::Endianness,
{
    /// Creates an `AtomicFixedVec` from an owned `FixedVec`.
    /// This is a zero-copy operation that re-uses the allocated buffer.
    fn from(fixed_vec: FixedVec<T, W, E, Vec<W>>) -> Self {
        // SAFETY: This transmutation is safe because [`AtomicU64`] and [`u64`] have
        // the same in-memory representation. We are taking ownership of the Vec,
        // ensuring no other references to the non-atomic data exist.
        //
        // NOTE(review): the claim above only covers `W = u64`. The cast below
        // reinterprets `Vec<W>` as `Vec<AtomicU64>` while keeping `len` and
        // `capacity` measured in `W`-sized units; for any `W` with
        // `size_of::<W>() != 8` (or alignment < 8, e.g. `u32`) the element
        // counts and the allocation layout would disagree, which is undefined
        // behavior when the new Vec is used or dropped. TODO confirm the
        // `Word` bound (or all call sites) restricts `W` to 64-bit words here.
        let storage = unsafe {
            let mut md = std::mem::ManuallyDrop::new(fixed_vec.bits);
            Vec::from_raw_parts(md.as_mut_ptr() as *mut AtomicU64, md.len(), md.capacity())
        };

        // Recompute the lock-striping pool with the same heuristic as the
        // constructor: scale with buffer size and core count, round up to a
        // power of two (required by the lock-index bit mask), cap at MAX_LOCKS.
        let num_words = (fixed_vec.len * fixed_vec.bit_width).div_ceil(u64::BITS as usize);
        let num_locks = if fixed_vec.len == 0 {
            MIN_LOCKS
        } else {
            let num_cores = std::thread::available_parallelism().map_or(MIN_LOCKS, |n| n.get());
            let target_locks = (num_words / WORDS_PER_LOCK).max(1);
            (target_locks.max(num_cores) * 2)
                .next_power_of_two()
                .min(MAX_LOCKS)
        };
        let locks = (0..num_locks).map(|_| Mutex::new(())).collect();

        Self {
            storage,
            locks,
            bit_width: fixed_vec.bit_width,
            // `mask` is converted from `W` to `u64`; `unwrap` relies on the
            // mask fitting in 64 bits, which holds for `bit_width <= 64`.
            mask: fixed_vec.mask.to_u64().unwrap(),
            len: fixed_vec.len,
            _phantom: PhantomData,
        }
    }
}
1236
1237impl<T> From<AtomicFixedVec<T>> for FixedVec<T, u64, dsi_bitstream::prelude::LE, Vec<u64>>
1238where
1239 T: Storable<u64>,
1240{
1241 /// Creates a `FixedVec` from an owned `AtomicFixedVec`.
1242 /// This is a zero-copy operation that re-uses the allocated buffer.
1243 fn from(atomic_vec: AtomicFixedVec<T>) -> Self {
1244 // SAFETY: This transmutation is safe because [`u64`] and [`AtomicU64`] have
1245 // the same in-memory representation. We are taking ownership of the Vec,
1246 // ensuring no other references to the atomic data exist.
1247 let bits = unsafe {
1248 let mut md = std::mem::ManuallyDrop::new(atomic_vec.storage);
1249 Vec::from_raw_parts(md.as_mut_ptr() as *mut u64, md.len(), md.capacity())
1250 };
1251
1252 unsafe { FixedVec::new_unchecked(bits, atomic_vec.len, atomic_vec.bit_width) }
1253 }
1254}
1255
1256// --- MemDbg and MemSize Implementations ---
1257
1258impl<T> MemSize for AtomicFixedVec<T>
1259where
1260 T: Storable<u64>,
1261{
1262 fn mem_size_rec(&self, flags: SizeFlags, _refs: &mut mem_dbg::HashMap<usize, usize>) -> usize {
1263 // Since `parking_lot::Mutex` does not implement `CopyType`, we must calculate
1264 // the size of the `locks` vector manually.
1265 let locks_size = if flags.contains(SizeFlags::CAPACITY) {
1266 self.locks.capacity() * core::mem::size_of::<Mutex<()>>()
1267 } else {
1268 self.locks.len() * core::mem::size_of::<Mutex<()>>()
1269 };
1270
1271 core::mem::size_of::<Self>()
1272 + self.storage.mem_size(flags)
1273 + core::mem::size_of::<Vec<Mutex<()>>>()
1274 + locks_size
1275 }
1276}
1277
impl<T: Storable<u64>> MemDbgImpl for AtomicFixedVec<T> {
    /// Recursively prints a memory-usage breakdown of each field.
    ///
    /// Emits `bit_width`, `len`, `mask`, an aggregate size for the lock
    /// pool, and finally `storage` (flagged as the last child).
    fn _mem_dbg_rec_on(
        &self,
        writer: &mut impl core::fmt::Write,
        total_size: usize,
        max_depth: usize,
        prefix: &mut String,
        _is_last: bool,
        flags: DbgFlags,
        _dbg_refs: &mut mem_dbg::HashSet<usize>,
    ) -> core::fmt::Result {
        // Manual implementation to avoid trying to lock and inspect mutexes.
        self.bit_width
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags, _dbg_refs)?;
        self.len
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags, _dbg_refs)?;
        self.mask
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags, _dbg_refs)?;

        // Display the size of the lock vector, but do not recurse into it.
        // Uses `capacity()` so spare allocation is always included here.
        let locks_size = core::mem::size_of::<Vec<Mutex<()>>>()
            + self.locks.capacity() * core::mem::size_of::<Mutex<()>>();
        locks_size._mem_dbg_rec_on(writer, total_size, max_depth, prefix, false, flags, _dbg_refs)?;

        // `storage` is emitted last (`is_last = true`) to close the tree.
        self.storage
            ._mem_dbg_rec_on(writer, total_size, max_depth, prefix, true, flags, _dbg_refs)?;
        Ok(())
    }
}
1307
1308/// An iterator over the elements of a borrowed [`AtomicFixedVec`].
1309///
1310/// This struct is created by the [`iter`](AtomicFixedVec::iter) method. It
1311/// atomically loads each value on the fly.
pub struct AtomicFixedVecIter<'a, T>
where
    T: Storable<u64> + Copy + ToPrimitive,
{
    // Borrowed vector being iterated; each `next` reads from it atomically.
    vec: &'a AtomicFixedVec<T>,
    // Index of the next element to yield; equals `vec.len()` when exhausted.
    current_index: usize,
}
1319
1320impl<T> Iterator for AtomicFixedVecIter<'_, T>
1321where
1322 T: Storable<u64> + Copy + ToPrimitive,
1323{
1324 type Item = T;
1325
1326 #[inline]
1327 fn next(&mut self) -> Option<Self::Item> {
1328 if self.current_index >= self.vec.len() {
1329 return None;
1330 }
1331 // Use the safe get method, which defaults to SeqCst ordering.
1332 let value = self.vec.get(self.current_index).unwrap();
1333 self.current_index += 1;
1334 Some(value)
1335 }
1336
1337 fn size_hint(&self) -> (usize, Option<usize>) {
1338 let remaining = self.vec.len().saturating_sub(self.current_index);
1339 (remaining, Some(remaining))
1340 }
1341}
1342
1343impl<T> ExactSizeIterator for AtomicFixedVecIter<'_, T>
1344where
1345 T: Storable<u64> + Copy + ToPrimitive,
1346{
1347 fn len(&self) -> usize {
1348 self.vec.len().saturating_sub(self.current_index)
1349 }
1350}
1351
1352impl<'a, T> IntoIterator for &'a AtomicFixedVec<T>
1353where
1354 T: Storable<u64> + Copy + ToPrimitive,
1355{
1356 type Item = T;
1357 type IntoIter = AtomicFixedVecIter<'a, T>;
1358
1359 fn into_iter(self) -> Self::IntoIter {
1360 AtomicFixedVecIter {
1361 vec: self,
1362 current_index: 0,
1363 }
1364 }
1365}
1366
1367impl<T> PartialEq for AtomicFixedVec<T>
1368where
1369 T: Storable<u64> + PartialEq + Copy + ToPrimitive,
1370{
1371 /// Checks for equality between two [`AtomicFixedVec`] instances.
1372 ///
1373 /// This comparison is performed by iterating over both vectors and comparing
1374 /// their elements one by one. The reads are done atomically but the overall
1375 /// comparison is not a single atomic operation.
1376 fn eq(&self, other: &Self) -> bool {
1377 if self.len() != other.len() || self.bit_width() != other.bit_width() {
1378 return false;
1379 }
1380 // Use SeqCst for a strong guarantee in tests.
1381 self.iter().zip(other.iter()).all(|(a, b)| a == b)
1382 }
1383}
1384
// `Eq` is sound here: `eq` compares element-by-element, and `T: Eq`
// guarantees that per-element equality is a total equivalence relation.
impl<T> Eq for AtomicFixedVec<T> where T: Storable<u64> + Eq + Copy + ToPrimitive {}