// velocityx/lib.rs

//! # VelocityX
//!
//! A comprehensive lock-free data structures library designed for high-performance concurrent programming in Rust.
//!
//! ## 🚀 Features
//!
//! - **MPMC Queue**: Multi-producer, multi-consumer bounded queue with zero locks
//! - **Concurrent HashMap**: Lock-free reads with concurrent modifications using striped locking
//! - **Work-Stealing Deque**: Chase-Lev deque for task scheduling and parallel workload distribution
//! - **Lock-Free Stack**: Treiber's algorithm stack with lock-free push and pop operations
//! - **Performance Metrics**: Real-time monitoring with the `MetricsCollector` trait across all data structures
//!
//! ## 🎯 Philosophy
//!
//! VelocityX focuses on providing:
//! - Zero-cost abstractions with optimal performance
//! - Comprehensive safety guarantees through Rust's type system
//! - Ergonomic APIs that guide users toward correct concurrent programming patterns
//! - Extensive documentation and real-world usage examples
//! - Production-ready performance monitoring and metrics collection
//!
//! ## ⚡ Quick Start
//!
//! ```rust
//! use velocityx::{MpmcQueue, MetricsCollector};
//!
//! let queue = MpmcQueue::new(100);
//! queue.push(42);
//! assert_eq!(queue.pop(), Some(42));
//!
//! // Get performance metrics
//! let metrics = queue.metrics();
//! println!("Success rate: {:.2}%", metrics.success_rate());
//! ```
//!
//! ## 🔒 Thread Safety
//!
//! All data structures in VelocityX are designed to be thread-safe and can be safely shared
//! across threads without additional synchronization primitives.
//!
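//! For example, a queue can be handed to other threads behind an `Arc` with no extra locking.
//! This is a minimal sketch that assumes the `std` feature is enabled and uses only the API
//! shown in the quick start above:
//!
//! ```rust
//! use std::sync::Arc;
//! use std::thread;
//! use velocityx::MpmcQueue;
//!
//! let queue = Arc::new(MpmcQueue::new(100));
//!
//! // Producer thread: push a handful of items.
//! let producer = {
//!     let queue = Arc::clone(&queue);
//!     thread::spawn(move || {
//!         for i in 0..10 {
//!             queue.push(i);
//!         }
//!     })
//! };
//! producer.join().unwrap();
//!
//! // Consumer side: drain whatever was produced.
//! while queue.pop().is_some() {}
//! ```
//!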
//! ## 📊 Performance
//!
//! VelocityX is optimized for modern multi-core processors with careful attention to:
//! - Cache-line alignment and padding to prevent false sharing (see the sketch after this list)
//! - Memory ordering semantics for correctness and performance
//! - Contention minimization through lock-free algorithms
//! - NUMA-aware design where applicable
//! - Real-time performance monitoring with minimal overhead
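//!
//! As an illustration of the first point, adjacent hot counters can each be wrapped in
//! [`util::CachePadded`] so they land on separate cache lines. This is a sketch using only
//! the `util` module defined in this crate:
//!
//! ```rust
//! use core::sync::atomic::{AtomicUsize, Ordering};
//! use velocityx::util::CachePadded;
//!
//! // Without padding these two counters could share a cache line and ping-pong between cores.
//! let counters = [
//!     CachePadded::new(AtomicUsize::new(0)),
//!     CachePadded::new(AtomicUsize::new(0)),
//! ];
//!
//! counters[0].store(1, Ordering::Relaxed);
//! assert_eq!(counters[0].load(Ordering::Relaxed), 1);
//! assert_eq!(counters[1].load(Ordering::Relaxed), 0);
//! ```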

#![no_std]

#[cfg(feature = "std")]
extern crate std;

#[cfg(feature = "std")]
pub mod deque;
#[cfg(feature = "std")]
pub mod map;
pub mod metrics;
#[cfg(feature = "std")]
pub mod queue;
#[cfg(feature = "std")]
pub mod stack;

#[cfg(feature = "std")]
pub use crate::deque::work_stealing::WorkStealingDeque;
#[cfg(feature = "std")]
pub use crate::map::concurrent::ConcurrentHashMap;
#[cfg(feature = "std")]
pub use crate::metrics::{MetricsCollector, PerformanceMetrics};
#[cfg(all(feature = "std", feature = "lockfree"))]
pub use crate::queue::LockFreeMpmcQueue;
#[cfg(feature = "std")]
pub use crate::queue::MpmcQueue;
#[cfg(feature = "std")]
pub use crate::stack::LockFreeStack;

/// Common utilities and helper types
pub mod util {
    /// Cache line size for alignment purposes
    pub const CACHE_LINE_SIZE: usize = 64;

    /// Align a value to cache line boundaries
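    ///
    /// Rounds `size` up to the next multiple of [`CACHE_LINE_SIZE`]. For example:
    ///
    /// ```
    /// use velocityx::util::{align_to_cache_line, CACHE_LINE_SIZE};
    ///
    /// assert_eq!(align_to_cache_line(1), CACHE_LINE_SIZE);
    /// assert_eq!(align_to_cache_line(64), 64);
    /// assert_eq!(align_to_cache_line(65), 128);
    /// ```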
    #[inline]
    pub const fn align_to_cache_line(size: usize) -> usize {
        (size + CACHE_LINE_SIZE - 1) & !(CACHE_LINE_SIZE - 1)
    }

    /// Pad a struct to cache line size
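    ///
    /// The `#[repr(align(64))]` attribute guarantees that the wrapper is aligned to (and at
    /// least as large as) one cache line. For example:
    ///
    /// ```
    /// use velocityx::util::CachePadded;
    ///
    /// assert!(core::mem::align_of::<CachePadded<u8>>() >= 64);
    /// assert!(core::mem::size_of::<CachePadded<u8>>() >= 64);
    /// assert_eq!(*CachePadded::new(7_u32).get(), 7);
    /// ```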
    #[repr(align(64))]
    pub struct CachePadded<T> {
        value: T,
    }

    impl<T> CachePadded<T> {
        /// Create a new cache-padded value
        #[inline]
        pub const fn new(value: T) -> Self {
            Self { value }
        }

        /// Get a reference to the inner value
        #[inline]
        pub const fn get(&self) -> &T {
            &self.value
        }

        /// Get a mutable reference to the inner value
        #[inline]
        pub fn get_mut(&mut self) -> &mut T {
            &mut self.value
        }

        /// Get the inner value
        #[inline]
        pub fn into_inner(self) -> T {
            self.value
        }
    }

    // Implement atomic operations for common atomic types
    impl CachePadded<core::sync::atomic::AtomicUsize> {
        /// Store a value into the atomic integer
        #[inline]
        pub fn store(&self, val: usize, order: core::sync::atomic::Ordering) {
            self.value.store(val, order);
        }

        /// Load a value from the atomic integer
        #[inline]
        pub fn load(&self, order: core::sync::atomic::Ordering) -> usize {
            self.value.load(order)
        }

        /// Compare and exchange operation on the atomic integer
        #[inline]
        pub fn compare_exchange(
            &self,
            current: usize,
            new: usize,
            success: core::sync::atomic::Ordering,
            failure: core::sync::atomic::Ordering,
        ) -> Result<usize, usize> {
            self.value.compare_exchange(current, new, success, failure)
        }
    }

    impl CachePadded<core::sync::atomic::AtomicIsize> {
        /// Store a value into the atomic integer
        #[inline]
        pub fn store(&self, val: isize, order: core::sync::atomic::Ordering) {
            self.value.store(val, order);
        }

        /// Load a value from the atomic integer
        #[inline]
        pub fn load(&self, order: core::sync::atomic::Ordering) -> isize {
            self.value.load(order)
        }

        /// Compare and exchange operation on the atomic integer
        #[inline]
        pub fn compare_exchange(
            &self,
            current: isize,
            new: isize,
            success: core::sync::atomic::Ordering,
            failure: core::sync::atomic::Ordering,
        ) -> Result<isize, isize> {
            self.value.compare_exchange(current, new, success, failure)
        }
    }

    impl CachePadded<core::sync::atomic::AtomicBool> {
        /// Store a value into the atomic boolean
        #[inline]
        pub fn store(&self, val: bool, order: core::sync::atomic::Ordering) {
            self.value.store(val, order);
        }

        /// Load a value from the atomic boolean
        #[inline]
        pub fn load(&self, order: core::sync::atomic::Ordering) -> bool {
            self.value.load(order)
        }

        /// Compare and exchange operation on the atomic boolean
        #[inline]
        pub fn compare_exchange(
            &self,
            current: bool,
            new: bool,
            success: core::sync::atomic::Ordering,
            failure: core::sync::atomic::Ordering,
        ) -> Result<bool, bool> {
            self.value.compare_exchange(current, new, success, failure)
        }
    }

    impl<T> CachePadded<core::sync::atomic::AtomicPtr<T>> {
        /// Store a value into the atomic pointer
        #[inline]
        pub fn store(&self, val: *mut T, order: core::sync::atomic::Ordering) {
            self.value.store(val, order);
        }

        /// Load a value from the atomic pointer
        #[inline]
        pub fn load(&self, order: core::sync::atomic::Ordering) -> *mut T {
            self.value.load(order)
        }

        /// Compare and exchange operation on the atomic pointer
        #[inline]
        pub fn compare_exchange(
            &self,
            current: *mut T,
            new: *mut T,
            success: core::sync::atomic::Ordering,
            failure: core::sync::atomic::Ordering,
        ) -> Result<*mut T, *mut T> {
            self.value.compare_exchange(current, new, success, failure)
        }
    }

    impl CachePadded<core::sync::atomic::AtomicU64> {
        /// Add to the atomic integer, returning the previous value
        #[inline]
        pub fn fetch_add(&self, val: u64, order: core::sync::atomic::Ordering) -> u64 {
            self.value.fetch_add(val, order)
        }
    }

    #[cfg(feature = "std")]
    impl<T> CachePadded<parking_lot::Mutex<T>> {
        /// Lock the mutex
        #[inline]
        pub fn lock(&self) -> parking_lot::MutexGuard<'_, T> {
            self.value.lock()
        }
    }

    impl<T> CachePadded<T> {
        /// Get a mutable reference to the inner value (for indexing)
        #[inline]
        pub fn inner_mut(&mut self) -> &mut T {
            &mut self.value
        }

        /// Get a reference to the inner value (for indexing)
        #[inline]
        pub fn inner(&self) -> &T {
            &self.value
        }
    }

    impl<T: Clone> Clone for CachePadded<T> {
        fn clone(&self) -> Self {
            Self::new(self.value.clone())
        }
    }

    impl<T: Copy> Copy for CachePadded<T> {}

    impl<T: core::fmt::Debug> core::fmt::Debug for CachePadded<T> {
        fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
            core::fmt::Debug::fmt(&self.value, f)
        }
    }
}

/// Error types for VelocityX operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum Error {
    /// Operation would block (queue full, etc.)
    WouldBlock,
    /// Queue or data structure is closed
    Closed,
    /// Invalid operation for current state
    InvalidState,
    /// Capacity exceeded (for bounded structures)
    CapacityExceeded,
    /// Data structure is poisoned (thread panic occurred)
    Poisoned,
    /// Invalid argument provided
    InvalidArgument,
    /// Memory allocation failed
    OutOfMemory,
    /// Operation timed out
    Timeout,
}

impl core::fmt::Display for Error {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        match self {
            Error::WouldBlock => write!(f, "Operation would block"),
            Error::Closed => write!(f, "Data structure is closed"),
            Error::InvalidState => write!(f, "Invalid operation for current state"),
            Error::CapacityExceeded => write!(f, "Capacity exceeded"),
            Error::Poisoned => write!(f, "Data structure is poisoned due to thread panic"),
            Error::InvalidArgument => write!(f, "Invalid argument provided"),
            Error::OutOfMemory => write!(f, "Memory allocation failed"),
            Error::Timeout => write!(f, "Operation timed out"),
        }
    }
}

#[cfg(feature = "std")]
impl std::error::Error for Error {}

/// Result type for VelocityX operations
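///
/// For example (using a hypothetical validation helper purely for illustration):
///
/// ```
/// fn check_capacity(cap: usize) -> velocityx::Result<()> {
///     // Hypothetical helper: reject a zero capacity with the crate's error type.
///     if cap == 0 {
///         Err(velocityx::Error::InvalidArgument)
///     } else {
///         Ok(())
///     }
/// }
///
/// assert_eq!(check_capacity(0), Err(velocityx::Error::InvalidArgument));
/// assert!(check_capacity(16).is_ok());
/// ```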
pub type Result<T> = core::result::Result<T, Error>;

#[cfg(test)]
mod tests {
    use super::*;
    use std::string::ToString;

    #[test]
    fn test_cache_line_alignment() {
        assert_eq!(util::align_to_cache_line(1), 64);
        assert_eq!(util::align_to_cache_line(64), 64);
        assert_eq!(util::align_to_cache_line(65), 128);
        assert_eq!(util::align_to_cache_line(127), 128);
        assert_eq!(util::align_to_cache_line(128), 128);
    }

    #[test]
    fn test_cache_padded() {
        let padded = util::CachePadded::new(42);
        assert_eq!(*padded.get(), 42);

        let mut padded = padded;
        *padded.get_mut() = 100;
        assert_eq!(padded.into_inner(), 100);
    }

    #[test]
    fn test_error_display() {
        assert_eq!(
            Error::WouldBlock.to_string().trim(),
            "Operation would block"
        );
        assert_eq!(Error::Closed.to_string().trim(), "Data structure is closed");
        assert_eq!(
            Error::InvalidState.to_string().trim(),
            "Invalid operation for current state"
        );
        assert_eq!(
            Error::CapacityExceeded.to_string().trim(),
            "Capacity exceeded"
        );
        assert_eq!(
            Error::Poisoned.to_string().trim(),
            "Data structure is poisoned due to thread panic"
        );
        assert_eq!(
            Error::InvalidArgument.to_string().trim(),
            "Invalid argument provided"
        );
        assert_eq!(
            Error::OutOfMemory.to_string().trim(),
            "Memory allocation failed"
        );
        assert_eq!(Error::Timeout.to_string().trim(), "Operation timed out");
    }
}