mmap_io/atomic.rs

//! Atomic memory views for lock-free concurrent access to specific data types.
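//!
//! The sketch below illustrates sharing one mapped counter across threads. It mirrors
//! `test_concurrent_atomic_access` at the bottom of this module; the file name and thread
//! count are illustrative, and the block is marked `ignore` since it assumes the `atomic`
//! feature is enabled.
//!
//! ```ignore
//! use std::sync::{atomic::Ordering, Arc};
//! use std::thread;
//!
//! let mmap = Arc::new(mmap_io::create_mmap("counter.bin", 8).expect("create mapping"));
//! mmap.atomic_u64(0).expect("atomic").store(0, Ordering::SeqCst);
//!
//! let handles: Vec<_> = (0..4)
//!     .map(|_| {
//!         let mmap = Arc::clone(&mmap);
//!         thread::spawn(move || {
//!             // Each thread obtains its own atomic view of the same 8 bytes.
//!             let counter = mmap.atomic_u64(0).expect("aligned, in-bounds offset");
//!             counter.fetch_add(1, Ordering::SeqCst);
//!         })
//!     })
//!     .collect();
//! for handle in handles {
//!     handle.join().expect("thread join");
//! }
//! assert_eq!(mmap.atomic_u64(0).expect("atomic").load(Ordering::SeqCst), 4);
//! ```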

use crate::errors::{MmapIoError, Result};
use crate::mmap::MemoryMappedFile;
use std::sync::atomic::{AtomicU32, AtomicU64};

impl MemoryMappedFile {
    /// Get an atomic view of a u64 value at the specified offset.
    ///
    /// The offset must be properly aligned for atomic operations (8-byte alignment for u64).
    /// This allows lock-free concurrent access to the value.
    ///
    /// # Safety
    ///
    /// The returned reference is valid for the lifetime of the memory mapping.
    /// The caller must ensure that the memory at this offset is not concurrently
    /// modified through non-atomic operations.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Misaligned` if the offset is not 8-byte aligned.
    /// Returns `MmapIoError::OutOfBounds` if the offset + 8 exceeds file bounds.
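    ///
    /// # Examples
    ///
    /// A minimal usage sketch (compile-only doctest; the file name and size are illustrative):
    ///
    /// ```no_run
    /// use std::path::Path;
    /// use std::sync::atomic::Ordering;
    ///
    /// let mmap = mmap_io::create_mmap(Path::new("counter.bin"), 64).expect("create mapping");
    /// let counter = mmap.atomic_u64(0).expect("offset 0 is 8-byte aligned and in bounds");
    /// counter.store(1, Ordering::SeqCst);
    /// assert_eq!(counter.load(Ordering::SeqCst), 1);
    /// ```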
    #[cfg(feature = "atomic")]
    pub fn atomic_u64(&self, offset: u64) -> Result<&AtomicU64> {
        const ALIGN: u64 = std::mem::align_of::<AtomicU64>() as u64;
        const SIZE: u64 = std::mem::size_of::<AtomicU64>() as u64;

        // Check alignment
        if offset % ALIGN != 0 {
            return Err(MmapIoError::Misaligned {
                required: ALIGN,
                offset,
            });
        }

        // Check bounds (checked_add rejects offsets that would wrap around u64)
        let total = self.current_len()?;
        if offset.checked_add(SIZE).map_or(true, |end| end > total) {
            return Err(MmapIoError::OutOfBounds {
                offset,
                len: SIZE,
                total,
            });
        }

        // Get the base pointer for the mapping
        let ptr = match &self.inner.map {
            crate::mmap::MapVariant::Ro(m) => m.as_ptr(),
            crate::mmap::MapVariant::Rw(lock) => {
                let guard = lock.read();
                guard.as_ptr()
            }
            crate::mmap::MapVariant::Cow(m) => m.as_ptr(),
        };

        // SAFETY: Multiple invariants are guaranteed:
        // 1. Alignment: We've verified offset % 8 == 0 (required for AtomicU64)
        // 2. Bounds: We've verified offset + 8 <= total file size
        // 3. Overflow: try_into() ensures offset fits in usize, preventing ptr arithmetic overflow
        // 4. Lifetime: The returned reference is bound to 'self', ensuring the mapping outlives it
        // 5. Validity: The memory is mapped and valid for the entire file size
        // 6. Atomicity: The hardware guarantees atomic operations on aligned 8-byte values
        let offset_usize = offset.try_into().map_err(|_| MmapIoError::OutOfBounds {
            offset,
            len: SIZE,
            total,
        })?;
        unsafe {
            // ptr.add() is safe because:
            // - offset_usize is guaranteed to be within bounds (checked above)
            // - The resulting pointer is within the mapped region
            let addr = ptr.add(offset_usize);
            let atomic_ptr = addr as *const AtomicU64;
            Ok(&*atomic_ptr)
        }
    }

    /// Get an atomic view of a u32 value at the specified offset.
    ///
    /// The offset must be properly aligned for atomic operations (4-byte alignment for u32).
    /// This allows lock-free concurrent access to the value.
    ///
    /// # Safety
    ///
    /// The returned reference is valid for the lifetime of the memory mapping.
    /// The caller must ensure that the memory at this offset is not concurrently
    /// modified through non-atomic operations.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Misaligned` if the offset is not 4-byte aligned.
    /// Returns `MmapIoError::OutOfBounds` if the offset + 4 exceeds file bounds.
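    ///
    /// # Examples
    ///
    /// A minimal usage sketch (compile-only doctest; the file name and size are illustrative):
    ///
    /// ```no_run
    /// use std::path::Path;
    /// use std::sync::atomic::Ordering;
    ///
    /// let mmap = mmap_io::create_mmap(Path::new("flags.bin"), 32).expect("create mapping");
    /// let flag = mmap.atomic_u32(4).expect("offset 4 is 4-byte aligned and in bounds");
    /// flag.store(0xDEAD_BEEF, Ordering::SeqCst);
    /// assert!(mmap.atomic_u32(3).is_err()); // offset 3 is not 4-byte aligned
    /// ```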
    #[cfg(feature = "atomic")]
    pub fn atomic_u32(&self, offset: u64) -> Result<&AtomicU32> {
        const ALIGN: u64 = std::mem::align_of::<AtomicU32>() as u64;
        const SIZE: u64 = std::mem::size_of::<AtomicU32>() as u64;

        // Check alignment
        if offset % ALIGN != 0 {
            return Err(MmapIoError::Misaligned {
                required: ALIGN,
                offset,
            });
        }

        // Check bounds (checked_add rejects offsets that would wrap around u64)
        let total = self.current_len()?;
        if offset.checked_add(SIZE).map_or(true, |end| end > total) {
            return Err(MmapIoError::OutOfBounds {
                offset,
                len: SIZE,
                total,
            });
        }

        // Get the base pointer for the mapping
        let ptr = match &self.inner.map {
            crate::mmap::MapVariant::Ro(m) => m.as_ptr(),
            crate::mmap::MapVariant::Rw(lock) => {
                let guard = lock.read();
                guard.as_ptr()
            }
            crate::mmap::MapVariant::Cow(m) => m.as_ptr(),
        };

        // SAFETY: Multiple invariants are guaranteed:
        // 1. Alignment: We've verified offset % 4 == 0 (required for AtomicU32)
        // 2. Bounds: We've verified offset + 4 <= total file size
        // 3. Overflow: try_into() ensures offset fits in usize, preventing ptr arithmetic overflow
        // 4. Lifetime: The returned reference is bound to 'self', ensuring the mapping outlives it
        // 5. Validity: The memory is mapped and valid for the entire file size
        // 6. Atomicity: The hardware guarantees atomic operations on aligned 4-byte values
        let offset_usize = offset.try_into().map_err(|_| MmapIoError::OutOfBounds {
            offset,
            len: SIZE,
            total,
        })?;
        unsafe {
            // ptr.add() is safe because:
            // - offset_usize is guaranteed to be within bounds (checked above)
            // - The resulting pointer is within the mapped region
            let addr = ptr.add(offset_usize);
            let atomic_ptr = addr as *const AtomicU32;
            Ok(&*atomic_ptr)
        }
    }

    /// Get multiple atomic u64 views starting at the specified offset.
    ///
    /// Returns a slice of atomic values. All values must be within bounds
    /// and the offset must be 8-byte aligned.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Misaligned` if the offset is not 8-byte aligned.
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
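    ///
    /// # Examples
    ///
    /// A minimal usage sketch (compile-only doctest; the file name, size, and count are illustrative):
    ///
    /// ```no_run
    /// use std::path::Path;
    /// use std::sync::atomic::Ordering;
    ///
    /// let mmap = mmap_io::create_mmap(Path::new("counters.bin"), 64).expect("create mapping");
    /// let counters = mmap.atomic_u64_slice(0, 8).expect("8 * 8 bytes fit in a 64-byte file");
    /// for (i, counter) in counters.iter().enumerate() {
    ///     counter.store(i as u64, Ordering::SeqCst);
    /// }
    /// ```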
    #[cfg(feature = "atomic")]
    pub fn atomic_u64_slice(&self, offset: u64, count: usize) -> Result<&[AtomicU64]> {
        const ALIGN: u64 = std::mem::align_of::<AtomicU64>() as u64;
        const SIZE: u64 = std::mem::size_of::<AtomicU64>() as u64;

        // Check alignment
        if offset % ALIGN != 0 {
            return Err(MmapIoError::Misaligned {
                required: ALIGN,
                offset,
            });
        }

        // Check bounds (saturating/checked arithmetic rejects ranges that would overflow u64)
        let total_size = SIZE.saturating_mul(count as u64);
        let total = self.current_len()?;
        if offset.checked_add(total_size).map_or(true, |end| end > total) {
            return Err(MmapIoError::OutOfBounds {
                offset,
                len: total_size,
                total,
            });
        }

        // Get the base pointer for the mapping
        let ptr = match &self.inner.map {
            crate::mmap::MapVariant::Ro(m) => m.as_ptr(),
            crate::mmap::MapVariant::Rw(lock) => {
                let guard = lock.read();
                guard.as_ptr()
            }
            crate::mmap::MapVariant::Cow(m) => m.as_ptr(),
        };

        // SAFETY: Multiple invariants are guaranteed:
        // 1. Alignment: We've verified offset % 8 == 0 (required for AtomicU64 array)
        // 2. Bounds: We've verified offset + (count * 8) <= total file size
        // 3. Overflow: try_into() ensures offset fits in usize, preventing ptr arithmetic overflow
        // 4. Lifetime: The returned slice is bound to 'self', ensuring the mapping outlives it
        // 5. Validity: The memory is mapped and valid for the entire requested range
        // 6. Atomicity: Each element in the slice maintains 8-byte alignment for atomic operations
        let offset_usize = offset.try_into().map_err(|_| MmapIoError::OutOfBounds {
            offset,
            len: total_size,
            total,
        })?;
        unsafe {
            // ptr.add() is safe because:
            // - offset_usize is guaranteed to be within bounds (checked above)
            // - The resulting pointer is within the mapped region
            let addr = ptr.add(offset_usize);
            let atomic_ptr = addr as *const AtomicU64;
            // from_raw_parts is safe because:
            // - atomic_ptr points to valid, aligned memory
            // - count elements fit within the mapped region (verified above)
            Ok(std::slice::from_raw_parts(atomic_ptr, count))
        }
    }

    /// Get multiple atomic u32 views starting at the specified offset.
    ///
    /// Returns a slice of atomic values. All values must be within bounds
    /// and the offset must be 4-byte aligned.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Misaligned` if the offset is not 4-byte aligned.
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
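    ///
    /// # Examples
    ///
    /// A minimal usage sketch (compile-only doctest; the file name, size, and count are illustrative):
    ///
    /// ```no_run
    /// use std::path::Path;
    /// use std::sync::atomic::Ordering;
    ///
    /// let mmap = mmap_io::create_mmap(Path::new("flags.bin"), 32).expect("create mapping");
    /// let flags = mmap.atomic_u32_slice(0, 8).expect("8 * 4 bytes fit in a 32-byte file");
    /// for flag in flags {
    ///     flag.store(1, Ordering::SeqCst);
    /// }
    /// ```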
    #[cfg(feature = "atomic")]
    pub fn atomic_u32_slice(&self, offset: u64, count: usize) -> Result<&[AtomicU32]> {
        const ALIGN: u64 = std::mem::align_of::<AtomicU32>() as u64;
        const SIZE: u64 = std::mem::size_of::<AtomicU32>() as u64;

        // Check alignment
        if offset % ALIGN != 0 {
            return Err(MmapIoError::Misaligned {
                required: ALIGN,
                offset,
            });
        }

        // Check bounds (saturating/checked arithmetic rejects ranges that would overflow u64)
        let total_size = SIZE.saturating_mul(count as u64);
        let total = self.current_len()?;
        if offset.checked_add(total_size).map_or(true, |end| end > total) {
            return Err(MmapIoError::OutOfBounds {
                offset,
                len: total_size,
                total,
            });
        }

        // Get the base pointer for the mapping
        let ptr = match &self.inner.map {
            crate::mmap::MapVariant::Ro(m) => m.as_ptr(),
            crate::mmap::MapVariant::Rw(lock) => {
                let guard = lock.read();
                guard.as_ptr()
            }
            crate::mmap::MapVariant::Cow(m) => m.as_ptr(),
        };

        // SAFETY: Multiple invariants are guaranteed:
        // 1. Alignment: We've verified offset % 4 == 0 (required for AtomicU32 array)
        // 2. Bounds: We've verified offset + (count * 4) <= total file size
        // 3. Overflow: try_into() ensures offset fits in usize, preventing ptr arithmetic overflow
        // 4. Lifetime: The returned slice is bound to 'self', ensuring the mapping outlives it
        // 5. Validity: The memory is mapped and valid for the entire requested range
        // 6. Atomicity: Each element in the slice maintains 4-byte alignment for atomic operations
        let offset_usize = offset.try_into().map_err(|_| MmapIoError::OutOfBounds {
            offset,
            len: total_size,
            total,
        })?;
        unsafe {
            // ptr.add() is safe because:
            // - offset_usize is guaranteed to be within bounds (checked above)
            // - The resulting pointer is within the mapped region
            let addr = ptr.add(offset_usize);
            let atomic_ptr = addr as *const AtomicU32;
            // from_raw_parts is safe because:
            // - atomic_ptr points to valid, aligned memory
            // - count elements fit within the mapped region (verified above)
            Ok(std::slice::from_raw_parts(atomic_ptr, count))
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::create_mmap;
    use std::fs;
    use std::path::PathBuf;
    use std::sync::atomic::Ordering;

    fn tmp_path(name: &str) -> PathBuf {
        let mut p = std::env::temp_dir();
        p.push(format!(
            "mmap_io_atomic_test_{}_{}",
            name,
            std::process::id()
        ));
        p
    }

    #[test]
    #[cfg(feature = "atomic")]
    fn test_atomic_u64_operations() {
        let path = tmp_path("atomic_u64");
        let _ = fs::remove_file(&path);

        let mmap = create_mmap(&path, 64).expect("create");

        // Test aligned access
        let atomic = mmap.atomic_u64(0).expect("atomic at 0");
        atomic.store(0x1234567890ABCDEF, Ordering::SeqCst);
        assert_eq!(atomic.load(Ordering::SeqCst), 0x1234567890ABCDEF);

        // Test another aligned offset
        let atomic2 = mmap.atomic_u64(8).expect("atomic at 8");
        atomic2.store(0xFEDCBA0987654321, Ordering::SeqCst);
        assert_eq!(atomic2.load(Ordering::SeqCst), 0xFEDCBA0987654321);

        // Test misaligned access
        assert!(matches!(
            mmap.atomic_u64(1),
            Err(MmapIoError::Misaligned {
                required: 8,
                offset: 1
            })
        ));
        assert!(matches!(
            mmap.atomic_u64(7),
            Err(MmapIoError::Misaligned {
                required: 8,
                offset: 7
            })
        ));

        // Test out of bounds
        assert!(mmap.atomic_u64(64).is_err());
        assert!(mmap.atomic_u64(57).is_err()); // Would need 8 bytes

        fs::remove_file(&path).expect("cleanup");
    }

    #[test]
    #[cfg(feature = "atomic")]
    fn test_atomic_u32_operations() {
        let path = tmp_path("atomic_u32");
        let _ = fs::remove_file(&path);

        let mmap = create_mmap(&path, 32).expect("create");

        // Test aligned access
        let atomic = mmap.atomic_u32(0).expect("atomic at 0");
        atomic.store(0x12345678, Ordering::SeqCst);
        assert_eq!(atomic.load(Ordering::SeqCst), 0x12345678);

        // Test another aligned offset
        let atomic2 = mmap.atomic_u32(4).expect("atomic at 4");
        atomic2.store(0x87654321, Ordering::SeqCst);
        assert_eq!(atomic2.load(Ordering::SeqCst), 0x87654321);

        // Test misaligned access
        assert!(matches!(
            mmap.atomic_u32(1),
            Err(MmapIoError::Misaligned {
                required: 4,
                offset: 1
            })
        ));
        assert!(matches!(
            mmap.atomic_u32(3),
            Err(MmapIoError::Misaligned {
                required: 4,
                offset: 3
            })
        ));

        // Test out of bounds
        assert!(mmap.atomic_u32(32).is_err());
        assert!(mmap.atomic_u32(29).is_err()); // Would need 4 bytes

        fs::remove_file(&path).expect("cleanup");
    }

    #[test]
    #[cfg(feature = "atomic")]
    fn test_atomic_slices() {
        let path = tmp_path("atomic_slices");
        let _ = fs::remove_file(&path);

        let mmap = create_mmap(&path, 128).expect("create");

        // Test u64 slice
        let slice = mmap.atomic_u64_slice(0, 4).expect("u64 slice");
        assert_eq!(slice.len(), 4);
        for (i, atomic) in slice.iter().enumerate() {
            atomic.store(i as u64 * 100, Ordering::SeqCst);
        }
        for (i, atomic) in slice.iter().enumerate() {
            assert_eq!(atomic.load(Ordering::SeqCst), i as u64 * 100);
        }

        // Test u32 slice
        let slice = mmap.atomic_u32_slice(64, 8).expect("u32 slice");
        assert_eq!(slice.len(), 8);
        for (i, atomic) in slice.iter().enumerate() {
            atomic.store(i as u32 * 10, Ordering::SeqCst);
        }
        for (i, atomic) in slice.iter().enumerate() {
            assert_eq!(atomic.load(Ordering::SeqCst), i as u32 * 10);
        }

        // Test misaligned slice
        assert!(mmap.atomic_u64_slice(1, 2).is_err());
        assert!(mmap.atomic_u32_slice(2, 2).is_err());

        // Test out of bounds slice
        assert!(mmap.atomic_u64_slice(120, 2).is_err()); // Would need 16 bytes
        assert!(mmap.atomic_u32_slice(124, 2).is_err()); // Would need 8 bytes

        fs::remove_file(&path).expect("cleanup");
    }

    #[test]
    #[cfg(feature = "atomic")]
    fn test_atomic_with_different_modes() {
        let path = tmp_path("atomic_modes");
        let _ = fs::remove_file(&path);

        // Create and write initial values
        let mmap = create_mmap(&path, 16).expect("create");
        let atomic = mmap.atomic_u64(0).expect("atomic");
        atomic.store(42, Ordering::SeqCst);
        mmap.flush().expect("flush");
        drop(mmap);

        // Test with RO mode
        let mmap = MemoryMappedFile::open_ro(&path).expect("open ro");
        let atomic = mmap.atomic_u64(0).expect("atomic ro");
        assert_eq!(atomic.load(Ordering::SeqCst), 42);
        // Note: Writing to RO atomic would be UB, so we don't test it

        #[cfg(feature = "cow")]
        {
            // Test with COW mode
            let mmap = MemoryMappedFile::open_cow(&path).expect("open cow");
            let atomic = mmap.atomic_u64(0).expect("atomic cow");
            assert_eq!(atomic.load(Ordering::SeqCst), 42);
            // COW writes would only affect this process
        }

        fs::remove_file(&path).expect("cleanup");
    }

    #[test]
    #[cfg(feature = "atomic")]
    fn test_concurrent_atomic_access() {
        use std::sync::Arc;
        use std::thread;

        let path = tmp_path("concurrent_atomic");
        let _ = fs::remove_file(&path);

        let mmap = Arc::new(create_mmap(&path, 8).expect("create"));
        let atomic = mmap.atomic_u64(0).expect("atomic");
        atomic.store(0, Ordering::SeqCst);

        // Spawn multiple threads incrementing the same atomic
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let mmap = Arc::clone(&mmap);
                thread::spawn(move || {
                    let atomic = mmap.atomic_u64(0).expect("atomic in thread");
                    for _ in 0..1000 {
                        atomic.fetch_add(1, Ordering::SeqCst);
                    }
                })
            })
            .collect();

        for handle in handles {
            handle.join().expect("thread join");
        }

        // Verify all increments were recorded
        assert_eq!(atomic.load(Ordering::SeqCst), 4000);

        fs::remove_file(&path).expect("cleanup");
    }
}