mmap_io/
mmap.rs

//! Low-level memory-mapped file abstraction with safe, concurrent access.

use std::{
    fs::{File, OpenOptions},
    path::{Path, PathBuf},
    sync::Arc,
};

use memmap2::{Mmap, MmapMut};

#[cfg(feature = "cow")]
use memmap2::MmapOptions;

use parking_lot::{RwLock, RwLockWriteGuard};

use crate::errors::{MmapIoError, Result};
use crate::flush::FlushPolicy;
use crate::utils::{ensure_in_bounds, slice_range};

/// Hint for when to touch (prewarm) memory pages during mapping creation.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum TouchHint {
    /// Don't touch pages during creation (default).
    #[default]
    Never,
    /// Eagerly touch all pages during creation to prewarm page tables
    /// and improve first-access latency. Useful for benchmarking scenarios
    /// where you want consistent timing without page-fault overhead.
    Eager,
    /// Touch pages lazily on first access (currently equivalent to `Never`).
    Lazy,
}

// Error message constants
const ERR_ZERO_SIZE: &str = "Size must be greater than zero";
const ERR_ZERO_LENGTH_FILE: &str = "Cannot map zero-length file";

// Maximum safe mmap size. This prevents accidental exhaustion of address
// space or disk. It is intentionally very large to support legitimate use
// cases while still rejecting obvious errors such as u64::MAX.
#[cfg(target_pointer_width = "64")]
const MAX_MMAP_SIZE: u64 = 128 * (1 << 40); // 128 TiB on 64-bit systems

#[cfg(target_pointer_width = "32")]
const MAX_MMAP_SIZE: u64 = 2 * (1 << 30); // 2 GiB on 32-bit systems (practical limit)

/// Access mode for a memory-mapped file.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MmapMode {
    /// Read-only mapping.
    ReadOnly,
    /// Read-write mapping.
    ReadWrite,
    /// Copy-on-write mapping (private). Writes affect this mapping only; the underlying file remains unchanged.
    CopyOnWrite,
}

#[doc(hidden)]
pub struct Inner {
    pub(crate) path: PathBuf,
    pub(crate) file: File,
    pub(crate) mode: MmapMode,
    // Cached length to avoid repeated metadata queries
    pub(crate) cached_len: RwLock<u64>,
    // The mapping itself. An enum holds either the RO, RW, or COW variant.
    pub(crate) map: MapVariant,
    // Flush policy and accounting (RW only)
    pub(crate) flush_policy: FlushPolicy,
    pub(crate) written_since_last_flush: RwLock<u64>,
    // Huge pages preference (builder-set), effective on supported platforms
    #[cfg(feature = "hugepages")]
    pub(crate) huge_pages: bool,
}

#[doc(hidden)]
pub enum MapVariant {
    Ro(Mmap),
    Rw(RwLock<MmapMut>),
    /// Private, per-process copy-on-write mapping. The underlying file is not modified by writes.
    Cow(Mmap),
}

/// Memory-mapped file with safe, zero-copy region access.
///
/// This is the core type for memory-mapped file operations. It provides:
/// - Safe concurrent access through interior mutability
/// - Zero-copy reads and writes
/// - Automatic bounds checking
/// - Cross-platform compatibility
///
/// # Examples
///
/// ```no_run
/// use mmap_io::{MemoryMappedFile, MmapMode};
///
/// // Create a new 1 KiB file
/// let mmap = MemoryMappedFile::create_rw("data.bin", 1024)?;
///
/// // Write some data
/// mmap.update_region(0, b"Hello, world!")?;
/// mmap.flush()?;
///
/// // Open the existing file read-only
/// let ro_mmap = MemoryMappedFile::open_ro("data.bin")?;
/// let data = ro_mmap.as_slice(0, 13)?;
/// assert_eq!(data, b"Hello, world!");
/// # Ok::<(), mmap_io::MmapIoError>(())
/// ```
///
/// Cloning this struct is cheap; it clones an `Arc` to the inner state.
/// For read-write mappings, interior mutability is protected with an `RwLock`.
#[derive(Clone)]
pub struct MemoryMappedFile {
    pub(crate) inner: Arc<Inner>,
}

impl std::fmt::Debug for MemoryMappedFile {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut ds = f.debug_struct("MemoryMappedFile");
        ds.field("path", &self.inner.path)
            .field("mode", &self.inner.mode)
            .field("len", &self.len());
        #[cfg(feature = "hugepages")]
        {
            ds.field("huge_pages", &self.inner.huge_pages);
        }
        ds.finish()
    }
}

impl MemoryMappedFile {
    /// Builder for constructing a `MemoryMappedFile` with custom options.
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use mmap_io::{MemoryMappedFile, MmapMode};
    /// use mmap_io::flush::FlushPolicy;
    ///
    /// let mmap = MemoryMappedFile::builder("file.bin")
    ///     .mode(MmapMode::ReadWrite)
    ///     .size(1_000_000)
    ///     .flush_policy(FlushPolicy::EveryBytes(1_000_000))
    ///     .create()?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```
    pub fn builder<P: AsRef<Path>>(path: P) -> MemoryMappedFileBuilder {
        MemoryMappedFileBuilder {
            path: path.as_ref().to_path_buf(),
            size: None,
            mode: None,
            flush_policy: FlushPolicy::default(),
            touch_hint: TouchHint::default(),
            #[cfg(feature = "hugepages")]
            huge_pages: false,
        }
    }

    /// Create a new file (truncating if it exists) and memory-map it read-write with the given size.
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(1) for mapping creation
    /// - **Memory Usage**: Virtual address space of `size` bytes (physical memory allocated on demand)
    /// - **I/O Operations**: One file creation, one truncate, one mmap syscall
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::ResizeFailed` if size is zero or exceeds the maximum safe limit.
    /// Returns `MmapIoError::Io` if file creation or mapping fails.
    pub fn create_rw<P: AsRef<Path>>(path: P, size: u64) -> Result<Self> {
        if size == 0 {
            return Err(MmapIoError::ResizeFailed(ERR_ZERO_SIZE.into()));
        }
        if size > MAX_MMAP_SIZE {
            return Err(MmapIoError::ResizeFailed(format!(
                "Size {size} exceeds maximum safe limit of {MAX_MMAP_SIZE} bytes"
            )));
        }
        let path_ref = path.as_ref();
        let file = OpenOptions::new()
            .create(true)
            .write(true)
            .read(true)
            .truncate(true)
            .open(path_ref)?;
        file.set_len(size)?;
        // SAFETY: The file has been created with the correct size and permissions.
        // memmap2 handles platform-specific mmap details safely.
        // Note: create_rw ignores the huge pages preference; use the builder for that.
        let mmap = unsafe { MmapMut::map_mut(&file)? };
        let inner = Inner {
            path: path_ref.to_path_buf(),
            file,
            mode: MmapMode::ReadWrite,
            cached_len: RwLock::new(size),
            map: MapVariant::Rw(RwLock::new(mmap)),
            flush_policy: FlushPolicy::default(),
            written_since_last_flush: RwLock::new(0),
            #[cfg(feature = "hugepages")]
            huge_pages: false,
        };
        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Open an existing file and memory-map it read-only.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Io` if file opening or mapping fails.
    pub fn open_ro<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path_ref = path.as_ref();
        let file = OpenOptions::new().read(true).open(path_ref)?;
        let len = file.metadata()?.len();
        // SAFETY: The file is opened read-only and memmap2 ensures safe mapping.
        let mmap = unsafe { Mmap::map(&file)? };
        let inner = Inner {
            path: path_ref.to_path_buf(),
            file,
            mode: MmapMode::ReadOnly,
            cached_len: RwLock::new(len),
            map: MapVariant::Ro(mmap),
            flush_policy: FlushPolicy::Never,
            written_since_last_flush: RwLock::new(0),
            #[cfg(feature = "hugepages")]
            huge_pages: false,
        };
        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Open an existing file and memory-map it read-write.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::ResizeFailed` if the file is zero-length.
    /// Returns `MmapIoError::Io` if file opening or mapping fails.
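    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming "data.bin" already exists and is non-empty):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_rw("data.bin")?;
    /// mmap.update_region(0, b"new contents")?;
    /// mmap.flush()?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```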
    pub fn open_rw<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path_ref = path.as_ref();
        let file = OpenOptions::new().read(true).write(true).open(path_ref)?;
        let len = file.metadata()?.len();
        if len == 0 {
            return Err(MmapIoError::ResizeFailed(ERR_ZERO_LENGTH_FILE.into()));
        }
        // SAFETY: The file is opened read-write with proper permissions,
        // and we have verified it is not zero-length.
        // Note: open_rw ignores the huge pages preference; use the builder for that.
        let mmap = unsafe { MmapMut::map_mut(&file)? };
        let inner = Inner {
            path: path_ref.to_path_buf(),
            file,
            mode: MmapMode::ReadWrite,
            cached_len: RwLock::new(len),
            map: MapVariant::Rw(RwLock::new(mmap)),
            flush_policy: FlushPolicy::default(),
            written_since_last_flush: RwLock::new(0),
            #[cfg(feature = "hugepages")]
            huge_pages: false,
        };
        Ok(Self {
            inner: Arc::new(inner),
        })
    }

    /// Return the current mapping mode.
    #[must_use]
    pub fn mode(&self) -> MmapMode {
        self.inner.mode
    }

    /// Total length of the mapped file in bytes (cached).
    #[must_use]
    pub fn len(&self) -> u64 {
        *self.inner.cached_len.read()
    }

    /// Whether the mapped file is empty.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Get a zero-copy read-only slice for the range `[offset, offset + len)`.
    /// For RW mappings this cannot return a reference bound to a temporary guard; use `read_into` instead.
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(1) - direct pointer access
    /// - **Memory Usage**: No additional allocation (zero-copy)
    /// - **Cache Behavior**: May trigger page faults on first access
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
    /// Returns `MmapIoError::InvalidMode` for RW mappings (use `read_into` instead).
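    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming "data.bin" exists and holds at least 4 bytes):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_ro("data.bin")?;
    /// let bytes = mmap.as_slice(0, 4)?;
    /// assert_eq!(bytes.len(), 4);
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```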
    pub fn as_slice(&self, offset: u64, len: u64) -> Result<&[u8]> {
        let total = self.current_len()?;
        ensure_in_bounds(offset, len, total)?;
        match &self.inner.map {
            MapVariant::Ro(m) => {
                let (start, end) = slice_range(offset, len, total)?;
                Ok(&m[start..end])
            }
            MapVariant::Rw(_lock) => Err(MmapIoError::InvalidMode("use read_into for RW mappings")),
            MapVariant::Cow(m) => {
                let (start, end) = slice_range(offset, len, total)?;
                Ok(&m[start..end])
            }
        }
    }

    /// Get a zero-copy mutable slice for the range `[offset, offset + len)`.
    /// Only available in `ReadWrite` mode.
    ///
    /// Writes made through the returned guard bypass flush-policy accounting;
    /// if you rely on `flush()` noticing pending writes, prefer `update_region`.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::InvalidMode` if not in `ReadWrite` mode.
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
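    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming a writable "data.bin" of at least 5 bytes):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_rw("data.bin")?;
    /// let mut slice = mmap.as_slice_mut(0, 5)?;
    /// slice.as_mut().copy_from_slice(b"hello");
    /// drop(slice); // release the write lock before other operations
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```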
    pub fn as_slice_mut(&self, offset: u64, len: u64) -> Result<MappedSliceMut<'_>> {
        let (start, end) = slice_range(offset, len, self.current_len()?)?;
        match &self.inner.map {
            MapVariant::Ro(_) => Err(MmapIoError::InvalidMode(
                "mutable access on read-only mapping",
            )),
            MapVariant::Rw(lock) => {
                let guard = lock.write();
                Ok(MappedSliceMut {
                    guard,
                    range: start..end,
                })
            }
            MapVariant::Cow(_) => {
                // Phase-1: COW is read-only for safety. Writable COW will be added with a persistent
                // private RW view in a follow-up change.
                Err(MmapIoError::InvalidMode(
                    "mutable access on copy-on-write mapping (phase-1 read-only)",
                ))
            }
        }
    }

    /// Copy the provided bytes into the mapped file at the given offset.
    /// Bounds-checked write directly into the mapping (no intermediate buffer).
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(n) where n is `data.len()`
    /// - **Memory Usage**: No additional allocation
    /// - **I/O Operations**: May trigger a flush based on the flush policy
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::InvalidMode` if not in `ReadWrite` mode.
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
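    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming a writable "data.bin" of at least 13 bytes):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_rw("data.bin")?;
    /// mmap.update_region(0, b"Hello, world!")?;
    /// mmap.flush()?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```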
    pub fn update_region(&self, offset: u64, data: &[u8]) -> Result<()> {
        if data.is_empty() {
            return Ok(());
        }
        if self.inner.mode != MmapMode::ReadWrite {
            return Err(MmapIoError::InvalidMode(
                "Update region requires ReadWrite mode.",
            ));
        }
        let len = data.len() as u64;
        let (start, end) = slice_range(offset, len, self.current_len()?)?;
        match &self.inner.map {
            MapVariant::Ro(_) => Err(MmapIoError::InvalidMode(
                "Cannot write to read-only mapping",
            )),
            MapVariant::Rw(lock) => {
                {
                    let mut guard = lock.write();
                    guard[start..end].copy_from_slice(data);
                }
                // Apply flush policy
                self.apply_flush_policy(len)?;
                Ok(())
            }
            MapVariant::Cow(_) => Err(MmapIoError::InvalidMode(
                "Cannot write to copy-on-write mapping (phase-1 read-only)",
            )),
        }
    }

    /// Async write that enforces async-only flushing semantics: always flush after the write.
    /// Uses `spawn_blocking` to avoid blocking the async scheduler.
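    ///
    /// A minimal sketch (assuming the `async` feature is enabled and a Tokio
    /// runtime is driving the call):
    ///
    /// ```no_run
    /// # async fn demo() -> Result<(), mmap_io::MmapIoError> {
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_rw("data.bin")?;
    /// mmap.update_region_async(0, b"hello").await?; // flushes after the write
    /// # Ok(())
    /// # }
    /// ```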
    #[cfg(feature = "async")]
    pub async fn update_region_async(&self, offset: u64, data: &[u8]) -> Result<()> {
        // Perform the write in a blocking task
        let this = self.clone();
        let data_vec = data.to_vec();
        tokio::task::spawn_blocking(move || {
            // Synchronous write
            this.update_region(offset, &data_vec)?;
            // Async-only flushing: unconditionally flush after a write on the async path
            this.flush()
        })
        .await
        .map_err(|e| MmapIoError::FlushFailed(format!("join error: {e}")))?
    }

    /// Flush changes to disk. For read-only mappings, this is a no-op.
    ///
    /// Smart internal guards:
    /// - Skip I/O when there are no pending writes (the accumulator is zero)
    /// - On Linux, use msync(MS_ASYNC) as a cheaper hint; fall back to a full flush on error
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(n) where n is the size of the dirty pages
    /// - **I/O Operations**: Triggers a disk write of modified pages
    /// - **Optimization**: Skips the flush if nothing was written since the last flush
    /// - **Platform**: Linux uses async msync for better performance
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::FlushFailed` if the flush operation fails.
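    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming a writable "data.bin" of at least 3 bytes):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_rw("data.bin")?;
    /// mmap.update_region(0, &[1, 2, 3])?;
    /// mmap.flush()?; // persist dirty pages to disk
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```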
    pub fn flush(&self) -> Result<()> {
        match &self.inner.map {
            MapVariant::Ro(_) => Ok(()),
            MapVariant::Cow(_) => Ok(()), // no-op for COW
            MapVariant::Rw(lock) => {
                // Fast path: no pending writes => skip flushing I/O
                if *self.inner.written_since_last_flush.read() == 0 {
                    return Ok(());
                }

                // Platform-optimized path: Linux MS_ASYNC, best-effort
                #[cfg(all(unix, target_os = "linux"))]
                {
                    if let Ok(len) = self.current_len() {
                        if len > 0 && self.try_linux_async_flush(len as usize)? {
                            return Ok(());
                        }
                    }
                }

                // Fallback/full flush using the memmap2 API
                let guard = lock.read();
                guard
                    .flush()
                    .map_err(|e| MmapIoError::FlushFailed(e.to_string()))?;
                // Reset the accumulator after a successful flush
                *self.inner.written_since_last_flush.write() = 0;
                Ok(())
            }
        }
    }

    /// Async flush changes to disk. For read-only or COW mappings, this is a no-op.
    /// This method enforces "async-only flushing" semantics for async paths.
    #[cfg(feature = "async")]
    pub async fn flush_async(&self) -> Result<()> {
        // Use spawn_blocking to avoid blocking the async scheduler
        let this = self.clone();
        tokio::task::spawn_blocking(move || this.flush())
            .await
            .map_err(|e| MmapIoError::FlushFailed(format!("join error: {e}")))?
    }

    /// Async flush a specific byte range to disk.
    #[cfg(feature = "async")]
    pub async fn flush_range_async(&self, offset: u64, len: u64) -> Result<()> {
        let this = self.clone();
        tokio::task::spawn_blocking(move || this.flush_range(offset, len))
            .await
            .map_err(|e| MmapIoError::FlushFailed(format!("join error: {e}")))?
    }

    /// Flush a specific byte range to disk.
    ///
    /// Smart internal guards:
    /// - Skip I/O when the accumulator records no pending writes
    /// - Optimize microflushes (smaller than a page) with page-aligned batching
    /// - On Linux, prefer msync(MS_ASYNC) for the range; fall back to a full range flush on error
    ///
    /// # Performance Optimizations
    ///
    /// - **Microflush Detection**: Ranges smaller than a page are detected and batched
    /// - **Page Alignment**: Small ranges are expanded to page boundaries
    /// - **Async Hints**: Linux uses MS_ASYNC for better performance
    /// - **Zero-Copy**: No data is copied during flush operations
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
    /// Returns `MmapIoError::FlushFailed` if the flush operation fails.
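    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming a writable "data.bin" of at least 4096 bytes):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_rw("data.bin")?;
    /// mmap.update_region(128, b"dirty")?;
    /// mmap.flush_range(128, 5)?; // flush just the written range
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```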
    pub fn flush_range(&self, offset: u64, len: u64) -> Result<()> {
        if len == 0 {
            return Ok(());
        }
        ensure_in_bounds(offset, len, self.current_len()?)?;
        match &self.inner.map {
            MapVariant::Ro(_) => Ok(()),
            MapVariant::Cow(_) => Ok(()), // no-op for COW
            MapVariant::Rw(lock) => {
                // If we have no accumulated writes, skip I/O
                if *self.inner.written_since_last_flush.read() == 0 {
                    return Ok(());
                }

                let (start, end) = slice_range(offset, len, self.current_len()?)?;
                let range_len = end - start;

                // Microflush optimization: for small ranges, align to page boundaries
                // to reduce syscall overhead and improve cache locality
                let (optimized_start, optimized_len) = if range_len < crate::utils::page_size() {
                    use crate::utils::{align_up, page_size};
                    let page_sz = page_size();
                    let aligned_start = (start / page_sz) * page_sz;
                    let aligned_end = align_up(end as u64, page_sz as u64) as usize;
                    let file_len = self.current_len()? as usize;
                    let bounded_end = std::cmp::min(aligned_end, file_len);
                    let bounded_len = bounded_end.saturating_sub(aligned_start);
                    (aligned_start, bounded_len)
                } else {
                    (start, range_len)
                };

                // Linux MS_ASYNC optimization
                #[cfg(all(unix, target_os = "linux"))]
                {
                    // SAFETY: msync on a valid mapped range. We translate to a pointer within the map.
                    let msync_res: i32 = {
                        let guard = lock.read();
                        let base = guard.as_ptr();
                        let ptr = unsafe { base.add(optimized_start) } as *mut libc::c_void;
                        unsafe { libc::msync(ptr, optimized_len, libc::MS_ASYNC) }
                    };
                    if msync_res == 0 {
                        // Treat MS_ASYNC as success and reset the accumulator
                        *self.inner.written_since_last_flush.write() = 0;
                        return Ok(());
                    }
                    // else fall through to the full flush_range
                }

                let guard = lock.read();
                guard
                    .flush_range(optimized_start, optimized_len)
                    .map_err(|e| MmapIoError::FlushFailed(e.to_string()))?;
                // Reset the accumulator after a successful flush
                *self.inner.written_since_last_flush.write() = 0;
                Ok(())
            }
        }
    }

    /// Resize (grow or shrink) the mapped file (RW only). This remaps the file internally.
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(1) for the remap operation
    /// - **Memory Usage**: Allocates new virtual address space of `new_size` bytes
    /// - **I/O Operations**: File truncate/extend plus a new mmap syscall
    /// - **Note**: Existing pointers and slices become invalid after a resize
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::InvalidMode` if not in `ReadWrite` mode.
    /// Returns `MmapIoError::ResizeFailed` if the new size is zero or exceeds the maximum safe limit.
    /// Returns `MmapIoError::Io` if the resize operation fails.
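    ///
    /// # Examples
    ///
    /// A minimal sketch of growing a mapping (assuming "data.bin" can be created):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::create_rw("data.bin", 1024)?;
    /// mmap.resize(4096)?; // grow; previously obtained slices must not be reused
    /// assert_eq!(mmap.len(), 4096);
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```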
    pub fn resize(&self, new_size: u64) -> Result<()> {
        if self.inner.mode != MmapMode::ReadWrite {
            return Err(MmapIoError::InvalidMode("Resize requires ReadWrite mode"));
        }
        if new_size == 0 {
            return Err(MmapIoError::ResizeFailed(
                "New size must be greater than zero".into(),
            ));
        }
        if new_size > MAX_MMAP_SIZE {
            return Err(MmapIoError::ResizeFailed(format!(
                "New size {new_size} exceeds maximum safe limit of {MAX_MMAP_SIZE} bytes"
            )));
        }

        let current = self.current_len()?;

        // On Windows, shrinking a file with an active mapping fails with:
        // "The requested operation cannot be performed on a file with a user-mapped section open."
        // To keep the API usable, we shrink virtually by updating the cached length,
        // avoiding truncation while a mapping is active. Growing still truncates and remaps.
        #[cfg(windows)]
        {
            use std::cmp::Ordering;
            match new_size.cmp(&current) {
                Ordering::Less => {
                    // Virtually shrink: only update the cached length.
                    *self.inner.cached_len.write() = new_size;
                    return Ok(());
                }
                Ordering::Equal => {
                    return Ok(());
                }
                Ordering::Greater => {
                    // Proceed with a normal grow: extend the file, then remap.
                }
            }
        }

        // Update the on-disk length (always on non-Windows; only when growing on Windows).
        // Silence the unused-variable warning on targets where the Windows-only
        // comparison above is compiled out.
        let _ = &current;
        self.inner.file.set_len(new_size)?;

        // Remap with the new size.
        // SAFETY: The file was just resized and remains open read-write.
        let new_map = unsafe { MmapMut::map_mut(&self.inner.file)? };
        match &self.inner.map {
            MapVariant::Ro(_) => Err(MmapIoError::InvalidMode(
                "Cannot remap read-only mapping as read-write",
            )),
            MapVariant::Cow(_) => Err(MmapIoError::InvalidMode(
                "resize not supported on copy-on-write mapping",
            )),
            MapVariant::Rw(lock) => {
                let mut guard = lock.write();
                *guard = new_map;
                // Update the cached length
                *self.inner.cached_len.write() = new_size;
                Ok(())
            }
        }
    }

    /// Path to the underlying file.
    #[must_use]
    pub fn path(&self) -> &Path {
        &self.inner.path
    }

    /// Touch (prewarm) pages by reading the first byte of each page.
    /// This forces the OS to load all pages into physical memory, eliminating
    /// page faults during subsequent access. Useful for benchmarking and
    /// performance-critical sections.
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(n) where n is the number of pages
    /// - **Memory Usage**: Forces all pages into physical memory
    /// - **I/O Operations**: May trigger disk reads for unmapped pages
    /// - **Cache Behavior**: Optimizes subsequent access patterns
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_ro("data.bin")?;
    ///
    /// // Prewarm all pages before a performance-critical section
    /// mmap.touch_pages()?;
    ///
    /// // Subsequent accesses avoid page-fault overhead
    /// let data = mmap.as_slice(0, 1024)?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Io` if memory access fails.
    pub fn touch_pages(&self) -> Result<()> {
        use crate::utils::page_size;

        let total_len = self.current_len()?;
        if total_len == 0 {
            return Ok(());
        }

        let page_sz = page_size() as u64;
        let mut offset = 0;

        // Read a single byte from each page to fault it into memory.
        // The loop condition guarantees `offset < total_len`, so a one-byte
        // read is always in bounds.
        while offset < total_len {
            let mut buf = [0u8; 1];
            self.read_into(offset, &mut buf)?;
            offset += page_sz;
        }

        Ok(())
    }

    /// Touch (prewarm) a specific range of pages.
    /// Similar to `touch_pages()` but only affects the specified range.
    ///
    /// # Arguments
    ///
    /// * `offset` - Starting offset in bytes
    /// * `len` - Length of the range to touch, in bytes
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
    /// Returns `MmapIoError::Io` if memory access fails.
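    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming "data.bin" is at least 16 KiB):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_ro("data.bin")?;
    /// // Prewarm only the first 16 KiB before a hot loop.
    /// mmap.touch_pages_range(0, 16 * 1024)?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```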
    pub fn touch_pages_range(&self, offset: u64, len: u64) -> Result<()> {
        use crate::utils::{align_up, page_size};

        if len == 0 {
            return Ok(());
        }

        let total_len = self.current_len()?;
        crate::utils::ensure_in_bounds(offset, len, total_len)?;

        let page_sz = page_size() as u64;
        let start_page = (offset / page_sz) * page_sz;
        let end_offset = offset + len;
        let end_page = align_up(end_offset, page_sz);

        let mut page_offset = start_page;

        // Read a single byte from each page in the range to fault it in.
        // The loop condition guarantees `page_offset < total_len`.
        while page_offset < end_page && page_offset < total_len {
            let mut buf = [0u8; 1];
            self.read_into(page_offset, &mut buf)?;
            page_offset += page_sz;
        }

        Ok(())
    }
}

impl MemoryMappedFile {
    // Helper method to attempt a Linux-specific asynchronous flush (msync with MS_ASYNC)
    #[cfg(all(unix, target_os = "linux"))]
    fn try_linux_async_flush(&self, len: usize) -> Result<bool> {
        use std::os::fd::AsRawFd;

        // File descriptor (unused, kept for potential future use)
        let _fd = self.inner.file.as_raw_fd();

        // Get the mapping pointer for msync
        match &self.inner.map {
            MapVariant::Rw(lock) => {
                let guard = lock.read();
                let ptr = guard.as_ptr() as *mut libc::c_void;

                // SAFETY: msync requires a valid mapping address and length;
                // memmap2 guarantees the mapping is valid for `len` bytes.
                let ret = unsafe { libc::msync(ptr, len, libc::MS_ASYNC) };

                if ret == 0 {
                    // MS_ASYNC succeeded; reset the accumulator
                    *self.inner.written_since_last_flush.write() = 0;
                    Ok(true)
                } else {
                    // Fall back to a full flush
                    Ok(false)
                }
            }
            _ => Ok(false),
        }
    }
}

/// Create a memory mapping with optional huge pages support.
///
/// When `huge` is true on Linux, this function first tries to create a
/// mapping optimized for huge pages (madvise with MADV_HUGEPAGE plus a
/// best-effort populate), then falls back to a regular mapping with the
/// MADV_HUGEPAGE hint, and finally to regular pages if Transparent Huge
/// Pages (THP) are unavailable.
///
/// **Fallback behavior**: this is a best-effort optimization. The function
/// silently falls back through these options and never fails due to huge
/// page unavailability alone.
#[cfg(feature = "hugepages")]
fn map_mut_with_options(file: &File, len: u64, huge: bool) -> Result<MmapMut> {
    #[cfg(all(unix, target_os = "linux"))]
    {
        if huge {
            // First, try to create a mapping tuned for huge page usage
            if let Ok(mmap) = try_create_optimized_mapping(file, len) {
                log::debug!("Successfully created optimized mapping for huge pages");
                return Ok(mmap);
            }
        }

        // Create a standard mapping
        // SAFETY: the caller passes an open file; memmap2 validates the mapping.
        let mmap = unsafe { MmapMut::map_mut(file) }.map_err(MmapIoError::Io)?;

        if huge {
            // Request Transparent Huge Pages (THP) for the existing mapping.
            // This is a hint to the kernel, not a guarantee.
            unsafe {
                let mmap_ptr = mmap.as_ptr() as *mut libc::c_void;

                // MADV_HUGEPAGE: enable THP for this memory region
                let ret = libc::madvise(mmap_ptr, len as usize, libc::MADV_HUGEPAGE);

                if ret == 0 {
                    log::debug!("Successfully requested THP for {} bytes", len);
                } else {
                    log::debug!("madvise(MADV_HUGEPAGE) failed, using regular pages");
                }
            }
        }

        Ok(mmap)
    }
    #[cfg(not(all(unix, target_os = "linux")))]
    {
        // Huge pages are Linux-specific; ignore the flag on other platforms
        let _ = (len, huge);
        unsafe { MmapMut::map_mut(file) }.map_err(MmapIoError::Io)
    }
}

/// Create a mapping that is more likely to be backed by huge pages.
/// For sufficiently large files this maps normally, then advises the kernel
/// with MADV_HUGEPAGE and attempts a best-effort populate.
#[cfg(all(unix, target_os = "linux", feature = "hugepages"))]
fn try_create_optimized_mapping(file: &File, len: u64) -> Result<MmapMut> {
    // Only worthwhile for files of at least 2 MB (the typical huge page size)
    const HUGE_PAGE_SIZE: u64 = 2 * 1024 * 1024; // 2 MB

    if len >= HUGE_PAGE_SIZE {
        // Create the mapping and immediately advise huge pages
        let mmap = unsafe { MmapMut::map_mut(file) }.map_err(MmapIoError::Io)?;

        unsafe {
            let mmap_ptr = mmap.as_ptr() as *mut libc::c_void;

            // First try MADV_HUGEPAGE
            let ret = libc::madvise(mmap_ptr, len as usize, libc::MADV_HUGEPAGE);

            if ret == 0 {
                // Then try to populate the mapping to encourage huge page allocation.
                // MADV_POPULATE_WRITE is relatively new (Linux 5.14+), so define the
                // constant manually in case the libc crate does not expose it.
                const MADV_POPULATE_WRITE: i32 = 23;
                let populate_ret = libc::madvise(mmap_ptr, len as usize, MADV_POPULATE_WRITE);

                if populate_ret == 0 {
                    log::debug!("Successfully created and populated optimized mapping");
                } else {
                    log::debug!(
                        "Optimization successful, populate failed (expected on older kernels)"
                    );
                }
            }
        }

        Ok(mmap)
    } else {
        Err(MmapIoError::Io(std::io::Error::new(
            std::io::ErrorKind::InvalidInput,
            "File too small for huge page optimization",
        )))
    }
}

#[cfg(not(all(unix, target_os = "linux", feature = "hugepages")))]
#[allow(dead_code)]
fn try_create_optimized_mapping(_file: &File, _len: u64) -> Result<MmapMut> {
    Err(MmapIoError::Io(std::io::Error::new(
        std::io::ErrorKind::Unsupported,
        "Huge pages not supported on this platform",
    )))
}

#[cfg(feature = "cow")]
impl MemoryMappedFile {
    /// Open an existing file and memory-map it copy-on-write (private).
    /// Changes through this mapping are visible only within this process; the underlying file remains unchanged.
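    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming the `cow` feature is enabled and "data.bin"
    /// exists and is non-empty):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let cow = MemoryMappedFile::open_cow("data.bin")?;
    /// let bytes = cow.as_slice(0, 4)?; // reads work; writes are rejected in phase-1
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```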
    pub fn open_cow<P: AsRef<Path>>(path: P) -> Result<Self> {
        let path_ref = path.as_ref();
        let file = OpenOptions::new().read(true).open(path_ref)?;
        let len = file.metadata()?.len();
        if len == 0 {
            return Err(MmapIoError::ResizeFailed(ERR_ZERO_LENGTH_FILE.into()));
        }
        // SAFETY: memmap2 handles platform specifics. We request a private (copy-on-write) mapping.
        let mmap = unsafe {
            let mut opts = MmapOptions::new();
            opts.len(len as usize);
            #[cfg(unix)]
            {
                // memmap2 does not expose a stable private-mapping option across all
                // supported Rust/MSRV combinations. On Unix, mapping a read-only file
                // yields an immutable mapping; phase-1 COW therefore disallows writes.
                // When writable COW is introduced, platform flags will be used via
                // memmap2 internals.
                opts.map(&file)?
            }
            #[cfg(not(unix))]
            {
                // On Windows, memmap2 applies the appropriate WRITECOPY semantics internally for private mappings.
                opts.map(&file)?
            }
        };
        let inner = Inner {
            path: path_ref.to_path_buf(),
            file,
            mode: MmapMode::CopyOnWrite,
            cached_len: RwLock::new(len),
            map: MapVariant::Cow(mmap),
            // COW never flushes the underlying file in phase-1
            flush_policy: FlushPolicy::Never,
            written_since_last_flush: RwLock::new(0),
            #[cfg(feature = "hugepages")]
            huge_pages: false,
        };
        Ok(Self {
            inner: Arc::new(inner),
        })
    }
}

impl MemoryMappedFile {
    fn apply_flush_policy(&self, written: u64) -> Result<()> {
        match self.inner.flush_policy {
            FlushPolicy::Never | FlushPolicy::Manual => {
                // Record the write so an explicit flush() sees pending data;
                // otherwise the flush fast path would skip the I/O entirely.
                *self.inner.written_since_last_flush.write() += written;
                Ok(())
            }
            FlushPolicy::Always => {
                // Record, then flush immediately
                *self.inner.written_since_last_flush.write() += written;
                self.flush()
            }
            FlushPolicy::EveryBytes(n) => {
                let n = n as u64;
                if n == 0 {
                    return Ok(());
                }
                let mut acc = self.inner.written_since_last_flush.write();
                *acc += written;
                if *acc >= n {
                    // Do not reset prematurely; let flush() clear on success
                    drop(acc);
                    self.flush()
                } else {
                    Ok(())
                }
            }
            FlushPolicy::EveryWrites(w) => {
                if w == 0 {
                    return Ok(());
                }
                let mut acc = self.inner.written_since_last_flush.write();
                *acc += 1;
                if *acc >= w as u64 {
                    drop(acc);
                    self.flush()
                } else {
                    Ok(())
                }
            }
            FlushPolicy::EveryMillis(ms) => {
                if ms == 0 {
                    return Ok(());
                }

                // Record the write; the time-based flushing itself is handled
                // by a background thread, this policy only tracks pending bytes.
                *self.inner.written_since_last_flush.write() += written;
                Ok(())
            }
        }
    }

    /// Return the up-to-date file length (cached).
    /// This ensures the length remains correct even after a resize.
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::Io` if a metadata query fails (not expected in the current implementation).
    pub fn current_len(&self) -> Result<u64> {
        Ok(*self.inner.cached_len.read())
    }

    /// Read bytes from the mapping into the provided buffer, starting at `offset`.
    /// The read length is `buf.len()`; the range is bounds-checked.
    ///
    /// # Performance
    ///
    /// - **Time Complexity**: O(n) where n is `buf.len()`
    /// - **Memory Usage**: Uses the provided buffer; no additional allocation
    /// - **Cache Behavior**: Sequential access patterns are cache-friendly
    ///
    /// # Errors
    ///
    /// Returns `MmapIoError::OutOfBounds` if the range exceeds file bounds.
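    ///
    /// # Examples
    ///
    /// A minimal sketch (assuming "data.bin" holds at least 8 bytes):
    ///
    /// ```no_run
    /// use mmap_io::MemoryMappedFile;
    ///
    /// let mmap = MemoryMappedFile::open_ro("data.bin")?;
    /// let mut buf = [0u8; 8];
    /// mmap.read_into(0, &mut buf)?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```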
    pub fn read_into(&self, offset: u64, buf: &mut [u8]) -> Result<()> {
        let total = self.current_len()?;
        let len = buf.len() as u64;
        ensure_in_bounds(offset, len, total)?;
        match &self.inner.map {
            MapVariant::Ro(m) => {
                let (start, end) = slice_range(offset, len, total)?;
                buf.copy_from_slice(&m[start..end]);
                Ok(())
            }
            MapVariant::Rw(lock) => {
                let guard = lock.read();
                let (start, end) = slice_range(offset, len, total)?;
                buf.copy_from_slice(&guard[start..end]);
                Ok(())
            }
            MapVariant::Cow(m) => {
                let (start, end) = slice_range(offset, len, total)?;
                buf.copy_from_slice(&m[start..end]);
                Ok(())
            }
        }
    }
}

/// Builder for `MemoryMappedFile` construction with options.
pub struct MemoryMappedFileBuilder {
    path: PathBuf,
    size: Option<u64>,
    mode: Option<MmapMode>,
    flush_policy: FlushPolicy,
    touch_hint: TouchHint,
    #[cfg(feature = "hugepages")]
    huge_pages: bool,
}

impl MemoryMappedFileBuilder {
    /// Specify the size (required when creating new files in ReadWrite mode).
    pub fn size(mut self, size: u64) -> Self {
        self.size = Some(size);
        self
    }

    /// Specify the mode (ReadOnly, ReadWrite, CopyOnWrite).
    pub fn mode(mut self, mode: MmapMode) -> Self {
        self.mode = Some(mode);
        self
    }

    /// Specify the flush policy.
    pub fn flush_policy(mut self, policy: FlushPolicy) -> Self {
        self.flush_policy = policy;
        self
    }

    /// Specify when to touch (prewarm) memory pages.
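    ///
    /// A minimal sketch (assuming `TouchHint` is re-exported at the crate root
    /// alongside `MmapMode`, and that "data.bin" will be created fresh):
    ///
    /// ```no_run
    /// use mmap_io::{MemoryMappedFile, MmapMode, TouchHint};
    ///
    /// let mmap = MemoryMappedFile::builder("data.bin")
    ///     .mode(MmapMode::ReadWrite)
    ///     .size(1024 * 1024)
    ///     .touch_hint(TouchHint::Eager) // prewarm pages at creation
    ///     .create()?;
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```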
    pub fn touch_hint(mut self, hint: TouchHint) -> Self {
        self.touch_hint = hint;
        self
    }

    /// Request huge pages on Linux (best-effort, via transparent huge page hints).
    /// No-op on non-Linux platforms.
    #[cfg(feature = "hugepages")]
    pub fn huge_pages(mut self, enable: bool) -> Self {
        self.huge_pages = enable;
        self
    }

    /// Create a new mapping; ReadWrite mode requires a size for creation.
    pub fn create(self) -> Result<MemoryMappedFile> {
        let mode = self.mode.unwrap_or(MmapMode::ReadWrite);
        match mode {
            MmapMode::ReadWrite => {
                let size = self.size.ok_or_else(|| {
                    MmapIoError::ResizeFailed(
                        "Size must be set for create() in ReadWrite mode".into(),
                    )
                })?;
                if size == 0 {
                    return Err(MmapIoError::ResizeFailed(ERR_ZERO_SIZE.into()));
                }
                if size > MAX_MMAP_SIZE {
                    return Err(MmapIoError::ResizeFailed(format!(
                        "Size {size} exceeds maximum safe limit of {MAX_MMAP_SIZE} bytes"
                    )));
                }
                let path_ref = &self.path;
                let file = OpenOptions::new()
                    .create(true)
                    .write(true)
                    .read(true)
                    .truncate(true)
                    .open(path_ref)?;
                file.set_len(size)?;
                // Map with consideration for huge pages if requested
                #[cfg(feature = "hugepages")]
                let mmap = map_mut_with_options(&file, size, self.huge_pages)?;
                #[cfg(not(feature = "hugepages"))]
                let mmap = unsafe { MmapMut::map_mut(&file)? };

                // Time-based flusher placeholder: the Weak reference below is created
                // empty and never attached to the real Arc<Inner>, so the callback can
                // never upgrade it and never flushes; the flusher handle is also dropped
                // immediately. This only sketches the intended wiring; a working
                // implementation must derive the Weak from the Arc built below.
                if let FlushPolicy::EveryMillis(ms) = self.flush_policy {
                    if ms > 0 {
                        let mmap_weak: std::sync::Weak<Inner> = std::sync::Weak::new();
                        crate::flush::TimeBasedFlusher::new(ms, move || {
                            // With an empty Weak this upgrade always fails.
                            if let Some(inner) = mmap_weak.upgrade() {
                                // Check whether there are pending writes
                                let pending = *inner.written_since_last_flush.read() > 0;
                                if pending {
                                    // Create a temporary MemoryMappedFile to call flush
                                    let temp_mmap = MemoryMappedFile { inner };
                                    if temp_mmap.flush().is_ok() {
                                        return true; // successfully flushed
                                    }
                                }
                            }
                            false // no flush performed
                        });
                    }
                }

                let inner = Inner {
                    path: path_ref.clone(),
                    file,
                    mode,
                    cached_len: RwLock::new(size),
                    map: MapVariant::Rw(RwLock::new(mmap)),
                    flush_policy: self.flush_policy,
                    written_since_last_flush: RwLock::new(0),
                    #[cfg(feature = "hugepages")]
                    huge_pages: self.huge_pages,
                };

                let mmap_file = MemoryMappedFile {
                    inner: Arc::new(inner),
                };

                // Apply the touch hint if specified
                if self.touch_hint == TouchHint::Eager {
                    log::debug!("Eagerly touching all pages for {size} bytes");
                    if let Err(e) = mmap_file.touch_pages() {
                        // Don't fail creation; just log the warning
                        log::warn!("Failed to eagerly touch pages: {e}");
                    }
                }

                // See the placeholder note above: time-based flushing is not yet wired up.
                if let FlushPolicy::EveryMillis(ms) = self.flush_policy {
                    if ms > 0 {
                        log::debug!(
                            "Time-based flushing policy set to {ms} ms (implementation simplified)"
                        );
                    }
                }

                Ok(mmap_file)
            }
            MmapMode::ReadOnly => {
                let path_ref = &self.path;
                let file = OpenOptions::new().read(true).open(path_ref)?;
                let len = file.metadata()?.len();
                let mmap = unsafe { Mmap::map(&file)? };
                let inner = Inner {
                    path: path_ref.clone(),
                    file,
                    mode,
                    cached_len: RwLock::new(len),
                    map: MapVariant::Ro(mmap),
                    flush_policy: FlushPolicy::Never,
                    written_since_last_flush: RwLock::new(0),
                    #[cfg(feature = "hugepages")]
                    huge_pages: false,
                };
                Ok(MemoryMappedFile {
                    inner: Arc::new(inner),
                })
            }
            #[cfg(feature = "cow")]
            MmapMode::CopyOnWrite => {
                let path_ref = &self.path;
                let file = OpenOptions::new().read(true).open(path_ref)?;
                let len = file.metadata()?.len();
                if len == 0 {
                    return Err(MmapIoError::ResizeFailed(ERR_ZERO_LENGTH_FILE.into()));
                }
                let mmap = unsafe {
                    let mut opts = MmapOptions::new();
                    opts.len(len as usize);
                    opts.map(&file)?
                };
                let inner = Inner {
                    path: path_ref.clone(),
                    file,
                    mode,
                    cached_len: RwLock::new(len),
                    map: MapVariant::Cow(mmap),
                    flush_policy: FlushPolicy::Never,
                    written_since_last_flush: RwLock::new(0),
                    #[cfg(feature = "hugepages")]
                    huge_pages: false,
                };
                Ok(MemoryMappedFile {
                    inner: Arc::new(inner),
                })
            }
            #[cfg(not(feature = "cow"))]
            MmapMode::CopyOnWrite => Err(MmapIoError::InvalidMode(
                "CopyOnWrite mode requires 'cow' feature",
            )),
        }
    }

    /// Open an existing file with the provided mode (size is ignored).
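    ///
    /// A minimal sketch (assuming "data.bin" exists):
    ///
    /// ```no_run
    /// use mmap_io::{MemoryMappedFile, MmapMode};
    ///
    /// let mmap = MemoryMappedFile::builder("data.bin")
    ///     .mode(MmapMode::ReadOnly)
    ///     .open()?;
    /// assert_eq!(mmap.mode(), MmapMode::ReadOnly);
    /// # Ok::<(), mmap_io::MmapIoError>(())
    /// ```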
    pub fn open(self) -> Result<MemoryMappedFile> {
        let mode = self.mode.unwrap_or(MmapMode::ReadOnly);
        match mode {
            MmapMode::ReadOnly => {
                let path_ref = &self.path;
                let file = OpenOptions::new().read(true).open(path_ref)?;
                let len = file.metadata()?.len();
                let mmap = unsafe { Mmap::map(&file)? };
                let inner = Inner {
                    path: path_ref.clone(),
                    file,
                    mode,
                    cached_len: RwLock::new(len),
                    map: MapVariant::Ro(mmap),
                    flush_policy: FlushPolicy::Never,
                    written_since_last_flush: RwLock::new(0),
                    #[cfg(feature = "hugepages")]
                    huge_pages: false,
                };
                Ok(MemoryMappedFile {
                    inner: Arc::new(inner),
                })
            }
            MmapMode::ReadWrite => {
                let path_ref = &self.path;
                let file = OpenOptions::new().read(true).write(true).open(path_ref)?;
                let len = file.metadata()?.len();
                if len == 0 {
                    return Err(MmapIoError::ResizeFailed(ERR_ZERO_LENGTH_FILE.into()));
                }
                #[cfg(feature = "hugepages")]
                let mmap = map_mut_with_options(&file, len, self.huge_pages)?;
                #[cfg(not(feature = "hugepages"))]
                let mmap = unsafe { MmapMut::map_mut(&file)? };
                let inner = Inner {
                    path: path_ref.clone(),
                    file,
                    mode,
                    cached_len: RwLock::new(len),
                    map: MapVariant::Rw(RwLock::new(mmap)),
                    flush_policy: self.flush_policy,
                    written_since_last_flush: RwLock::new(0),
                    #[cfg(feature = "hugepages")]
                    huge_pages: self.huge_pages,
                };
                Ok(MemoryMappedFile {
                    inner: Arc::new(inner),
                })
            }
            #[cfg(feature = "cow")]
            MmapMode::CopyOnWrite => {
                let path_ref = &self.path;
                let file = OpenOptions::new().read(true).open(path_ref)?;
                let len = file.metadata()?.len();
                if len == 0 {
                    return Err(MmapIoError::ResizeFailed(ERR_ZERO_LENGTH_FILE.into()));
                }
                let mmap = unsafe {
                    let mut opts = MmapOptions::new();
                    opts.len(len as usize);
                    opts.map(&file)?
                };
                let inner = Inner {
                    path: path_ref.clone(),
                    file,
                    mode,
                    cached_len: RwLock::new(len),
                    map: MapVariant::Cow(mmap),
                    flush_policy: FlushPolicy::Never,
                    written_since_last_flush: RwLock::new(0),
                    #[cfg(feature = "hugepages")]
                    huge_pages: false,
                };
                Ok(MemoryMappedFile {
                    inner: Arc::new(inner),
                })
            }
            #[cfg(not(feature = "cow"))]
            MmapMode::CopyOnWrite => Err(MmapIoError::InvalidMode(
                "CopyOnWrite mode requires 'cow' feature",
            )),
        }
    }
}

/// Wrapper for a mutable slice that holds a write-lock guard,
/// ensuring exclusive access for the lifetime of the slice.
pub struct MappedSliceMut<'a> {
    guard: RwLockWriteGuard<'a, MmapMut>,
    range: std::ops::Range<usize>,
}

impl<'a> MappedSliceMut<'a> {
    /// Get the mutable slice.
    ///
    /// Note: This method is intentionally named `as_mut` for consistency,
    /// even though it conflicts with the standard trait naming.
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut [u8] {
        // Index by the stored range directly to avoid cloning it
        let start = self.range.start;
        let end = self.range.end;
        &mut self.guard[start..end]
    }
}