1#![allow(unsafe_code)]
13
14use crate::error::{OxiGdalError, Result};
15use std::fs::File;
16use std::io;
17use std::ops::Deref;
18use std::os::unix::io::AsRawFd;
19use std::path::Path;
20use std::ptr::NonNull;
21use std::sync::Arc;
22use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
23
/// How the file's pages are mapped into process memory.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MemoryMapMode {
    /// Read-only mapping; writes through the mapping are forbidden.
    ReadOnly,
    /// Writable shared mapping (`MAP_SHARED`): changes are carried through
    /// to the underlying file.
    ReadWrite,
    /// Writable private mapping (`MAP_PRIVATE`): changes are copy-on-write
    /// and never reach the underlying file.
    CopyOnWrite,
}
34
/// Kernel advice (`madvise`) describing the expected access behavior
/// for the mapped pages. Applied on Linux only; a no-op elsewhere.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessPattern {
    /// No special advice (`MADV_NORMAL`).
    Normal,
    /// Expect sequential reads (`MADV_SEQUENTIAL`).
    Sequential,
    /// Expect random access (`MADV_RANDOM`).
    Random,
    /// Pages will be needed soon; ask the kernel to read ahead
    /// (`MADV_WILLNEED`).
    WillNeed,
    /// Pages are not needed for now (`MADV_DONTNEED`).
    DontNeed,
}
49
/// Options controlling how a memory map is created.
#[derive(Debug, Clone)]
pub struct MemoryMapConfig {
    /// Read-only, read-write, or copy-on-write mapping mode.
    pub mode: MemoryMapMode,
    /// `madvise` hint applied immediately after mapping (Linux only).
    pub access_pattern: AccessPattern,
    /// Request huge pages via `MAP_HUGETLB` (Linux only).
    pub use_huge_pages: bool,
    /// NUMA node to bind to; -1 means no binding.
    /// NOTE(review): no code in this file consumes this field — confirm it
    /// is acted on elsewhere.
    pub numa_node: i32,
    /// Pre-fault all pages at map time via `MAP_POPULATE` (Linux only).
    pub populate: bool,
    /// Lock the mapped pages into RAM with `mlock` after creation.
    pub lock_memory: bool,
    /// Read-ahead size hint in bytes (defaults to 128 KiB).
    /// NOTE(review): not consumed by the mapping code in this file — verify
    /// intended use before relying on it.
    pub read_ahead_size: usize,
}
68
69impl Default for MemoryMapConfig {
70 fn default() -> Self {
71 Self {
72 mode: MemoryMapMode::ReadOnly,
73 access_pattern: AccessPattern::Normal,
74 use_huge_pages: false,
75 numa_node: -1,
76 populate: false,
77 lock_memory: false,
78 read_ahead_size: 128 * 1024, }
80 }
81}
82
83impl MemoryMapConfig {
84 #[must_use]
86 pub fn new() -> Self {
87 Self::default()
88 }
89
90 #[must_use]
92 pub fn with_mode(mut self, mode: MemoryMapMode) -> Self {
93 self.mode = mode;
94 self
95 }
96
97 #[must_use]
99 pub fn with_access_pattern(mut self, pattern: AccessPattern) -> Self {
100 self.access_pattern = pattern;
101 self
102 }
103
104 #[must_use]
106 pub fn with_huge_pages(mut self, enable: bool) -> Self {
107 self.use_huge_pages = enable;
108 self
109 }
110
111 #[must_use]
113 pub fn with_numa_node(mut self, node: i32) -> Self {
114 self.numa_node = node;
115 self
116 }
117
118 #[must_use]
120 pub fn with_populate(mut self, populate: bool) -> Self {
121 self.populate = populate;
122 self
123 }
124
125 #[must_use]
127 pub fn with_lock_memory(mut self, lock: bool) -> Self {
128 self.lock_memory = lock;
129 self
130 }
131
132 #[must_use]
134 pub fn with_read_ahead_size(mut self, size: usize) -> Self {
135 self.read_ahead_size = size;
136 self
137 }
138}
139
/// A file memory-mapped via `mmap`.
///
/// Dereferences to `&[u8]`. The mapping is unmapped (and unlocked, if it
/// was `mlock`ed) automatically on drop.
pub struct MemoryMap {
    // Base address returned by mmap; never null (checked at creation).
    ptr: NonNull<u8>,
    // Length of the mapping in bytes (the file size at map time; non-zero).
    len: usize,
    // Keeps the backing file descriptor alive for the mapping's lifetime.
    _file: Arc<File>,
    // Creation options; `access_pattern` is re-read by
    // `apply_access_pattern` on Linux, otherwise the field is unused.
    #[allow(dead_code)]
    config: MemoryMapConfig,
    // True for ReadWrite/CopyOnWrite mappings; gates the mutable accessors.
    is_mutable: bool,
    // Count of slice accesses recorded by `record_access`.
    accesses: AtomicUsize,
    // Whether the pages are currently locked via `mlock`.
    is_locked: AtomicBool,
}
158
159impl MemoryMap {
160 pub fn new<P: AsRef<Path>>(path: P) -> Result<Self> {
162 Self::with_config(path, MemoryMapConfig::default())
163 }
164
165 pub fn with_config<P: AsRef<Path>>(path: P, config: MemoryMapConfig) -> Result<Self> {
167 let file = match config.mode {
168 MemoryMapMode::ReadOnly => {
169 File::open(path).map_err(|e| OxiGdalError::io_error(e.to_string()))?
170 }
171 MemoryMapMode::ReadWrite | MemoryMapMode::CopyOnWrite => std::fs::OpenOptions::new()
172 .read(true)
173 .write(true)
174 .open(path)
175 .map_err(|e| OxiGdalError::io_error(e.to_string()))?,
176 };
177
178 let metadata = file
179 .metadata()
180 .map_err(|e| OxiGdalError::io_error(e.to_string()))?;
181 let len = metadata.len() as usize;
182
183 if len == 0 {
184 return Err(OxiGdalError::invalid_parameter(
185 "parameter",
186 "Cannot map empty file".to_string(),
187 ));
188 }
189
190 let is_mutable = matches!(
191 config.mode,
192 MemoryMapMode::ReadWrite | MemoryMapMode::CopyOnWrite
193 );
194
195 let ptr = unsafe { Self::map_file(&file, len, &config, is_mutable)? };
198
199 let map = Self {
200 ptr,
201 len,
202 _file: Arc::new(file),
203 config: config.clone(),
204 is_mutable,
205 accesses: AtomicUsize::new(0),
206 is_locked: AtomicBool::new(false),
207 };
208
209 map.apply_access_pattern()?;
211
212 if config.lock_memory {
214 map.lock()?;
215 }
216
217 Ok(map)
218 }
219
220 #[allow(unsafe_code)]
235 unsafe fn map_file(
236 file: &File,
237 len: usize,
238 config: &MemoryMapConfig,
239 is_mutable: bool,
240 ) -> Result<NonNull<u8>> {
241 unsafe {
244 let fd = file.as_raw_fd();
245
246 let prot = if is_mutable {
247 libc::PROT_READ | libc::PROT_WRITE
248 } else {
249 libc::PROT_READ
250 };
251
252 #[cfg(target_os = "linux")]
253 let mut flags = match config.mode {
254 MemoryMapMode::ReadOnly | MemoryMapMode::ReadWrite => libc::MAP_SHARED,
255 MemoryMapMode::CopyOnWrite => libc::MAP_PRIVATE,
256 };
257
258 #[cfg(not(target_os = "linux"))]
259 let flags = match config.mode {
260 MemoryMapMode::ReadOnly | MemoryMapMode::ReadWrite => libc::MAP_SHARED,
261 MemoryMapMode::CopyOnWrite => libc::MAP_PRIVATE,
262 };
263
264 if config.populate {
265 #[cfg(target_os = "linux")]
266 {
267 flags |= libc::MAP_POPULATE;
268 }
269 }
270
271 if config.use_huge_pages {
272 #[cfg(target_os = "linux")]
273 {
274 flags |= libc::MAP_HUGETLB;
275 }
276 }
277
278 let addr = libc::mmap(std::ptr::null_mut(), len, prot, flags, fd, 0);
279
280 if addr == libc::MAP_FAILED {
281 return Err(OxiGdalError::allocation_error(
282 io::Error::last_os_error().to_string(),
283 ));
284 }
285
286 NonNull::new(addr.cast::<u8>()).ok_or_else(|| {
287 OxiGdalError::allocation_error("mmap returned null pointer".to_string())
288 })
289 }
290 }
291
292 fn apply_access_pattern(&self) -> Result<()> {
294 #[cfg(target_os = "linux")]
295 {
296 let advice = match self.config.access_pattern {
297 AccessPattern::Normal => libc::MADV_NORMAL,
298 AccessPattern::Sequential => libc::MADV_SEQUENTIAL,
299 AccessPattern::Random => libc::MADV_RANDOM,
300 AccessPattern::WillNeed => libc::MADV_WILLNEED,
301 AccessPattern::DontNeed => libc::MADV_DONTNEED,
302 };
303
304 let result =
307 unsafe { libc::madvise(self.ptr.as_ptr() as *mut libc::c_void, self.len, advice) };
308
309 if result != 0 {
310 return Err(OxiGdalError::io_error(format!(
311 "madvise failed: {}",
312 io::Error::last_os_error()
313 )));
314 }
315 }
316
317 Ok(())
318 }
319
320 pub fn lock(&self) -> Result<()> {
322 if self.is_locked.load(Ordering::Relaxed) {
323 return Ok(());
324 }
325
326 #[cfg(target_os = "linux")]
327 {
328 let result = unsafe { libc::mlock(self.ptr.as_ptr() as *const libc::c_void, self.len) };
329
330 if result != 0 {
331 return Err(OxiGdalError::io_error(format!(
332 "mlock failed: {}",
333 io::Error::last_os_error()
334 )));
335 }
336
337 self.is_locked.store(true, Ordering::Relaxed);
338 }
339
340 Ok(())
341 }
342
343 pub fn unlock(&self) -> Result<()> {
345 if !self.is_locked.load(Ordering::Relaxed) {
346 return Ok(());
347 }
348
349 #[cfg(target_os = "linux")]
350 {
351 let result =
352 unsafe { libc::munlock(self.ptr.as_ptr() as *const libc::c_void, self.len) };
353
354 if result != 0 {
355 return Err(OxiGdalError::io_error(format!(
356 "munlock failed: {}",
357 io::Error::last_os_error()
358 )));
359 }
360
361 self.is_locked.store(false, Ordering::Relaxed);
362 }
363
364 Ok(())
365 }
366
367 pub fn prefetch(&self, offset: usize, len: usize) -> Result<()> {
369 if offset + len > self.len {
370 return Err(OxiGdalError::invalid_parameter(
371 "parameter",
372 "Prefetch range exceeds mapping size".to_string(),
373 ));
374 }
375
376 #[cfg(target_os = "linux")]
377 {
378 let ptr = unsafe { self.ptr.as_ptr().add(offset) };
379 let result =
380 unsafe { libc::madvise(ptr as *mut libc::c_void, len, libc::MADV_WILLNEED) };
381
382 if result != 0 {
383 return Err(OxiGdalError::io_error(format!(
384 "prefetch madvise failed: {}",
385 io::Error::last_os_error()
386 )));
387 }
388 }
389
390 Ok(())
391 }
392
393 pub fn evict(&self, offset: usize, len: usize) -> Result<()> {
395 if offset + len > self.len {
396 return Err(OxiGdalError::invalid_parameter(
397 "parameter",
398 "Evict range exceeds mapping size".to_string(),
399 ));
400 }
401
402 #[cfg(target_os = "linux")]
403 {
404 let ptr = unsafe { self.ptr.as_ptr().add(offset) };
405 let result =
406 unsafe { libc::madvise(ptr as *mut libc::c_void, len, libc::MADV_DONTNEED) };
407
408 if result != 0 {
409 return Err(OxiGdalError::io_error(format!(
410 "evict madvise failed: {}",
411 io::Error::last_os_error()
412 )));
413 }
414 }
415
416 Ok(())
417 }
418
419 pub fn flush(&self) -> Result<()> {
421 if !self.is_mutable {
422 return Ok(());
423 }
424
425 let result = unsafe {
428 libc::msync(
429 self.ptr.as_ptr().cast::<libc::c_void>(),
430 self.len,
431 libc::MS_SYNC,
432 )
433 };
434
435 if result != 0 {
436 return Err(OxiGdalError::io_error(format!(
437 "msync failed: {}",
438 io::Error::last_os_error()
439 )));
440 }
441
442 Ok(())
443 }
444
445 pub fn flush_async(&self, offset: usize, len: usize) -> Result<()> {
447 if !self.is_mutable {
448 return Ok(());
449 }
450
451 if offset + len > self.len {
452 return Err(OxiGdalError::invalid_parameter(
453 "parameter",
454 "Flush range exceeds mapping size".to_string(),
455 ));
456 }
457
458 let ptr = unsafe { self.ptr.as_ptr().add(offset) };
461 let result = unsafe { libc::msync(ptr.cast::<libc::c_void>(), len, libc::MS_ASYNC) };
463
464 if result != 0 {
465 return Err(OxiGdalError::io_error(format!(
466 "async msync failed: {}",
467 io::Error::last_os_error()
468 )));
469 }
470
471 Ok(())
472 }
473
474 pub fn len(&self) -> usize {
476 self.len
477 }
478
479 pub fn is_empty(&self) -> bool {
481 self.len == 0
482 }
483
484 pub fn access_count(&self) -> usize {
486 self.accesses.load(Ordering::Relaxed)
487 }
488
489 fn record_access(&self) {
491 self.accesses.fetch_add(1, Ordering::Relaxed);
492 }
493
494 pub fn as_slice(&self) -> &[u8] {
496 self.record_access();
497 unsafe { std::slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
500 }
501
502 pub fn as_mut_slice(&mut self) -> Result<&mut [u8]> {
504 if !self.is_mutable {
505 return Err(OxiGdalError::invalid_operation(
506 "Cannot get mutable slice from read-only mapping".to_string(),
507 ));
508 }
509 self.record_access();
510 Ok(unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) })
513 }
514
515 pub fn as_typed_slice<T: bytemuck::Pod>(&self) -> Result<&[T]> {
517 self.record_access();
518
519 if self.len % std::mem::size_of::<T>() != 0 {
520 return Err(OxiGdalError::invalid_parameter(
521 "parameter",
522 "Mapping size not aligned to type size".to_string(),
523 ));
524 }
525
526 let count = self.len / std::mem::size_of::<T>();
527 Ok(unsafe { std::slice::from_raw_parts(self.ptr.as_ptr() as *const T, count) })
530 }
531
532 pub fn as_typed_mut_slice<T: bytemuck::Pod>(&mut self) -> Result<&mut [T]> {
534 if !self.is_mutable {
535 return Err(OxiGdalError::invalid_operation(
536 "Cannot get mutable slice from read-only mapping".to_string(),
537 ));
538 }
539
540 self.record_access();
541
542 if self.len % std::mem::size_of::<T>() != 0 {
543 return Err(OxiGdalError::invalid_parameter(
544 "parameter",
545 "Mapping size not aligned to type size".to_string(),
546 ));
547 }
548
549 let count = self.len / std::mem::size_of::<T>();
550 Ok(unsafe { std::slice::from_raw_parts_mut(self.ptr.as_ptr().cast::<T>(), count) })
553 }
554}
555
/// Lets a `MemoryMap` be used wherever a byte slice is expected.
/// Note: dereferencing goes through `as_slice`, which increments the
/// access counter.
impl Deref for MemoryMap {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.as_slice()
    }
}
563
/// Byte-slice view of the mapping; like `Deref`, this counts as an access.
impl AsRef<[u8]> for MemoryMap {
    fn as_ref(&self) -> &[u8] {
        self.as_slice()
    }
}
569
impl Drop for MemoryMap {
    fn drop(&mut self) {
        // Best-effort munlock before unmapping; drop cannot surface errors,
        // so the result is deliberately discarded.
        if self.is_locked.load(Ordering::Relaxed) {
            let _ = self.unlock();
        }

        // SAFETY: `ptr`/`len` describe the mapping created by `map_file`,
        // and it is unmapped exactly once (here). The munmap return value
        // is ignored because drop has no way to report failure.
        unsafe {
            libc::munmap(self.ptr.as_ptr().cast::<libc::c_void>(), self.len);
        }
    }
}
585
// SAFETY: the mapping is owned by this struct (the raw pointer is private
// and mutable access requires `&mut self`), the backing file is kept alive
// via `Arc<File>`, and the counters are atomics, so moving a `MemoryMap`
// to another thread is sound.
unsafe impl Send for MemoryMap {}

// SAFETY: shared access only hands out `&[u8]` views and all interior
// state (`accesses`, `is_locked`) uses atomic operations.
// NOTE(review): for `ReadWrite` (shared) mappings the bytes can still be
// changed by other processes mapping the same file — a pre-existing caveat
// of this design, not introduced here.
unsafe impl Sync for MemoryMap {}
595
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;

    /// Builds a named temp file containing `size` zero bytes.
    fn create_temp_file(size: usize) -> tempfile::NamedTempFile {
        let mut tmp =
            tempfile::NamedTempFile::new().expect("Test helper: temp file creation should succeed");
        let zeros = vec![0u8; size];
        tmp.write_all(&zeros)
            .expect("Test helper: writing to temp file should succeed");
        tmp.flush()
            .expect("Test helper: flushing temp file should succeed");
        tmp
    }

    #[test]
    fn test_memory_map_readonly() {
        let tmp = create_temp_file(4096);

        let map =
            MemoryMap::new(tmp.path()).expect("Memory map creation should succeed in test");
        assert_eq!(map.len(), 4096);
        assert!(!map.is_empty());
        assert_eq!(map.as_slice().len(), 4096);
    }

    #[test]
    fn test_memory_map_config() {
        let tmp = create_temp_file(8192);

        let cfg = MemoryMapConfig::new()
            .with_mode(MemoryMapMode::ReadOnly)
            .with_access_pattern(AccessPattern::Sequential)
            .with_populate(true);

        let map = MemoryMap::with_config(tmp.path(), cfg)
            .expect("Memory map with custom config should succeed");
        assert_eq!(map.len(), 8192);
    }

    #[test]
    fn test_prefetch() {
        let tmp = create_temp_file(16384);

        let map = MemoryMap::new(tmp.path()).expect("Memory map creation should succeed");
        map.prefetch(0, 4096)
            .expect("First prefetch should succeed");
        map.prefetch(4096, 4096)
            .expect("Second prefetch should succeed");
    }

    #[test]
    fn test_typed_slice() {
        let tmp = create_temp_file(4096);

        let map = MemoryMap::new(tmp.path()).expect("Memory map creation should succeed");
        let words: &[u32] = map
            .as_typed_slice()
            .expect("Typed slice conversion should succeed");
        assert_eq!(words.len(), 1024);
    }

    #[test]
    fn test_access_count() {
        let tmp = create_temp_file(4096);

        let map = MemoryMap::new(tmp.path()).expect("Memory map creation should succeed");
        assert_eq!(map.access_count(), 0);

        // Each as_slice call bumps the counter by one.
        for expected in 1..=2 {
            let _view = map.as_slice();
            assert_eq!(map.access_count(), expected);
        }
    }
}