// oxicuda_memory/virtual_memory.rs
1//! Virtual memory management for fine-grained GPU address space control.
2//!
3//! This module provides abstractions for CUDA's virtual memory management
4//! API (`cuMemAddressReserve`, `cuMemCreate`, `cuMemMap`, etc.), which
5//! allows separating the concepts of virtual address reservation and
6//! physical memory allocation.
7//!
8//! # Concepts
9//!
10//! * **Virtual Address Range** — A reservation of contiguous virtual
11//! addresses in the GPU address space. No physical memory is committed
12//! until explicitly mapped.
13//!
14//! * **Physical Allocation** — A chunk of physical GPU memory that can
15//! be mapped to one or more virtual address ranges.
16//!
17//! * **Mapping** — The association of a physical allocation with a region
18//! of a virtual address range.
19//!
20//! # Use Cases
21//!
22//! * **Sparse arrays** — Reserve a large virtual range but only commit
23//! physical memory for the tiles/pages that are actually used.
24//!
25//! * **Resizable buffers** — Reserve a large virtual range up-front and
26//! map additional physical memory as the buffer grows, without changing
27//! the base address.
28//!
29//! * **Multi-GPU memory** — Map physical allocations from different devices
30//! into the same virtual address space.
31//!
32//! # Status
33//!
34//! The CUDA virtual-memory management entry points (`cuMemAddressReserve`,
35//! `cuMemCreate`, `cuMemMap`, `cuMemUnmap`, `cuMemSetAccess`,
36//! `cuMemRelease`, `cuMemAddressFree`) are now wired through
37//! `oxicuda-driver`. Operations forward to the driver when it is
38//! available; on platforms without a CUDA driver (such as macOS),
39//! [`oxicuda_driver::loader::try_driver`] returns
40//! [`CudaError::NotInitialized`]. When the driver loads but a particular
41//! VMM symbol is missing (older drivers), the corresponding method
42//! returns [`CudaError::NotSupported`].
43//!
44//! # Example
45//!
46//! ```rust,no_run
47//! use oxicuda_memory::virtual_memory::VirtualMemoryManager;
48//!
49//! // Reserve 1 GiB of virtual address space with 2 MiB alignment.
50//! let va = VirtualMemoryManager::reserve(1 << 30, 1 << 21)?;
51//! assert_eq!(va.size(), 1 << 30);
52//! # Ok::<(), oxicuda_driver::error::CudaError>(())
53//! ```
54
55use std::fmt;
56
57use oxicuda_driver::error::{CudaError, CudaResult, check};
58use oxicuda_driver::ffi::{
59 CUdeviceptr, CUmemAccessDesc, CUmemAllocationHandleType, CUmemAllocationProp,
60 CUmemAllocationType, CUmemGenericAllocationHandle, CUmemLocation, CUmemLocationType,
61};
62
63// ---------------------------------------------------------------------------
64// AccessFlags
65// ---------------------------------------------------------------------------
66
/// Memory access permission flags for virtual memory mappings.
///
/// These flags control how a mapped virtual address range can be accessed
/// by a given device. They are translated to the raw CUDA access-flag
/// values (`None` = 0, `Read` = 1, `ReadWrite` = 3) by
/// [`VirtualMemoryManager::set_access`].
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash)]
pub enum AccessFlags {
    /// No access permitted. The mapping exists but cannot be read or written.
    #[default]
    None,
    /// Read-only access. The device can read but not write.
    Read,
    /// Full read-write access.
    ReadWrite,
}
81
82impl fmt::Display for AccessFlags {
83 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
84 match self {
85 Self::None => write!(f, "None"),
86 Self::Read => write!(f, "Read"),
87 Self::ReadWrite => write!(f, "ReadWrite"),
88 }
89 }
90}
91
92// ---------------------------------------------------------------------------
93// VirtualAddressRange
94// ---------------------------------------------------------------------------
95
/// A reserved range of virtual addresses in the GPU address space.
///
/// This represents a contiguous block of virtual addresses that has been
/// reserved but not necessarily backed by physical memory. Physical memory
/// is associated with the range via [`VirtualMemoryManager::map`].
///
/// Instances are obtained from [`VirtualMemoryManager::reserve`] and
/// released with [`VirtualMemoryManager::release`].
///
/// # Note
///
/// On systems without CUDA virtual memory support, the `base` address
/// is set to 0 and operations on the range will return
/// [`CudaError::NotSupported`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct VirtualAddressRange {
    // Base virtual address chosen by the driver at reservation time.
    base: u64,
    // Length of the reservation in bytes.
    size: usize,
    // Alignment requested at reservation time (validated as a non-zero
    // power of two by `VirtualMemoryManager::reserve`).
    alignment: usize,
}
113
114impl VirtualAddressRange {
115 /// Returns the base virtual address of the range.
116 #[inline]
117 pub fn base(&self) -> u64 {
118 self.base
119 }
120
121 /// Returns the size of the range in bytes.
122 #[inline]
123 pub fn size(&self) -> usize {
124 self.size
125 }
126
127 /// Returns the alignment of the range in bytes.
128 #[inline]
129 pub fn alignment(&self) -> usize {
130 self.alignment
131 }
132
133 /// Returns whether the range contains the given virtual address.
134 pub fn contains(&self, addr: u64) -> bool {
135 addr >= self.base && addr < self.base.saturating_add(self.size as u64)
136 }
137
138 /// Returns the end address (exclusive) of the range.
139 #[inline]
140 pub fn end(&self) -> u64 {
141 self.base.saturating_add(self.size as u64)
142 }
143}
144
145impl fmt::Display for VirtualAddressRange {
146 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
147 write!(
148 f,
149 "VA[0x{:016x}..0x{:016x}, {} bytes, align={}]",
150 self.base,
151 self.end(),
152 self.size,
153 self.alignment,
154 )
155 }
156}
157
158// ---------------------------------------------------------------------------
159// PhysicalAllocation
160// ---------------------------------------------------------------------------
161
/// A physical memory allocation on a specific GPU device.
///
/// Physical allocations represent actual GPU VRAM that can be mapped
/// into virtual address ranges. Multiple virtual ranges can map to
/// the same physical allocation (aliasing).
///
/// Instances are obtained from [`VirtualMemoryManager::alloc_physical`]
/// and freed with [`VirtualMemoryManager::free_physical`].
///
/// # Note
///
/// On systems without CUDA virtual memory support, the `handle` is
/// set to 0 and the allocation is not backed by real memory.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PhysicalAllocation {
    // Opaque driver handle (CUmemGenericAllocationHandle) from cuMemCreate.
    handle: u64,
    // Allocation size in bytes.
    size: usize,
    // Ordinal of the device the memory lives on (validated non-negative).
    device_ordinal: i32,
}
178
179impl PhysicalAllocation {
180 /// Returns the opaque handle for this physical allocation.
181 #[inline]
182 pub fn handle(&self) -> u64 {
183 self.handle
184 }
185
186 /// Returns the size of this allocation in bytes.
187 #[inline]
188 pub fn size(&self) -> usize {
189 self.size
190 }
191
192 /// Returns the device ordinal this allocation belongs to.
193 #[inline]
194 pub fn device_ordinal(&self) -> i32 {
195 self.device_ordinal
196 }
197}
198
199impl fmt::Display for PhysicalAllocation {
200 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
201 write!(
202 f,
203 "PhysAlloc[handle=0x{:016x}, {} bytes, dev={}]",
204 self.handle, self.size, self.device_ordinal,
205 )
206 }
207}
208
209// ---------------------------------------------------------------------------
210// MappingRecord — tracks virtual-to-physical mappings
211// ---------------------------------------------------------------------------
212
/// A record of a virtual-to-physical memory mapping.
///
/// This is a passive bookkeeping value: creating or dropping a
/// `MappingRecord` does not itself map or unmap anything — the actual
/// driver operations are performed by [`VirtualMemoryManager::map`] and
/// [`VirtualMemoryManager::unmap`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MappingRecord {
    /// Offset within the virtual address range where the mapping starts.
    pub va_offset: usize,
    /// Size of the mapped region in bytes.
    pub size: usize,
    /// Handle of the physical allocation backing this mapping.
    pub phys_handle: u64,
    /// Access permissions for this mapping.
    pub access: AccessFlags,
}
225
226// ---------------------------------------------------------------------------
227// VirtualMemoryManager
228// ---------------------------------------------------------------------------
229
/// Manager for GPU virtual memory operations.
///
/// Provides methods for reserving virtual address ranges, allocating
/// physical memory, mapping/unmapping, and setting access permissions.
/// All methods are stateless associated functions; this is a zero-sized
/// namespace type with no instance data.
///
/// # Status
///
/// The underlying CUDA virtual memory driver functions
/// (`cuMemAddressReserve`, `cuMemCreate`, `cuMemMap`, `cuMemUnmap`,
/// `cuMemSetAccess`, `cuMemRelease`, `cuMemAddressFree`) are wired
/// through `oxicuda-driver`. On systems without a CUDA driver
/// the calls fail with [`CudaError::NotInitialized`]; on systems
/// with a driver that lacks a specific VMM symbol the calls fail
/// with [`CudaError::NotSupported`].
pub struct VirtualMemoryManager;
245
impl VirtualMemoryManager {
    /// Reserves a range of virtual addresses in the GPU address space.
    ///
    /// The reserved range is not backed by physical memory until
    /// [`map`](Self::map) is called.
    ///
    /// # Parameters
    ///
    /// * `size` - Size of the virtual range to reserve in bytes.
    ///   Must be a multiple of `alignment`.
    /// * `alignment` - Alignment requirement in bytes. Must be a power
    ///   of two and non-zero.
    ///
    /// # Errors
    ///
    /// * [`CudaError::InvalidValue`] if `size` is zero, `alignment` is
    ///   zero, `alignment` is not a power of two, or `size` is not a
    ///   multiple of `alignment`.
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemAddressReserve`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn reserve(size: usize, alignment: usize) -> CudaResult<VirtualAddressRange> {
        // Validate arguments locally first so argument errors are
        // deterministic even on hosts with no CUDA driver.
        if size == 0 {
            return Err(CudaError::InvalidValue);
        }
        if alignment == 0 || !alignment.is_power_of_two() {
            return Err(CudaError::InvalidValue);
        }
        if size % alignment != 0 {
            return Err(CudaError::InvalidValue);
        }

        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_address_reserve.ok_or(CudaError::NotSupported)?;
        let mut base: CUdeviceptr = 0;
        // addr=0 lets the driver choose; flags=0 (reserved for future use).
        // SAFETY: `f` is the loaded `cuMemAddressReserve` symbol, and
        // `base` is a valid out-pointer for the duration of the call.
        check(unsafe { f(&mut base, size, alignment, 0, 0) })?;

        Ok(VirtualAddressRange {
            base,
            size,
            alignment,
        })
    }

    /// Releases a previously reserved virtual address range.
    ///
    /// After this call, the virtual addresses are no longer reserved
    /// and may be reused by future reservations.
    ///
    /// # Errors
    ///
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemAddressFree`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn release(va: VirtualAddressRange) -> CudaResult<()> {
        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_address_free.ok_or(CudaError::NotSupported)?;
        // SAFETY: `f` is the loaded `cuMemAddressFree` symbol; the base and
        // size describe the reservation being released.
        check(unsafe { f(va.base, va.size) })
    }

    /// Allocates physical memory on the specified device.
    ///
    /// The allocated memory is not accessible until mapped into a
    /// virtual address range via [`map`](Self::map).
    ///
    /// # Parameters
    ///
    /// * `size` - Size of the allocation in bytes. Must be non-zero.
    /// * `device_ordinal` - Ordinal of the device to allocate on.
    ///
    /// # Errors
    ///
    /// * [`CudaError::InvalidValue`] if `size` is zero or `device_ordinal`
    ///   is negative.
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemCreate`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn alloc_physical(size: usize, device_ordinal: i32) -> CudaResult<PhysicalAllocation> {
        if size == 0 {
            return Err(CudaError::InvalidValue);
        }
        if device_ordinal < 0 {
            return Err(CudaError::InvalidValue);
        }

        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_create.ok_or(CudaError::NotSupported)?;

        // Pinned device memory on `device_ordinal`; no exportable handle
        // type is requested (handle stays process-local).
        let prop = CUmemAllocationProp {
            alloc_type: CUmemAllocationType::Pinned as u32,
            requested_handle_types: CUmemAllocationHandleType::None as u32,
            location: CUmemLocation {
                loc_type: CUmemLocationType::Device as u32,
                id: device_ordinal,
            },
            ..CUmemAllocationProp::default()
        };

        let mut handle: CUmemGenericAllocationHandle = 0;
        // SAFETY: `f` is the loaded `cuMemCreate` symbol; `handle` is a
        // valid out-pointer and `prop` outlives the call.
        check(unsafe { f(&mut handle, size, &prop, 0) })?;

        Ok(PhysicalAllocation {
            handle,
            size,
            device_ordinal,
        })
    }

    /// Frees a physical memory allocation.
    ///
    /// The allocation must not be currently mapped to any virtual range.
    ///
    /// # Errors
    ///
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemRelease`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn free_physical(phys: PhysicalAllocation) -> CudaResult<()> {
        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_release.ok_or(CudaError::NotSupported)?;
        // SAFETY: `f` is the loaded `cuMemRelease` symbol; the handle came
        // from `cuMemCreate` (or the driver reports an error status).
        check(unsafe { f(phys.handle) })
    }

    /// Maps a physical allocation to a region of a virtual address range.
    ///
    /// After mapping, GPU kernels can access the virtual addresses and
    /// reads/writes will be routed to the physical memory. The mapping
    /// always covers the *entire* physical allocation, starting at byte
    /// offset 0 of the allocation.
    ///
    /// # Parameters
    ///
    /// * `va` - The virtual address range to map into.
    /// * `phys` - The physical allocation to map.
    /// * `offset` - Byte offset within the virtual range at which to
    ///   start the mapping. Must be aligned to the VA's alignment.
    ///
    /// # Errors
    ///
    /// * [`CudaError::InvalidValue`] if `offset` is not aligned, or if
    ///   the physical allocation would extend past the end of the virtual
    ///   range.
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemMap`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn map(
        va: &VirtualAddressRange,
        phys: &PhysicalAllocation,
        offset: usize,
    ) -> CudaResult<()> {
        // Validate alignment
        if va.alignment > 0 && offset % va.alignment != 0 {
            return Err(CudaError::InvalidValue);
        }
        // Validate bounds
        let end = offset
            .checked_add(phys.size)
            .ok_or(CudaError::InvalidValue)?;
        if end > va.size {
            return Err(CudaError::InvalidValue);
        }

        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_map.ok_or(CudaError::NotSupported)?;

        // ptr = base + offset (VA), offset_into_phys = 0, size = phys.size, flags = 0.
        let target_va: CUdeviceptr = va.base.saturating_add(offset as u64);
        // SAFETY: `f` is the loaded `cuMemMap` symbol; the target address
        // is bounds-checked above to lie within the reservation.
        check(unsafe { f(target_va, phys.size, 0, phys.handle, 0) })
    }

    /// Unmaps a region of a virtual address range.
    ///
    /// After unmapping, accesses to the affected virtual addresses will
    /// fault. The physical memory is not freed — it can be remapped
    /// elsewhere.
    ///
    /// # Parameters
    ///
    /// * `va` - The virtual address range to unmap from.
    /// * `offset` - Byte offset within the range where unmapping starts.
    /// * `size` - Number of bytes to unmap.
    ///
    /// # Errors
    ///
    /// * [`CudaError::InvalidValue`] if the offset+size exceeds the
    ///   virtual range bounds.
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemUnmap`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn unmap(va: &VirtualAddressRange, offset: usize, size: usize) -> CudaResult<()> {
        // Bounds check with overflow detection before calling the driver.
        let end = offset.checked_add(size).ok_or(CudaError::InvalidValue)?;
        if end > va.size {
            return Err(CudaError::InvalidValue);
        }

        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_unmap.ok_or(CudaError::NotSupported)?;

        let target_va: CUdeviceptr = va.base.saturating_add(offset as u64);
        // SAFETY: `f` is the loaded `cuMemUnmap` symbol; the region is
        // bounds-checked above to lie within the reservation.
        check(unsafe { f(target_va, size) })
    }

    /// Sets access permissions for a virtual address range on a device.
    ///
    /// This controls whether the specified device can read and/or write
    /// to the mapped virtual addresses. The whole range is affected.
    ///
    /// # Parameters
    ///
    /// * `va` - The virtual address range to set permissions on.
    /// * `device_ordinal` - The device to grant/deny access for.
    /// * `flags` - The access permission flags.
    ///
    /// # Errors
    ///
    /// * [`CudaError::NotInitialized`] if no CUDA driver is available
    ///   (e.g. on macOS).
    /// * [`CudaError::NotSupported`] if the driver does not export
    ///   `cuMemSetAccess`.
    /// * Other [`CudaError`] variants on driver failure.
    pub fn set_access(
        va: &VirtualAddressRange,
        device_ordinal: i32,
        flags: AccessFlags,
    ) -> CudaResult<()> {
        let api = oxicuda_driver::loader::try_driver()?;
        let f = api.cu_mem_set_access.ok_or(CudaError::NotSupported)?;

        let desc = CUmemAccessDesc {
            location: CUmemLocation {
                loc_type: CUmemLocationType::Device as u32,
                id: device_ordinal,
            },
            // Raw CUDA access-flag values: 0 = none, 1 = read-only,
            // 3 = read | write.
            flags: match flags {
                AccessFlags::None => 0,
                AccessFlags::Read => 1,
                AccessFlags::ReadWrite => 3,
            },
        };

        // SAFETY: `f` is the loaded `cuMemSetAccess` symbol; `desc` is a
        // single descriptor (count = 1) that outlives the call.
        check(unsafe { f(va.base, va.size, &desc, 1) })
    }
}
495
496// ---------------------------------------------------------------------------
497// Tests
498// ---------------------------------------------------------------------------
499
#[cfg(test)]
mod tests {
    // These tests are written to pass on three kinds of hosts: no driver
    // at all (macOS), a driver without a GPU/VMM support, and a real CUDA
    // box. Driver-calling tests therefore accept either success or a
    // "driver unavailable" error kind instead of pinning one outcome.
    use super::*;

    /// Returns `true` when the driver-failure error kind is acceptable for a
    /// no-GPU host. The driver may be loaded but have no real GPU hardware or
    /// VMM granularity support, in which case it returns `InvalidValue`,
    /// `InvalidDevice`, `NoDevice`, or `InvalidContext` in addition to the
    /// canonical `NotInitialized` (no driver; macOS) and `NotSupported`
    /// (driver loaded but symbol missing) variants.
    fn is_driver_unavailable(err: &CudaError) -> bool {
        matches!(
            err,
            CudaError::NotInitialized
                | CudaError::NotSupported
                | CudaError::InvalidValue
                | CudaError::InvalidDevice
                | CudaError::NoDevice
                | CudaError::InvalidContext
        )
    }

    // -- Reservation: argument-validation paths --------------------------------

    #[test]
    fn reserve_zero_size_fails() {
        let result = VirtualMemoryManager::reserve(0, 4096);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn reserve_zero_alignment_fails() {
        let result = VirtualMemoryManager::reserve(4096, 0);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn reserve_non_power_of_two_alignment_fails() {
        let result = VirtualMemoryManager::reserve(4096, 3);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn reserve_misaligned_size_fails() {
        // 4096+1 is not a multiple of 4096
        let result = VirtualMemoryManager::reserve(4097, 4096);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    // -- Reservation: driver-call path on hosts without a CUDA driver ----------

    /// On a host without a CUDA driver, `reserve` must fail cleanly with one of
    /// the driver-unavailability error kinds rather than panicking.
    #[test]
    fn reserve_no_driver_returns_driver_unavailable() {
        let result = VirtualMemoryManager::reserve(4096, 4096);
        match result {
            Ok(va) => {
                // Real CUDA driver present: the driver gave us a base.
                assert_eq!(va.size(), 4096);
                assert_eq!(va.alignment(), 4096);
            }
            Err(e) => assert!(
                is_driver_unavailable(&e),
                "unexpected error from reserve: {e:?}"
            ),
        }
    }

    // -- VirtualAddressRange accessor methods ---------------------------------

    #[test]
    fn virtual_address_range_contains_synthetic() {
        // Build the value-object directly so the test runs everywhere.
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 8192,
            alignment: 4096,
        };
        assert!(va.contains(va.base()));
        assert!(va.contains(va.base() + 1));
        assert!(va.contains(va.base() + 8191));
        assert!(!va.contains(va.end()));
        assert!(!va.contains(va.base().wrapping_sub(1)));
    }

    #[test]
    fn virtual_address_range_end_synthetic() {
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        assert_eq!(va.end(), va.base() + 4096);
    }

    #[test]
    fn virtual_address_range_display_synthetic() {
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        let disp = format!("{va}");
        assert!(disp.contains("VA["));
        assert!(disp.contains("4096 bytes"));
    }

    // -- Physical allocation: argument-validation and driver-unavailable path --

    #[test]
    fn alloc_physical_zero_size_fails() {
        let result = VirtualMemoryManager::alloc_physical(0, 0);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn alloc_physical_negative_device_fails() {
        let result = VirtualMemoryManager::alloc_physical(4096, -1);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn alloc_physical_no_driver_returns_driver_unavailable() {
        let result = VirtualMemoryManager::alloc_physical(4096, 0);
        if let Err(e) = result {
            assert!(
                is_driver_unavailable(&e),
                "expected driver-unavailable error, got {e:?}"
            );
        }
        // On a real CUDA box the call may succeed; we only require not-panic.
    }

    #[test]
    fn release_no_driver_returns_driver_unavailable() {
        // NOTE(review): unlike free_physical/map below, this calls the
        // driver with a synthetic (never-reserved) range even when a driver
        // is loaded — confirm cuMemAddressFree on an unknown range only
        // returns an error status rather than faulting.
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        if let Err(e) = VirtualMemoryManager::release(va) {
            assert!(
                is_driver_unavailable(&e),
                "expected driver-unavailable error, got {e:?}"
            );
        }
    }

    #[test]
    fn free_physical_no_driver_returns_driver_unavailable() {
        // Calling cuMemRelease with a fake handle when the driver is loaded is
        // undefined behaviour that can SIGSEGV the process. This test only
        // covers the "driver not available" path.
        if oxicuda_driver::loader::try_driver().is_ok() {
            return;
        }
        let phys = PhysicalAllocation {
            handle: 1,
            size: 4096,
            device_ordinal: 0,
        };
        if let Err(e) = VirtualMemoryManager::free_physical(phys) {
            assert!(
                is_driver_unavailable(&e),
                "expected driver-unavailable error, got {e:?}"
            );
        }
    }

    // -- map / unmap / set_access argument-validation paths --------------------

    #[test]
    fn map_validates_alignment() {
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 8192,
            alignment: 4096,
        };
        let phys = PhysicalAllocation {
            handle: 1,
            size: 4096,
            device_ordinal: 0,
        };
        // Offset 1 is not aligned to 4096
        let result = VirtualMemoryManager::map(&va, &phys, 1);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn map_validates_bounds() {
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        let phys = PhysicalAllocation {
            handle: 1,
            size: 8192, // larger than VA range
            device_ordinal: 0,
        };
        let result = VirtualMemoryManager::map(&va, &phys, 0);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn map_no_driver_returns_driver_unavailable() {
        // Calling cuMemMap with a fake virtual address and fake handle when the
        // driver is loaded is undefined behaviour that can SIGSEGV the process.
        // This test only covers the "driver not available" path.
        if oxicuda_driver::loader::try_driver().is_ok() {
            return;
        }
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 8192,
            alignment: 4096,
        };
        let phys = PhysicalAllocation {
            handle: 1,
            size: 4096,
            device_ordinal: 0,
        };
        if let Err(e) = VirtualMemoryManager::map(&va, &phys, 0) {
            assert!(
                is_driver_unavailable(&e),
                "expected driver-unavailable error, got {e:?}"
            );
        }
    }

    #[test]
    fn unmap_validates_bounds() {
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        let result = VirtualMemoryManager::unmap(&va, 0, 8192);
        assert_eq!(result, Err(CudaError::InvalidValue));
    }

    #[test]
    fn unmap_no_driver_returns_driver_unavailable() {
        // NOTE(review): like release above, this reaches the driver with a
        // synthetic range when one is loaded — confirm cuMemUnmap on an
        // unmapped range is benign (error status, not a fault).
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        if let Err(e) = VirtualMemoryManager::unmap(&va, 0, 4096) {
            assert!(
                is_driver_unavailable(&e),
                "expected driver-unavailable error, got {e:?}"
            );
        }
    }

    #[test]
    fn set_access_no_driver_returns_driver_unavailable() {
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        if let Err(e) = VirtualMemoryManager::set_access(&va, 0, AccessFlags::ReadWrite) {
            assert!(
                is_driver_unavailable(&e),
                "expected driver-unavailable error, got {e:?}"
            );
        }
    }

    // -- Plain value-object tests (platform-independent) -----------------------

    #[test]
    fn access_flags_default() {
        assert_eq!(AccessFlags::default(), AccessFlags::None);
    }

    #[test]
    fn access_flags_display() {
        assert_eq!(format!("{}", AccessFlags::None), "None");
        assert_eq!(format!("{}", AccessFlags::Read), "Read");
        assert_eq!(format!("{}", AccessFlags::ReadWrite), "ReadWrite");
    }

    #[test]
    fn physical_allocation_display() {
        let phys = PhysicalAllocation {
            handle: 0x1234,
            size: 4096,
            device_ordinal: 0,
        };
        let disp = format!("{phys}");
        assert!(disp.contains("4096 bytes"));
        assert!(disp.contains("dev=0"));
    }

    #[test]
    fn mapping_record_fields() {
        let record = MappingRecord {
            va_offset: 0,
            size: 4096,
            phys_handle: 42,
            access: AccessFlags::ReadWrite,
        };
        assert_eq!(record.va_offset, 0);
        assert_eq!(record.size, 4096);
        assert_eq!(record.phys_handle, 42);
        assert_eq!(record.access, AccessFlags::ReadWrite);
    }

    /// On macOS specifically, every driver-calling method must return
    /// [`CudaError::NotInitialized`] (no library to load).
    #[cfg(target_os = "macos")]
    #[test]
    fn macos_paths_return_not_initialized() {
        assert_eq!(
            VirtualMemoryManager::reserve(4096, 4096),
            Err(CudaError::NotInitialized)
        );
        assert_eq!(
            VirtualMemoryManager::alloc_physical(4096, 0),
            Err(CudaError::NotInitialized)
        );
        let phys = PhysicalAllocation {
            handle: 1,
            size: 4096,
            device_ordinal: 0,
        };
        assert_eq!(
            VirtualMemoryManager::free_physical(phys.clone()),
            Err(CudaError::NotInitialized)
        );
        let va = VirtualAddressRange {
            base: 0x1_0000_0000,
            size: 4096,
            alignment: 4096,
        };
        assert_eq!(
            VirtualMemoryManager::release(va.clone()),
            Err(CudaError::NotInitialized)
        );
        assert_eq!(
            VirtualMemoryManager::map(&va, &phys, 0),
            Err(CudaError::NotInitialized)
        );
        assert_eq!(
            VirtualMemoryManager::unmap(&va, 0, 4096),
            Err(CudaError::NotInitialized)
        );
        assert_eq!(
            VirtualMemoryManager::set_access(&va, 0, AccessFlags::ReadWrite),
            Err(CudaError::NotInitialized)
        );
    }
}