#![allow(unused_variables)]
#![allow(unused_unsafe)]
use super::allocation::{pinned_size_class, AllocationStats, PinnedAllocation, PinnedMemoryFlags};
use crate::cuda::cuda_sys_compat as cuda_sys;
use crate::cuda::error::{CudaError, CudaResult};
use cust::prelude::DevicePointer;
use std::collections::HashMap;
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc, Mutex,
};
use std::time::{Duration, Instant};
static PINNED_MANAGERS: once_cell::sync::Lazy<Mutex<HashMap<usize, Arc<PinnedMemoryManager>>>> =
once_cell::sync::Lazy::new(|| Mutex::new(HashMap::new()));
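/// Per-device manager for CUDA pinned (page-locked) host memory. Blocks are
/// bucketed into size-class pools and recycled across allocations to amortize
/// the cost of `cudaHostAlloc`/`cudaFreeHost`.
///
/// A minimal usage sketch (illustrative only; assumes a CUDA-capable device
/// and that the listed items are in scope):
///
/// ```ignore
/// let manager = get_pinned_memory_manager(0, None)?;
/// let request = PinnedMemoryRequest {
///     size: 1 << 20, // 1 MiB staging buffer
///     usage_pattern: UsagePattern::HostToDevice,
///     ..Default::default()
/// };
/// let allocation = manager.allocate_pinned(request)?;
/// // ... fill the buffer and issue transfers ...
/// manager.deallocate_pinned(allocation)?;
/// ```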
#[derive(Debug)]
pub struct PinnedMemoryManager {
device_id: usize,
pools: Mutex<HashMap<usize, PinnedMemoryPool>>,
total_pinned_memory: AtomicUsize,
peak_pinned_memory: AtomicUsize,
pinned_memory_limit: AtomicUsize,
allocation_stats: Mutex<PinnedAllocationStats>,
config: PinnedMemoryConfig,
last_cleanup: Mutex<Instant>,
transfer_metrics: Mutex<TransferMetrics>,
}
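/// A cache of pinned blocks for a single size class. `free_blocks` holds
/// recyclable blocks; `allocated_blocks` tracks blocks currently in use.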
#[derive(Debug)]
pub struct PinnedMemoryPool {
size_class: usize,
free_blocks: Vec<PinnedAllocation>,
allocated_blocks: Vec<PinnedAllocation>,
total_allocations: usize,
peak_usage: usize,
cache_hits: usize,
cache_misses: usize,
last_access: Instant,
}
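/// Tuning knobs for the pinned-memory manager; see the `Default` impl for the
/// baseline values (512 MiB cap, five-minute cache age, cleanup every minute).
///
/// Overriding a subset of fields (illustrative sketch):
///
/// ```ignore
/// let config = PinnedMemoryConfig {
///     max_pinned_memory: 1024 * 1024 * 1024, // raise the cap to 1 GiB
///     enable_device_mapping: true,           // request zero-copy mappings
///     ..Default::default()
/// };
/// let manager = PinnedMemoryManager::new(config)?;
/// ```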
#[derive(Debug, Clone)]
pub struct PinnedMemoryConfig {
pub max_pinned_memory: usize,
pub max_cache_age: Duration,
pub enable_auto_cleanup: bool,
pub cleanup_interval: Duration,
pub max_free_blocks_per_pool: usize,
pub enable_device_mapping: bool,
pub enable_portable_memory: bool,
pub enable_write_combining: bool,
pub enable_transfer_tracking: bool,
pub memory_alignment: usize,
}
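/// Parameters for a single pinned allocation. The current manager acts on
/// `size`, `enable_mapping`, `flags`, and `tag`; `usage_pattern`, `priority`,
/// and `alignment` are advisory hints carried for callers and future policy.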
#[derive(Debug, Clone)]
pub struct PinnedMemoryRequest {
pub size: usize,
pub enable_mapping: bool,
pub flags: PinnedMemoryFlags,
pub alignment: Option<usize>,
pub tag: Option<String>,
pub usage_pattern: UsagePattern,
pub priority: AllocationPriority,
}
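/// Expected transfer direction for the buffer. Currently advisory: it is
/// carried on the request but not yet consulted by the allocator.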
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UsagePattern {
HostToDevice,
DeviceToHost,
Bidirectional,
Staging,
Persistent,
}
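/// Relative priority of an allocation request. Variant order matters: the
/// derived `Ord` yields `Critical > High > Normal > Low`.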
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum AllocationPriority {
Low,
Normal,
High,
Critical,
}
#[derive(Debug, Clone)]
pub struct PinnedAllocationStats {
pub base_stats: AllocationStats,
pub mapping_success_rate: f32,
pub average_allocation_age: Duration,
pub total_mapped_allocations: u64,
pub current_mapped_allocations: u64,
pub memory_pressure_events: u64,
pub cleanup_operations: u64,
pub cleanup_bytes_freed: u64,
}
#[derive(Debug, Clone)]
pub struct TransferMetrics {
pub host_to_device: TransferStats,
pub device_to_host: TransferStats,
pub bidirectional: TransferStats,
pub overall_efficiency: f32,
pub peak_bandwidth: f64,
pub average_latency: Duration,
}
#[derive(Debug, Clone)]
pub struct TransferStats {
pub total_transfers: u64,
pub total_bytes: u64,
pub total_time: Duration,
pub average_bandwidth: f64,
pub peak_bandwidth: f64,
pub min_bandwidth: f64,
pub efficiency: f32,
}
#[derive(Debug, Clone)]
pub struct PinnedMemoryInfo {
pub device_id: usize,
pub current_allocated: usize,
pub peak_allocated: usize,
pub memory_limit: usize,
pub utilization_percent: usize,
pub active_pools: usize,
pub cached_allocations: usize,
pub fragmentation_level: f32,
}
#[derive(Debug, Clone)]
pub struct PinnedCleanupResult {
pub allocations_freed: usize,
pub bytes_freed: usize,
pub pools_cleaned: usize,
pub duration: Duration,
pub success: bool,
}
impl PinnedMemoryManager {
pub fn new(config: PinnedMemoryConfig) -> CudaResult<Self> {
Self::new_for_device(0, config)
}
pub fn new_for_device(device_id: usize, config: PinnedMemoryConfig) -> CudaResult<Self> {
Ok(Self {
device_id,
pools: Mutex::new(HashMap::new()),
total_pinned_memory: AtomicUsize::new(0),
peak_pinned_memory: AtomicUsize::new(0),
pinned_memory_limit: AtomicUsize::new(config.max_pinned_memory),
allocation_stats: Mutex::new(PinnedAllocationStats::default()),
config,
last_cleanup: Mutex::new(Instant::now()),
transfer_metrics: Mutex::new(TransferMetrics::default()),
})
}
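    /// Allocates pinned host memory, preferring a cached block from the
    /// matching size-class pool before calling into CUDA. The returned
    /// allocation is sized to the rounded-up size class, which may exceed
    /// `request.size`.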
pub fn allocate_pinned(&self, request: PinnedMemoryRequest) -> CudaResult<PinnedAllocation> {
let allocation_start = Instant::now();
self.validate_request(&request)?;
self.check_memory_limits(request.size)?;
let size_class = pinned_size_class(request.size);
if let Some(allocation) = self.try_pool_allocation(size_class, &request)? {
self.record_allocation_success(size_class, allocation_start, true);
return Ok(allocation);
}
let allocation = self.allocate_new_pinned_block(size_class, &request)?;
self.record_allocation_success(size_class, allocation_start, false);
Ok(allocation)
}
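    /// Returns a block to its size-class pool when the pool has room to
    /// cache it, otherwise releases it immediately via `cudaFreeHost`.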
pub fn deallocate_pinned(&self, allocation: PinnedAllocation) -> CudaResult<()> {
self.update_deallocation_stats(&allocation);
if self.should_cache_allocation(&allocation) {
self.return_to_pool(allocation)?;
} else {
self.free_pinned_allocation(allocation)?;
}
Ok(())
}
pub fn info(&self) -> PinnedMemoryInfo {
let current_allocated = self.total_pinned_memory.load(Ordering::Relaxed);
let peak_allocated = self.peak_pinned_memory.load(Ordering::Relaxed);
let memory_limit = self.pinned_memory_limit.load(Ordering::Relaxed);
let active_pools = self.pools.lock().map(|pools| pools.len()).unwrap_or(0);
let cached_allocations = self.get_cached_allocation_count();
PinnedMemoryInfo {
device_id: self.device_id,
current_allocated,
peak_allocated,
memory_limit,
utilization_percent: if memory_limit > 0 {
(current_allocated * 100) / memory_limit
} else {
0
},
active_pools,
cached_allocations,
fragmentation_level: self.calculate_fragmentation_level(),
}
}
pub fn stats(&self) -> CudaResult<PinnedAllocationStats> {
let stats = self
.allocation_stats
.lock()
.map_err(|_| CudaError::Context {
message: "Failed to acquire statistics lock".to_string(),
})?;
Ok(stats.clone())
}
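    /// Frees cached free blocks older than `max_cache_age` and drops pools
    /// that become empty.
    ///
    /// A typical maintenance loop (illustrative sketch):
    ///
    /// ```ignore
    /// if manager.should_run_cleanup() {
    ///     let result = manager.cleanup()?;
    ///     eprintln!("pinned cleanup freed {} bytes", result.bytes_freed);
    /// }
    /// ```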
pub fn cleanup(&self) -> CudaResult<PinnedCleanupResult> {
let cleanup_start = Instant::now();
let mut allocations_freed = 0;
let mut bytes_freed = 0;
let mut pools_cleaned = 0;
let mut pools = self.pools.lock().map_err(|_| CudaError::Context {
message: "Failed to acquire pools lock for cleanup".to_string(),
})?;
for pool in pools.values_mut() {
let result = pool.cleanup_old_allocations(Instant::now(), self.config.max_cache_age)?;
if result.allocations_freed > 0 {
allocations_freed += result.allocations_freed;
bytes_freed += result.bytes_freed;
pools_cleaned += 1;
}
}
pools.retain(|_, pool| !pool.is_empty());
if let Ok(mut last_cleanup) = self.last_cleanup.lock() {
*last_cleanup = Instant::now();
}
if let Ok(mut stats) = self.allocation_stats.lock() {
stats.cleanup_operations += 1;
stats.cleanup_bytes_freed += bytes_freed as u64;
}
Ok(PinnedCleanupResult {
allocations_freed,
bytes_freed,
pools_cleaned,
duration: cleanup_start.elapsed(),
success: true,
})
}
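    /// Records one completed transfer for bandwidth and latency accounting;
    /// a no-op when `enable_transfer_tracking` is disabled. Timing the copy
    /// is the caller's responsibility (illustrative sketch):
    ///
    /// ```ignore
    /// let start = Instant::now();
    /// // ... perform the host-to-device copy of `len` bytes ...
    /// manager.record_transfer(TransferDirection::HostToDevice, len, start.elapsed())?;
    /// ```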
pub fn record_transfer(
&self,
direction: TransferDirection,
bytes: usize,
duration: Duration,
) -> CudaResult<()> {
if !self.config.enable_transfer_tracking {
return Ok(());
}
let mut metrics = self
.transfer_metrics
.lock()
.map_err(|_| CudaError::Context {
message: "Failed to acquire transfer metrics lock".to_string(),
})?;
        // Guard against zero-length durations so bandwidth stays finite.
        let seconds = duration.as_secs_f64();
        let bandwidth = if seconds > 0.0 { bytes as f64 / seconds } else { 0.0 };
match direction {
TransferDirection::HostToDevice => {
metrics
.host_to_device
.update_stats(bytes, duration, bandwidth);
}
TransferDirection::DeviceToHost => {
metrics
.device_to_host
.update_stats(bytes, duration, bandwidth);
}
TransferDirection::Bidirectional => {
metrics
.bidirectional
.update_stats(bytes, duration, bandwidth);
}
}
if bandwidth > metrics.peak_bandwidth {
metrics.peak_bandwidth = bandwidth;
}
let total_transfers = metrics.host_to_device.total_transfers
+ metrics.device_to_host.total_transfers
+ metrics.bidirectional.total_transfers;
        if total_transfers > 0 {
            let total_time = metrics.host_to_device.total_time
                + metrics.device_to_host.total_time
                + metrics.bidirectional.total_time;
            metrics.average_latency = total_time / total_transfers as u32;
            // Efficiency mirrors the per-direction definition: average / peak.
            let total_bytes = metrics.host_to_device.total_bytes
                + metrics.device_to_host.total_bytes
                + metrics.bidirectional.total_bytes;
            if metrics.peak_bandwidth > 0.0 && total_time.as_secs_f64() > 0.0 {
                let avg = total_bytes as f64 / total_time.as_secs_f64();
                metrics.overall_efficiency = (avg / metrics.peak_bandwidth) as f32;
            }
        }
Ok(())
}
pub fn get_transfer_metrics(&self) -> CudaResult<TransferMetrics> {
let metrics = self
.transfer_metrics
.lock()
.map_err(|_| CudaError::Context {
message: "Failed to acquire transfer metrics lock".to_string(),
})?;
Ok(metrics.clone())
}
pub fn should_run_cleanup(&self) -> bool {
if !self.config.enable_auto_cleanup {
return false;
}
if let Ok(last_cleanup) = self.last_cleanup.lock() {
let age = Instant::now().duration_since(*last_cleanup);
age >= self.config.cleanup_interval
} else {
false
}
}
fn validate_request(&self, request: &PinnedMemoryRequest) -> CudaResult<()> {
if request.size == 0 {
return Err(CudaError::Context {
message: "Cannot allocate zero bytes".to_string(),
});
}
if request.size > self.config.max_pinned_memory {
return Err(CudaError::Context {
message: format!(
"Requested size {} exceeds maximum pinned memory {}",
request.size, self.config.max_pinned_memory
),
});
}
Ok(())
}
fn check_memory_limits(&self, size: usize) -> CudaResult<()> {
let current = self.total_pinned_memory.load(Ordering::Relaxed);
let limit = self.pinned_memory_limit.load(Ordering::Relaxed);
if current + size > limit {
if self.config.enable_auto_cleanup {
let _ = self.cleanup();
let current_after_cleanup = self.total_pinned_memory.load(Ordering::Relaxed);
if current_after_cleanup + size > limit {
return Err(CudaError::Context {
message: format!(
"Pinned memory allocation would exceed limit. Requested: {}, Current: {}, Limit: {}",
size, current_after_cleanup, limit
),
});
}
} else {
return Err(CudaError::Context {
message: format!(
"Pinned memory allocation would exceed limit. Requested: {}, Current: {}, Limit: {}",
size, current, limit
),
});
}
}
Ok(())
}
fn try_pool_allocation(
&self,
size_class: usize,
request: &PinnedMemoryRequest,
) -> CudaResult<Option<PinnedAllocation>> {
let mut pools = self.pools.lock().map_err(|_| CudaError::Context {
message: "Failed to acquire pools lock".to_string(),
})?;
if let Some(pool) = pools.get_mut(&size_class) {
if let Some(mut allocation) = pool.allocate() {
allocation.increment_usage();
if let Some(tag) = &request.tag {
allocation.metadata.tag = Some(tag.clone());
}
return Ok(Some(allocation));
}
}
Ok(None)
}
fn allocate_new_pinned_block(
&self,
size_class: usize,
request: &PinnedMemoryRequest,
) -> CudaResult<PinnedAllocation> {
let flags = self.calculate_cuda_flags(&request.flags);
let ptr = self.allocate_cuda_pinned_memory(size_class, flags)?;
let device_ptr = if request.enable_mapping || self.config.enable_device_mapping {
self.map_pinned_memory_to_device(ptr, size_class)?
} else {
None
};
let mut allocation =
PinnedAllocation::new_with_mapping(ptr, size_class, device_ptr, request.flags);
if let Some(tag) = &request.tag {
allocation.metadata.tag = Some(tag.clone());
}
let mut pools = self.pools.lock().map_err(|_| CudaError::Context {
message: "Failed to acquire pools lock".to_string(),
})?;
let pool = pools
.entry(size_class)
.or_insert_with(|| PinnedMemoryPool::new(size_class));
pool.add_allocation(allocation.clone());
self.update_allocation_stats(size_class);
Ok(allocation)
}
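    /// Translates `PinnedMemoryFlags` plus config-wide defaults into the
    /// `cudaHostAlloc*` flag bits, OR-ing in any caller-supplied raw flags.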
fn calculate_cuda_flags(&self, flags: &PinnedMemoryFlags) -> u32 {
let mut cuda_flags = 0u32;
if flags.portable || self.config.enable_portable_memory {
cuda_flags |= cuda_sys::cudaHostAllocPortable;
}
if flags.write_combining || self.config.enable_write_combining {
cuda_flags |= cuda_sys::cudaHostAllocWriteCombined;
}
if flags.enable_mapping || self.config.enable_device_mapping {
cuda_flags |= cuda_sys::cudaHostAllocMapped;
}
cuda_flags | flags.raw_flags
}
fn allocate_cuda_pinned_memory(&self, size: usize, flags: u32) -> CudaResult<*mut u8> {
let mut ptr: *mut std::ffi::c_void = std::ptr::null_mut();
unsafe {
let result =
cuda_sys::cudaHostAlloc(&mut ptr as *mut *mut std::ffi::c_void, size, flags);
if result != crate::cuda::cudaSuccess {
return Err(CudaError::Context {
message: format!("Failed to allocate pinned memory: {:?}", result),
});
}
}
Ok(ptr as *mut u8)
}
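    /// Resolves the device-side alias of a mapped pinned buffer via
    /// `cudaHostGetDevicePointer`. The buffer must have been allocated with
    /// `cudaHostAllocMapped`; a null result maps to `Ok(None)`.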
fn map_pinned_memory_to_device(
&self,
host_ptr: *mut u8,
size: usize,
) -> CudaResult<Option<DevicePointer<u8>>> {
let mut device_ptr: *mut std::ffi::c_void = std::ptr::null_mut();
unsafe {
let result = cuda_sys::cudaHostGetDevicePointer(
&mut device_ptr as *mut *mut std::ffi::c_void,
host_ptr as *mut std::ffi::c_void,
                0,
            );
if result != crate::cuda::cudaSuccess {
return Err(CudaError::Context {
message: format!("Failed to map pinned memory to device: {:?}", result),
});
}
}
if device_ptr.is_null() {
Ok(None)
} else {
Ok(Some(unsafe {
DevicePointer::<u8>::from_raw(device_ptr as u64)
}))
}
}
    fn should_cache_allocation(&self, allocation: &PinnedAllocation) -> bool {
        // On a poisoned lock, fall through to an immediate free rather than
        // panicking. Pools are keyed by size class, which `allocation.size`
        // already holds.
        let Ok(pools) = self.pools.lock() else {
            return false;
        };
        if let Some(pool) = pools.get(&allocation.size) {
            pool.free_blocks.len() < self.config.max_free_blocks_per_pool
        } else {
            true
        }
    }
    fn return_to_pool(&self, allocation: PinnedAllocation) -> CudaResult<()> {
        let mut pools = self.pools.lock().map_err(|_| CudaError::Context {
            message: "Failed to acquire pools lock".to_string(),
        })?;
        if let Some(pool) = pools.get_mut(&allocation.size) {
            pool.deallocate(allocation);
            Ok(())
        } else {
            // The pool may have been dropped by cleanup; free the block
            // directly instead of silently leaking pinned memory.
            drop(pools);
            self.free_pinned_allocation(allocation)
        }
    }
fn free_pinned_allocation(&self, allocation: PinnedAllocation) -> CudaResult<()> {
unsafe {
let result = cuda_sys::cudaFreeHost(allocation.ptr.as_ptr() as *mut std::ffi::c_void);
if result != crate::cuda::cudaSuccess {
return Err(CudaError::Context {
message: format!("Failed to free pinned memory: {:?}", result),
});
}
}
Ok(())
}
fn update_allocation_stats(&self, size: usize) {
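        // Bump the running total, then lift the peak via a CAS loop so that
        // concurrent allocators never lose a higher high-water mark.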
let current = self.total_pinned_memory.fetch_add(size, Ordering::Relaxed) + size;
let mut peak = self.peak_pinned_memory.load(Ordering::Relaxed);
while current > peak {
match self.peak_pinned_memory.compare_exchange_weak(
peak,
current,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => break,
Err(new_peak) => peak = new_peak,
}
}
if let Ok(mut stats) = self.allocation_stats.lock() {
stats.base_stats.total_allocations += 1;
stats.base_stats.active_allocations += 1;
stats.base_stats.total_bytes_allocated += size as u64;
stats.base_stats.current_bytes_allocated = current as u64;
stats.base_stats.peak_bytes_allocated = peak as u64;
}
}
fn update_deallocation_stats(&self, allocation: &PinnedAllocation) {
self.total_pinned_memory
.fetch_sub(allocation.size, Ordering::Relaxed);
if let Ok(mut stats) = self.allocation_stats.lock() {
stats.base_stats.active_allocations =
stats.base_stats.active_allocations.saturating_sub(1);
stats.base_stats.current_bytes_allocated =
self.total_pinned_memory.load(Ordering::Relaxed) as u64;
let age = allocation.age();
let total_deallocations =
stats.base_stats.total_allocations - stats.base_stats.active_allocations;
if total_deallocations > 0 {
let total_age =
stats.average_allocation_age * (total_deallocations - 1) as u32 + age;
stats.average_allocation_age = total_age / total_deallocations as u32;
}
}
}
    fn record_allocation_success(&self, _size: usize, start_time: Instant, cache_hit: bool) {
        let allocation_time = start_time.elapsed();
        if let Ok(mut stats) = self.allocation_stats.lock() {
            let total = stats.base_stats.total_allocations as f32;
            if total > 0.0 {
                // Fold this allocation into the rolling hit-rate and timing averages.
                let hit = if cache_hit { 1.0 } else { 0.0 };
                stats.base_stats.cache_hit_rate =
                    ((stats.base_stats.cache_hit_rate * (total - 1.0)) + hit) / total;
                stats.base_stats.average_allocation_time =
                    (stats.base_stats.average_allocation_time * (total - 1.0) as u32
                        + allocation_time)
                        / total as u32;
            }
        }
    }
fn get_cached_allocation_count(&self) -> usize {
self.pools
.lock()
.map(|pools| pools.values().map(|pool| pool.free_blocks.len()).sum())
.unwrap_or(0)
}
    fn calculate_fragmentation_level(&self) -> f32 {
        // Size-class pooling keeps external fragmentation low; report a
        // nominal constant until a real measurement is wired in.
        0.05
    }
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferDirection {
HostToDevice,
DeviceToHost,
Bidirectional,
}
#[derive(Debug, Clone)]
pub struct PoolCleanupResult {
pub allocations_freed: usize,
pub bytes_freed: usize,
}
impl PinnedMemoryPool {
fn new(size_class: usize) -> Self {
Self {
size_class,
free_blocks: Vec::new(),
allocated_blocks: Vec::new(),
total_allocations: 0,
peak_usage: 0,
cache_hits: 0,
cache_misses: 0,
last_access: Instant::now(),
}
}
    fn allocate(&mut self) -> Option<PinnedAllocation> {
        self.last_access = Instant::now();
        if let Some(allocation) = self.free_blocks.pop() {
            self.cache_hits += 1;
            // Re-track the block as in-use so pool occupancy stays consistent.
            self.allocated_blocks.push(allocation.clone());
            Some(allocation)
        } else {
            self.cache_misses += 1;
            None
        }
    }
    fn deallocate(&mut self, allocation: PinnedAllocation) {
        // Drop the stale in-use entry; otherwise `is_empty()` never returns
        // true and `allocated_blocks` retains dangling clones once cleanup
        // frees the underlying memory.
        self.allocated_blocks
            .retain(|a| a.ptr.as_ptr() != allocation.ptr.as_ptr());
        self.free_blocks.push(allocation);
        self.last_access = Instant::now();
    }
fn add_allocation(&mut self, allocation: PinnedAllocation) {
self.allocated_blocks.push(allocation);
self.total_allocations += 1;
self.peak_usage = self.peak_usage.max(self.allocated_blocks.len());
}
fn cleanup_old_allocations(
&mut self,
now: Instant,
max_age: Duration,
) -> CudaResult<PoolCleanupResult> {
let initial_count = self.free_blocks.len();
let mut bytes_freed = 0;
self.free_blocks.retain(|allocation| {
let age = now.duration_since(allocation.allocation_time);
if age > max_age {
bytes_freed += allocation.size;
unsafe {
let result =
cuda_sys::cudaFreeHost(allocation.ptr.as_ptr() as *mut std::ffi::c_void);
if result != crate::cuda::cudaSuccess {
eprintln!(
"Warning: Failed to free pinned memory during cleanup: {:?}",
result
);
}
}
false
} else {
true
}
});
Ok(PoolCleanupResult {
allocations_freed: initial_count - self.free_blocks.len(),
bytes_freed,
})
}
fn is_empty(&self) -> bool {
self.free_blocks.is_empty() && self.allocated_blocks.is_empty()
}
}
impl TransferStats {
fn update_stats(&mut self, bytes: usize, duration: Duration, bandwidth: f64) {
self.total_transfers += 1;
self.total_bytes += bytes as u64;
self.total_time += duration;
if bandwidth > self.peak_bandwidth {
self.peak_bandwidth = bandwidth;
}
if self.min_bandwidth == 0.0 || bandwidth < self.min_bandwidth {
self.min_bandwidth = bandwidth;
}
if self.total_time.as_secs_f64() > 0.0 {
self.average_bandwidth = self.total_bytes as f64 / self.total_time.as_secs_f64();
}
if self.peak_bandwidth > 0.0 {
self.efficiency = (self.average_bandwidth / self.peak_bandwidth) as f32;
}
}
}
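/// Returns the process-wide manager for `device_id`, creating it on first
/// use. `config` is honored only when the manager is first created; later
/// calls for the same device ignore it and return the cached instance.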
pub fn get_pinned_memory_manager(
device_id: usize,
config: Option<PinnedMemoryConfig>,
) -> CudaResult<Arc<PinnedMemoryManager>> {
let mut managers = PINNED_MANAGERS.lock().map_err(|_| CudaError::Context {
message: "Failed to acquire global managers lock".to_string(),
})?;
if let Some(manager) = managers.get(&device_id) {
Ok(Arc::clone(manager))
} else {
let config = config.unwrap_or_default();
let manager = Arc::new(PinnedMemoryManager::new_for_device(device_id, config)?);
managers.insert(device_id, Arc::clone(&manager));
Ok(manager)
}
}
impl Default for PinnedMemoryConfig {
fn default() -> Self {
Self {
            max_pinned_memory: 512 * 1024 * 1024, // 512 MiB
            max_cache_age: Duration::from_secs(300),
            enable_auto_cleanup: true,
            cleanup_interval: Duration::from_secs(60),
            max_free_blocks_per_pool: 8,
enable_device_mapping: false,
enable_portable_memory: false,
enable_write_combining: false,
enable_transfer_tracking: true,
memory_alignment: 256,
}
}
}
impl Default for PinnedAllocationStats {
fn default() -> Self {
Self {
base_stats: AllocationStats::default(),
mapping_success_rate: 1.0,
average_allocation_age: Duration::from_secs(0),
total_mapped_allocations: 0,
current_mapped_allocations: 0,
memory_pressure_events: 0,
cleanup_operations: 0,
cleanup_bytes_freed: 0,
}
}
}
impl Default for TransferMetrics {
fn default() -> Self {
Self {
host_to_device: TransferStats::default(),
device_to_host: TransferStats::default(),
bidirectional: TransferStats::default(),
overall_efficiency: 0.0,
peak_bandwidth: 0.0,
average_latency: Duration::from_secs(0),
}
}
}
impl Default for TransferStats {
fn default() -> Self {
Self {
total_transfers: 0,
total_bytes: 0,
total_time: Duration::from_secs(0),
average_bandwidth: 0.0,
peak_bandwidth: 0.0,
min_bandwidth: 0.0,
efficiency: 0.0,
}
}
}
impl Default for PinnedMemoryRequest {
fn default() -> Self {
Self {
size: 0,
enable_mapping: false,
flags: PinnedMemoryFlags::default(),
alignment: None,
tag: None,
usage_pattern: UsagePattern::Bidirectional,
priority: AllocationPriority::Normal,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_pinned_memory_config() {
let config = PinnedMemoryConfig::default();
assert_eq!(config.max_pinned_memory, 512 * 1024 * 1024);
assert!(config.enable_auto_cleanup);
assert!(config.enable_transfer_tracking);
}
#[test]
fn test_pinned_memory_pool() {
        let pool = PinnedMemoryPool::new(4096);
assert_eq!(pool.size_class, 4096);
assert!(pool.free_blocks.is_empty());
assert!(pool.allocated_blocks.is_empty());
assert!(pool.is_empty());
}
#[test]
fn test_transfer_stats() {
let mut stats = TransferStats::default();
        // 1024 bytes in 10 ms corresponds to 102,400 bytes/s.
        stats.update_stats(1024, Duration::from_millis(10), 102_400.0);
        assert_eq!(stats.total_transfers, 1);
        assert_eq!(stats.total_bytes, 1024);
        assert_eq!(stats.peak_bandwidth, 102_400.0);
}
#[test]
fn test_usage_patterns() {
assert_eq!(UsagePattern::HostToDevice, UsagePattern::HostToDevice);
assert_ne!(UsagePattern::HostToDevice, UsagePattern::DeviceToHost);
}
#[test]
fn test_allocation_priorities() {
assert!(AllocationPriority::Critical > AllocationPriority::High);
assert!(AllocationPriority::High > AllocationPriority::Normal);
assert!(AllocationPriority::Normal > AllocationPriority::Low);
}
#[test]
fn test_pinned_memory_request() {
let request = PinnedMemoryRequest {
size: 4096,
enable_mapping: true,
usage_pattern: UsagePattern::HostToDevice,
priority: AllocationPriority::High,
..Default::default()
};
assert_eq!(request.size, 4096);
assert!(request.enable_mapping);
assert_eq!(request.usage_pattern, UsagePattern::HostToDevice);
assert_eq!(request.priority, AllocationPriority::High);
}
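    #[test]
    fn test_manager_info_without_allocations() {
        // Constructing the manager performs no CUDA calls, so this check can
        // run on hosts without a GPU.
        let manager = PinnedMemoryManager::new(PinnedMemoryConfig::default())
            .expect("constructing a manager should not fail");
        let info = manager.info();
        assert_eq!(info.device_id, 0);
        assert_eq!(info.current_allocated, 0);
        assert_eq!(info.utilization_percent, 0);
        assert_eq!(info.active_pools, 0);
        assert_eq!(info.cached_allocations, 0);
    }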
}
pub type MemoryTransferMetrics = TransferMetrics;
pub type PinnedMemoryMetrics = PinnedAllocationStats;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TransferOptimizationStrategy {
MaxBandwidth,
MinLatency,
Balanced,
Adaptive,
}
impl Default for TransferOptimizationStrategy {
fn default() -> Self {
Self::Balanced
}
}