use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::sync::RwLock;
use std::time::Instant;

use serde::{Deserialize, Serialize};

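/// Storage tiers available to the allocator, ordered fastest (VRAM) to
/// slowest (disk).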
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub enum MemoryTier {
    Vram,
    Host,
    Disk,
}

impl MemoryTier {
    pub fn priority(&self) -> u8 {
        match self {
            MemoryTier::Vram => 0,
            MemoryTier::Host => 1,
            MemoryTier::Disk => 2,
        }
    }

    pub fn is_faster_than(&self, other: &MemoryTier) -> bool {
        self.priority() < other.priority()
    }
}

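/// Errors returned by the virtual memory subsystem.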
#[derive(Debug, Clone)]
pub enum VirtualMemoryError {
    AllocationFailed(String),
    HandleNotFound(u64),
    IoError(String),
    TierNotAvailable(MemoryTier),
    InvalidOperation(String),
    SizeMismatch { expected: usize, actual: usize },
}

impl std::fmt::Display for VirtualMemoryError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VirtualMemoryError::AllocationFailed(msg) => write!(f, "Allocation failed: {}", msg),
            VirtualMemoryError::HandleNotFound(id) => write!(f, "Handle {} not found", id),
            VirtualMemoryError::IoError(msg) => write!(f, "I/O error: {}", msg),
            VirtualMemoryError::TierNotAvailable(tier) => {
                write!(f, "Tier {:?} not available", tier)
            }
            VirtualMemoryError::InvalidOperation(msg) => write!(f, "Invalid operation: {}", msg),
            VirtualMemoryError::SizeMismatch { expected, actual } => {
                write!(f, "Size mismatch: expected {}, got {}", expected, actual)
            }
        }
    }
}

impl std::error::Error for VirtualMemoryError {}

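/// Tuning knobs for the tiered memory manager. `eviction_threshold` is the
/// usage fraction at which a tier starts evicting; `promotion_threshold` is,
/// by its name, intended as the access count that triggers promotion to a
/// faster tier (it is not yet consulted in this module).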
#[derive(Clone, Debug)]
pub struct VirtualMemoryConfig {
    pub enable_vram: bool,
    pub max_host_bytes: usize,
    pub max_disk_bytes: usize,
    pub disk_path: PathBuf,
    pub auto_migrate: bool,
    pub eviction_threshold: f64,
    pub promotion_threshold: u32,
}

impl Default for VirtualMemoryConfig {
    fn default() -> Self {
        Self {
            enable_vram: false,
            max_host_bytes: 4 * 1024 * 1024 * 1024,
            max_disk_bytes: 64 * 1024 * 1024 * 1024,
            disk_path: std::env::temp_dir().join("embeddenator_vmem"),
            auto_migrate: true,
            eviction_threshold: 0.90,
            promotion_threshold: 3,
        }
    }
}

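/// Opaque, copyable handle to an allocation. The bytes live in whichever
/// tier currently holds them; the handle only records id, size, and home tier.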
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct VMemHandle {
    id: u64,
    size: usize,
    home_tier: MemoryTier,
}

impl VMemHandle {
    fn new(id: u64, size: usize, home_tier: MemoryTier) -> Self {
        Self {
            id,
            size,
            home_tier,
        }
    }

    pub fn id(&self) -> u64 {
        self.id
    }

    pub fn size(&self) -> usize {
        self.size
    }

    pub fn home_tier(&self) -> MemoryTier {
        self.home_tier
    }
}

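/// Per-allocation bookkeeping: current tier, access recency and count, and
/// pin/dirty flags.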
#[derive(Debug)]
struct AllocationMetadata {
    handle: VMemHandle,
    current_tier: MemoryTier,
    last_access: Instant,
    access_count: u32,
    pinned: bool,
    dirty: bool,
}

impl AllocationMetadata {
    fn new(handle: VMemHandle) -> Self {
        Self {
            handle,
            current_tier: handle.home_tier,
            last_access: Instant::now(),
            access_count: 0,
            pinned: false,
            dirty: false,
        }
    }

    fn touch(&mut self) {
        self.last_access = Instant::now();
        self.access_count = self.access_count.saturating_add(1);
    }
}

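/// In-memory pool backing a single tier, with simple byte accounting.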
struct TierPool {
    tier: MemoryTier,
    data: HashMap<u64, Vec<u8>>,
    used_bytes: usize,
    max_bytes: usize,
}

impl TierPool {
    fn new(tier: MemoryTier, max_bytes: usize) -> Self {
        Self {
            tier,
            data: HashMap::new(),
            used_bytes: 0,
            max_bytes,
        }
    }

    fn available(&self) -> usize {
        self.max_bytes.saturating_sub(self.used_bytes)
    }

    #[allow(dead_code)]
    fn usage_ratio(&self) -> f64 {
        if self.max_bytes == 0 {
            1.0
        } else {
            self.used_bytes as f64 / self.max_bytes as f64
        }
    }

    fn insert(&mut self, id: u64, data: Vec<u8>) -> Result<(), VirtualMemoryError> {
        let size = data.len();
        // Credit any entry this insert replaces so repeated inserts for the
        // same id do not inflate the byte accounting.
        let old_size = self.data.get(&id).map_or(0, |d| d.len());
        if size > self.available().saturating_add(old_size) {
            return Err(VirtualMemoryError::AllocationFailed(format!(
                "Tier {:?} full: need {} bytes, available {}",
                self.tier,
                size,
                self.available()
            )));
        }
        self.data.insert(id, data);
        self.used_bytes = self.used_bytes.saturating_sub(old_size) + size;
        Ok(())
    }
280
281 fn remove(&mut self, id: u64) -> Option<Vec<u8>> {
282 if let Some(data) = self.data.remove(&id) {
283 self.used_bytes = self.used_bytes.saturating_sub(data.len());
284 Some(data)
285 } else {
286 None
287 }
288 }
289
290 fn get(&self, id: u64) -> Option<&Vec<u8>> {
291 self.data.get(&id)
292 }
293
294 #[allow(dead_code)]
295 fn contains(&self, id: u64) -> bool {
296 self.data.contains_key(&id)
297 }
298}
299
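/// Disk-backed pool: each allocation is stored as one file under `base_path`,
/// tracked by an id-to-path index.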
struct DiskPool {
    base_path: PathBuf,
    index: HashMap<u64, PathBuf>,
    used_bytes: usize,
    max_bytes: usize,
}

impl DiskPool {
    fn new(base_path: PathBuf, max_bytes: usize) -> Result<Self, VirtualMemoryError> {
        std::fs::create_dir_all(&base_path)
            .map_err(|e| VirtualMemoryError::IoError(e.to_string()))?;
        Ok(Self {
            base_path,
            index: HashMap::new(),
            used_bytes: 0,
            max_bytes,
        })
    }

    fn available(&self) -> usize {
        self.max_bytes.saturating_sub(self.used_bytes)
    }

    #[allow(dead_code)]
    fn usage_ratio(&self) -> f64 {
        if self.max_bytes == 0 {
            1.0
        } else {
            self.used_bytes as f64 / self.max_bytes as f64
        }
    }

    fn file_path(&self, id: u64) -> PathBuf {
        self.base_path.join(format!("vmem_{:016x}.bin", id))
    }

    fn write(&mut self, id: u64, data: &[u8]) -> Result<(), VirtualMemoryError> {
        let size = data.len();
        let path = self.file_path(id);
        // Measure any file being replaced *before* overwriting it, so the
        // accounting subtracts the old length rather than the new one (the
        // file path is deterministic per id, so the write clobbers it).
        let old_size = if self.index.contains_key(&id) {
            std::fs::metadata(&path)
                .map(|m| m.len() as usize)
                .unwrap_or(0)
        } else {
            0
        };
        if size > self.available().saturating_add(old_size) {
            return Err(VirtualMemoryError::AllocationFailed(format!(
                "Disk pool full: need {} bytes, available {}",
                size,
                self.available()
            )));
        }

        std::fs::write(&path, data).map_err(|e| VirtualMemoryError::IoError(e.to_string()))?;
        self.index.insert(id, path);
        self.used_bytes = self.used_bytes.saturating_sub(old_size) + size;

        Ok(())
    }
365
366 fn read(&self, id: u64) -> Result<Vec<u8>, VirtualMemoryError> {
367 let path = self
368 .index
369 .get(&id)
370 .ok_or(VirtualMemoryError::HandleNotFound(id))?;
371 std::fs::read(path).map_err(|e| VirtualMemoryError::IoError(e.to_string()))
372 }
373
374 fn remove(&mut self, id: u64) -> Result<(), VirtualMemoryError> {
375 if let Some(path) = self.index.remove(&id) {
376 if let Ok(meta) = std::fs::metadata(&path) {
377 self.used_bytes = self.used_bytes.saturating_sub(meta.len() as usize);
378 }
379 let _ = std::fs::remove_file(&path);
380 }
381 Ok(())
382 }
383
384 #[allow(dead_code)]
385 fn contains(&self, id: u64) -> bool {
386 self.index.contains_key(&id)
387 }
388}
389
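/// Tiered virtual memory manager. Allocations live in a home tier (host RAM
/// or disk), can migrate between tiers, and are evicted least-recently-used
/// first when a tier fills past its configured threshold. All public methods
/// take `&self`; interior locks make the manager safe to share across threads.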
pub struct VirtualMemory {
    config: VirtualMemoryConfig,
    host_pool: RwLock<TierPool>,
    disk_pool: RwLock<DiskPool>,
    metadata: RwLock<HashMap<u64, AllocationMetadata>>,
    next_id: AtomicU64,
    total_allocations: AtomicUsize,
}

impl VirtualMemory {
    pub fn new(config: VirtualMemoryConfig) -> Result<Self, VirtualMemoryError> {
        let host_pool = TierPool::new(MemoryTier::Host, config.max_host_bytes);
        let disk_pool = DiskPool::new(config.disk_path.clone(), config.max_disk_bytes)?;

        Ok(Self {
            config,
            host_pool: RwLock::new(host_pool),
            disk_pool: RwLock::new(disk_pool),
            metadata: RwLock::new(HashMap::new()),
            next_id: AtomicU64::new(1),
            total_allocations: AtomicUsize::new(0),
        })
    }

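    /// Allocates `size` zero-initialized bytes with `home_tier` as the home
    /// placement, evicting colder data first when `auto_migrate` is enabled
    /// and the tier is near capacity. VRAM is not yet backed and is rejected.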
    pub fn allocate(
        &self,
        size: usize,
        home_tier: MemoryTier,
    ) -> Result<VMemHandle, VirtualMemoryError> {
        if home_tier == MemoryTier::Vram {
            return Err(VirtualMemoryError::TierNotAvailable(MemoryTier::Vram));
        }

        let can_alloc = match home_tier {
            MemoryTier::Host => {
                let pool = self.host_pool.read().unwrap();
                pool.available() >= size
            }
            MemoryTier::Disk => {
                let pool = self.disk_pool.read().unwrap();
                pool.available() >= size
            }
            MemoryTier::Vram => false,
        };

        if !can_alloc && self.config.auto_migrate {
            self.evict_if_needed(home_tier, size)?;
        }

        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
        let handle = VMemHandle::new(id, size, home_tier);

        let zeros = vec![0u8; size];
        match home_tier {
            MemoryTier::Host => {
                let mut pool = self.host_pool.write().unwrap();
                pool.insert(id, zeros)?;
            }
            MemoryTier::Disk => {
                let mut pool = self.disk_pool.write().unwrap();
                pool.write(id, &zeros)?;
            }
            MemoryTier::Vram => unreachable!(),
        }

        let mut meta = self.metadata.write().unwrap();
        meta.insert(id, AllocationMetadata::new(handle));

        self.total_allocations.fetch_add(1, Ordering::Relaxed);
        Ok(handle)
    }

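    /// Frees an allocation, dropping its bytes from the current tier and any
    /// stale copy left in its home tier after a migration.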
    pub fn free(&self, handle: &VMemHandle) -> Result<(), VirtualMemoryError> {
        let mut meta_guard = self.metadata.write().unwrap();
        let meta = meta_guard
            .remove(&handle.id)
            .ok_or(VirtualMemoryError::HandleNotFound(handle.id))?;

        match meta.current_tier {
            MemoryTier::Host => {
                let mut pool = self.host_pool.write().unwrap();
                pool.remove(handle.id);
            }
            MemoryTier::Disk => {
                let mut pool = self.disk_pool.write().unwrap();
                pool.remove(handle.id)?;
            }
            MemoryTier::Vram => {}
        }

        if meta.current_tier != meta.handle.home_tier {
            match meta.handle.home_tier {
                MemoryTier::Host => {
                    let mut pool = self.host_pool.write().unwrap();
                    pool.remove(handle.id);
                }
                MemoryTier::Disk => {
                    let mut pool = self.disk_pool.write().unwrap();
                    pool.remove(handle.id)?;
                }
                MemoryTier::Vram => {}
            }
        }

        self.total_allocations.fetch_sub(1, Ordering::Relaxed);
        Ok(())
    }

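    /// Overwrites the allocation in place. The slice length must equal the
    /// allocation size; the entry is marked dirty.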
    pub fn write(&self, handle: &VMemHandle, data: &[u8]) -> Result<(), VirtualMemoryError> {
        if data.len() != handle.size {
            return Err(VirtualMemoryError::SizeMismatch {
                expected: handle.size,
                actual: data.len(),
            });
        }

        let current_tier = {
            let mut meta_guard = self.metadata.write().unwrap();
            let meta = meta_guard
                .get_mut(&handle.id)
                .ok_or(VirtualMemoryError::HandleNotFound(handle.id))?;
            meta.touch();
            meta.dirty = true;
            meta.current_tier
        };

        match current_tier {
            MemoryTier::Host => {
                let mut pool = self.host_pool.write().unwrap();
                if let Some(existing) = pool.data.get_mut(&handle.id) {
                    existing.copy_from_slice(data);
                } else {
                    pool.insert(handle.id, data.to_vec())?;
                }
            }
            MemoryTier::Disk => {
                let mut pool = self.disk_pool.write().unwrap();
                pool.write(handle.id, data)?;
            }
            MemoryTier::Vram => {
                return Err(VirtualMemoryError::InvalidOperation(
                    "Direct VRAM write not supported, use coherency layer".to_string(),
                ));
            }
        }

        Ok(())
    }

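    /// Reads the allocation's bytes from whichever tier currently holds them,
    /// updating its access statistics.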
    pub fn read(&self, handle: &VMemHandle) -> Result<Vec<u8>, VirtualMemoryError> {
        let current_tier = {
            let mut meta_guard = self.metadata.write().unwrap();
            let meta = meta_guard
                .get_mut(&handle.id)
                .ok_or(VirtualMemoryError::HandleNotFound(handle.id))?;
            meta.touch();
            meta.current_tier
        };

        match current_tier {
            MemoryTier::Host => {
                let pool = self.host_pool.read().unwrap();
                pool.get(handle.id)
                    .cloned()
                    .ok_or(VirtualMemoryError::HandleNotFound(handle.id))
            }
            MemoryTier::Disk => {
                let pool = self.disk_pool.read().unwrap();
                pool.read(handle.id)
            }
            MemoryTier::Vram => Err(VirtualMemoryError::InvalidOperation(
                "Direct VRAM read not supported, use coherency layer".to_string(),
            )),
        }
    }

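    /// Moves an allocation to `target_tier`. Migrating away from the home
    /// tier keeps the home copy and marks the entry dirty; `flush` reconciles
    /// the copies later.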
    pub fn migrate(
        &self,
        handle: &VMemHandle,
        target_tier: MemoryTier,
    ) -> Result<(), VirtualMemoryError> {
        self.migrate_internal(handle, target_tier, false)
    }

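    /// Copies the bytes into `target_tier`, removes the source copy when it
    /// is not the home-tier copy or when `force_remove_source` is set, and
    /// updates the allocation's metadata.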
    fn migrate_internal(
        &self,
        handle: &VMemHandle,
        target_tier: MemoryTier,
        force_remove_source: bool,
    ) -> Result<(), VirtualMemoryError> {
        if target_tier == MemoryTier::Vram {
            return Err(VirtualMemoryError::TierNotAvailable(MemoryTier::Vram));
        }

        let current_tier = {
            let meta_guard = self.metadata.read().unwrap();
            meta_guard
                .get(&handle.id)
                .ok_or(VirtualMemoryError::HandleNotFound(handle.id))?
                .current_tier
        };

        if current_tier == target_tier {
            return Ok(());
        }

        let data = self.read(handle)?;

        match target_tier {
            MemoryTier::Host => {
                let mut pool = self.host_pool.write().unwrap();
                pool.insert(handle.id, data)?;
            }
            MemoryTier::Disk => {
                let mut pool = self.disk_pool.write().unwrap();
                pool.write(handle.id, &data)?;
            }
            MemoryTier::Vram => unreachable!(),
        }

        if current_tier != handle.home_tier || force_remove_source {
            match current_tier {
                MemoryTier::Host => {
                    let mut pool = self.host_pool.write().unwrap();
                    pool.remove(handle.id);
                }
                MemoryTier::Disk => {
                    let mut pool = self.disk_pool.write().unwrap();
                    pool.remove(handle.id)?;
                }
                MemoryTier::Vram => {}
            }
        }

        {
            let mut meta_guard = self.metadata.write().unwrap();
            if let Some(meta) = meta_guard.get_mut(&handle.id) {
                meta.current_tier = target_tier;
                if target_tier != handle.home_tier {
                    meta.dirty = true;
                }
            }
        }

        Ok(())
    }

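    /// Pins an allocation so the eviction pass will skip it.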
    pub fn pin(&self, handle: &VMemHandle) -> Result<(), VirtualMemoryError> {
        let mut meta_guard = self.metadata.write().unwrap();
        let meta = meta_guard
            .get_mut(&handle.id)
            .ok_or(VirtualMemoryError::HandleNotFound(handle.id))?;
        meta.pinned = true;
        Ok(())
    }

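    /// Clears the pin, making the allocation evictable again.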
    pub fn unpin(&self, handle: &VMemHandle) -> Result<(), VirtualMemoryError> {
        let mut meta_guard = self.metadata.write().unwrap();
        let meta = meta_guard
            .get_mut(&handle.id)
            .ok_or(VirtualMemoryError::HandleNotFound(handle.id))?;
        meta.pinned = false;
        Ok(())
    }

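    /// Takes a snapshot of pool usage and allocation counts under the locks.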
    pub fn stats(&self) -> VirtualMemoryStats {
        let meta_guard = self.metadata.read().unwrap();
        let host_pool = self.host_pool.read().unwrap();
        let disk_pool = self.disk_pool.read().unwrap();

        let host_allocations = meta_guard
            .values()
            .filter(|m| m.current_tier == MemoryTier::Host)
            .count();
        let disk_allocations = meta_guard
            .values()
            .filter(|m| m.current_tier == MemoryTier::Disk)
            .count();
        let pinned = meta_guard.values().filter(|m| m.pinned).count();
        let dirty = meta_guard.values().filter(|m| m.dirty).count();

        VirtualMemoryStats {
            total_allocations: self.total_allocations.load(Ordering::Relaxed),
            host_used_bytes: host_pool.used_bytes,
            host_max_bytes: host_pool.max_bytes,
            host_allocations,
            disk_used_bytes: disk_pool.used_bytes,
            disk_max_bytes: disk_pool.max_bytes,
            disk_allocations,
            pinned_allocations: pinned,
            dirty_allocations: dirty,
        }
    }

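    /// Evicts least-recently-used, unpinned allocations from `tier` until a
    /// pending allocation of `needed_bytes` fits under the eviction threshold.
    /// Host evicts to disk; disk has no slower tier and reports failure.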
    fn evict_if_needed(
        &self,
        tier: MemoryTier,
        needed_bytes: usize,
    ) -> Result<(), VirtualMemoryError> {
        let (current_used, max_bytes) = match tier {
            MemoryTier::Host => {
                let pool = self.host_pool.read().unwrap();
                (pool.used_bytes, pool.max_bytes)
            }
            MemoryTier::Disk => {
                let pool = self.disk_pool.read().unwrap();
                (pool.used_bytes, pool.max_bytes)
            }
            MemoryTier::Vram => return Ok(()),
        };

        let target_usage = current_used + needed_bytes;
        let threshold = (max_bytes as f64 * self.config.eviction_threshold) as usize;

        if target_usage <= threshold {
            return Ok(());
        }

        let to_evict: Vec<VMemHandle> = {
            let meta_guard = self.metadata.read().unwrap();
            let mut candidates: Vec<_> = meta_guard
                .values()
                .filter(|m| !m.pinned && m.current_tier == tier)
                .collect();
            candidates.sort_by_key(|m| m.last_access);

            let mut to_free = 0usize;
            let bytes_to_free = target_usage.saturating_sub(threshold);
            candidates
                .iter()
                .take_while(|m| {
                    if to_free >= bytes_to_free {
                        false
                    } else {
                        to_free += m.handle.size;
                        true
                    }
                })
                .map(|m| m.handle)
                .collect()
        };

        for handle in to_evict {
            if tier == MemoryTier::Host {
                self.migrate_internal(&handle, MemoryTier::Disk, true)?;
            } else if tier == MemoryTier::Disk {
                return Err(VirtualMemoryError::AllocationFailed(
                    "Disk tier full, cannot evict (no slower tier available)".to_string(),
                ));
            }
        }

        Ok(())
    }

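    /// Copies every dirty allocation back to its home tier and clears the
    /// dirty flags.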
    pub fn flush(&self) -> Result<(), VirtualMemoryError> {
        let dirty_handles: Vec<VMemHandle> = {
            let meta_guard = self.metadata.read().unwrap();
            meta_guard
                .values()
                .filter(|m| m.dirty)
                .map(|m| m.handle)
                .collect()
        };

        for handle in dirty_handles {
            let (current, home) = {
                let meta_guard = self.metadata.read().unwrap();
                if let Some(meta) = meta_guard.get(&handle.id) {
                    (meta.current_tier, meta.handle.home_tier)
                } else {
                    continue;
                }
            };

            if current != home {
                let data = self.read(&handle)?;
                match home {
                    MemoryTier::Host => {
                        let mut pool = self.host_pool.write().unwrap();
                        if pool.contains(handle.id) {
                            if let Some(existing) = pool.data.get_mut(&handle.id) {
                                existing.copy_from_slice(&data);
                            }
                        } else {
                            pool.insert(handle.id, data)?;
                        }
                    }
                    MemoryTier::Disk => {
                        let mut pool = self.disk_pool.write().unwrap();
                        pool.write(handle.id, &data)?;
                    }
                    MemoryTier::Vram => {}
                }
            }

            let mut meta_guard = self.metadata.write().unwrap();
            if let Some(meta) = meta_guard.get_mut(&handle.id) {
                meta.dirty = false;
            }
        }

        Ok(())
    }
}

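/// Point-in-time snapshot of allocator state, as returned by
/// [`VirtualMemory::stats`].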
#[derive(Clone, Debug, Default)]
pub struct VirtualMemoryStats {
    pub total_allocations: usize,
    pub host_used_bytes: usize,
    pub host_max_bytes: usize,
    pub host_allocations: usize,
    pub disk_used_bytes: usize,
    pub disk_max_bytes: usize,
    pub disk_allocations: usize,
    pub pinned_allocations: usize,
    pub dirty_allocations: usize,
}

impl VirtualMemoryStats {
    pub fn host_usage_ratio(&self) -> f64 {
        if self.host_max_bytes == 0 {
            0.0
        } else {
            self.host_used_bytes as f64 / self.host_max_bytes as f64
        }
    }

    pub fn disk_usage_ratio(&self) -> f64 {
        if self.disk_max_bytes == 0 {
            0.0
        } else {
            self.disk_used_bytes as f64 / self.disk_max_bytes as f64
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use tempfile::TempDir;

    fn test_config_with_dir(temp_dir: &TempDir) -> VirtualMemoryConfig {
        VirtualMemoryConfig {
            enable_vram: false,
            max_host_bytes: 1024 * 1024,
            max_disk_bytes: 10 * 1024 * 1024,
            disk_path: temp_dir.path().to_path_buf(),
            auto_migrate: true,
            eviction_threshold: 0.80,
            promotion_threshold: 3,
        }
    }

    #[test]
    fn test_memory_tier_priority() {
        assert!(MemoryTier::Vram.is_faster_than(&MemoryTier::Host));
        assert!(MemoryTier::Host.is_faster_than(&MemoryTier::Disk));
        assert!(!MemoryTier::Disk.is_faster_than(&MemoryTier::Host));
    }

    #[test]
    fn test_allocate_host() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(1024, MemoryTier::Host).unwrap();

        assert_eq!(handle.size, 1024);
        assert_eq!(handle.home_tier, MemoryTier::Host);

        let stats = vmem.stats();
        assert_eq!(stats.total_allocations, 1);
        assert_eq!(stats.host_allocations, 1);
    }

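    // Added sketch: VRAM has no backing store in this module, so requesting a
    // VRAM home tier should be rejected up front.
    #[test]
    fn test_allocate_vram_unavailable() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let result = vmem.allocate(64, MemoryTier::Vram);
        assert!(matches!(
            result,
            Err(VirtualMemoryError::TierNotAvailable(MemoryTier::Vram))
        ));
    }
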
    #[test]
    fn test_allocate_disk() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(2048, MemoryTier::Disk).unwrap();

        assert_eq!(handle.size, 2048);
        assert_eq!(handle.home_tier, MemoryTier::Disk);

        let stats = vmem.stats();
        assert_eq!(stats.total_allocations, 1);
        assert_eq!(stats.disk_allocations, 1);
    }

    #[test]
    fn test_write_read_roundtrip() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(100, MemoryTier::Host).unwrap();

        let data: Vec<u8> = (0..100).collect();
        vmem.write(&handle, &data).unwrap();

        let read_data = vmem.read(&handle).unwrap();
        assert_eq!(data, read_data);
    }

    #[test]
    fn test_disk_persistence() {
        let temp_dir = TempDir::new().unwrap();
        let config = test_config_with_dir(&temp_dir);
        let vmem = VirtualMemory::new(config).unwrap();
        let handle = vmem.allocate(256, MemoryTier::Disk).unwrap();

        let data: Vec<u8> = (0..=255).collect();
        vmem.write(&handle, &data).unwrap();

        let read_data = vmem.read(&handle).unwrap();
        assert_eq!(data, read_data);
    }

    #[test]
    fn test_migrate_host_to_disk() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(512, MemoryTier::Host).unwrap();

        let data: Vec<u8> = vec![42; 512];
        vmem.write(&handle, &data).unwrap();

        vmem.migrate(&handle, MemoryTier::Disk).unwrap();

        let read_data = vmem.read(&handle).unwrap();
        assert_eq!(data, read_data);
    }

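    // Added sketch: exercises the reverse direction of the migration above.
    // Promoting a disk-homed allocation to host keeps it readable and counts
    // it under the host tier until it is flushed home.
    #[test]
    fn test_migrate_disk_to_host_promotion() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(256, MemoryTier::Disk).unwrap();

        let data: Vec<u8> = vec![7; 256];
        vmem.write(&handle, &data).unwrap();

        vmem.migrate(&handle, MemoryTier::Host).unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.host_allocations, 1);
        assert_eq!(vmem.read(&handle).unwrap(), data);
    }
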
    #[test]
    fn test_pin_unpin() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(100, MemoryTier::Host).unwrap();

        vmem.pin(&handle).unwrap();
        assert_eq!(vmem.stats().pinned_allocations, 1);

        vmem.unpin(&handle).unwrap();
        assert_eq!(vmem.stats().pinned_allocations, 0);
    }

    #[test]
    fn test_free() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(100, MemoryTier::Host).unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.total_allocations, 1);

        vmem.free(&handle).unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.total_allocations, 0);
    }

    #[test]
    fn test_stats() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();

        let _h1 = vmem.allocate(100, MemoryTier::Host).unwrap();
        let _h2 = vmem.allocate(200, MemoryTier::Host).unwrap();
        let _h3 = vmem.allocate(300, MemoryTier::Disk).unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.total_allocations, 3);
        assert_eq!(stats.host_allocations, 2);
        assert_eq!(stats.disk_allocations, 1);
        assert_eq!(stats.host_used_bytes, 300);
        assert_eq!(stats.disk_used_bytes, 300);
    }

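    // Added sketch: spot-checks the usage-ratio helpers against the 1 MiB
    // host budget configured in test_config_with_dir.
    #[test]
    fn test_usage_ratios() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();

        let _h = vmem.allocate(1024, MemoryTier::Host).unwrap();

        let stats = vmem.stats();
        // 1024 bytes used out of the 1 MiB host budget.
        let expected = 1024.0 / (1024.0 * 1024.0);
        assert!((stats.host_usage_ratio() - expected).abs() < 1e-12);
        assert_eq!(stats.disk_usage_ratio(), 0.0);
    }
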
    #[test]
    fn test_size_mismatch_error() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();
        let handle = vmem.allocate(100, MemoryTier::Host).unwrap();

        let result = vmem.write(&handle, &[0u8; 50]);
        assert!(matches!(
            result,
            Err(VirtualMemoryError::SizeMismatch { .. })
        ));
    }

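    // Added sketch: pins down the Display formatting of the error variants
    // exercised elsewhere in this suite.
    #[test]
    fn test_error_display() {
        assert_eq!(
            VirtualMemoryError::HandleNotFound(42).to_string(),
            "Handle 42 not found"
        );
        assert_eq!(
            VirtualMemoryError::SizeMismatch {
                expected: 100,
                actual: 50
            }
            .to_string(),
            "Size mismatch: expected 100, got 50"
        );
    }
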
    #[test]
    fn test_concurrent_access() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = Arc::new(VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap());
        let handles: Vec<_> = (0..10)
            .map(|_| vmem.allocate(100, MemoryTier::Host).unwrap())
            .collect();

        let threads: Vec<_> = handles
            .into_iter()
            .map(|handle| {
                let vmem = Arc::clone(&vmem);
                std::thread::spawn(move || {
                    let data = vec![42u8; 100];
                    vmem.write(&handle, &data).unwrap();
                    let read = vmem.read(&handle).unwrap();
                    assert_eq!(data, read);
                })
            })
            .collect();

        for t in threads {
            t.join().unwrap();
        }
    }

    #[test]
    fn test_eviction_from_host_to_disk() {
        let temp_dir = TempDir::new().unwrap();
        let config = VirtualMemoryConfig {
            enable_vram: false,
            max_host_bytes: 500,
            max_disk_bytes: 10 * 1024 * 1024,
            disk_path: temp_dir.path().to_path_buf(),
            auto_migrate: true,
            eviction_threshold: 0.80,
            promotion_threshold: 3,
        };
        let vmem = VirtualMemory::new(config).unwrap();

        let h1 = vmem.allocate(200, MemoryTier::Host).unwrap();
        let h2 = vmem.allocate(200, MemoryTier::Host).unwrap();

        vmem.write(&h1, &[1u8; 200]).unwrap();
        vmem.write(&h2, &[2u8; 200]).unwrap();

        let h3 = vmem.allocate(200, MemoryTier::Host).unwrap();
        vmem.write(&h3, &[3u8; 200]).unwrap();

        let stats = vmem.stats();
        assert!(stats.disk_allocations > 0, "Should have evicted to disk");
        assert!(
            stats.host_used_bytes <= 400,
            "Host should be under threshold"
        );

        let data1 = vmem.read(&h1).unwrap();
        let data2 = vmem.read(&h2).unwrap();
        let data3 = vmem.read(&h3).unwrap();

        assert_eq!(data1, vec![1u8; 200]);
        assert_eq!(data2, vec![2u8; 200]);
        assert_eq!(data3, vec![3u8; 200]);
    }

    #[test]
    fn test_pinned_allocation_not_evicted() {
        let temp_dir = TempDir::new().unwrap();
        let config = VirtualMemoryConfig {
            enable_vram: false,
            max_host_bytes: 400,
            max_disk_bytes: 10 * 1024 * 1024,
            disk_path: temp_dir.path().to_path_buf(),
            auto_migrate: true,
            eviction_threshold: 0.75,
            promotion_threshold: 3,
        };
        let vmem = VirtualMemory::new(config).unwrap();

        let h1 = vmem.allocate(150, MemoryTier::Host).unwrap();
        vmem.write(&h1, &[1u8; 150]).unwrap();
        vmem.pin(&h1).unwrap();

        let h2 = vmem.allocate(150, MemoryTier::Host).unwrap();
        vmem.write(&h2, &[2u8; 150]).unwrap();

        let h3 = vmem.allocate(150, MemoryTier::Host).unwrap();
        vmem.write(&h3, &[3u8; 150]).unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.pinned_allocations, 1);

        assert_eq!(vmem.read(&h1).unwrap(), vec![1u8; 150]);
        assert_eq!(vmem.read(&h2).unwrap(), vec![2u8; 150]);
        assert_eq!(vmem.read(&h3).unwrap(), vec![3u8; 150]);
    }

    #[test]
    fn test_flush_persists_dirty_data() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();

        let handle = vmem.allocate(256, MemoryTier::Disk).unwrap();
        let original_data: Vec<u8> = (0..=255).collect();
        vmem.write(&handle, &original_data).unwrap();

        vmem.migrate(&handle, MemoryTier::Host).unwrap();

        let modified_data: Vec<u8> = (0..=255).rev().collect();
        vmem.write(&handle, &modified_data).unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.dirty_allocations, 1);

        vmem.flush().unwrap();

        let stats = vmem.stats();
        assert_eq!(stats.dirty_allocations, 0);

        let read_data = vmem.read(&handle).unwrap();
        assert_eq!(read_data, modified_data);
    }

    #[test]
    fn test_vmemhandle_accessors() {
        let temp_dir = TempDir::new().unwrap();
        let vmem = VirtualMemory::new(test_config_with_dir(&temp_dir)).unwrap();

        let handle = vmem.allocate(1024, MemoryTier::Host).unwrap();

        assert!(handle.id() > 0);
        assert_eq!(handle.size(), 1024);
        assert_eq!(handle.home_tier(), MemoryTier::Host);
    }
}