use std::sync::atomic::{AtomicU64, Ordering};

#[cfg(feature = "cuda")]
use crate::gpu::GpuError;
#[cfg(feature = "cuda")]
use crate::vram_pool::{VramHandle, VramPool};
use crate::vsa::SparseVec;
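
/// Minimal stand-in for `crate::gpu::GpuError` when the `cuda` feature is
/// disabled, so host-only builds can still return a typed error.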
#[cfg(not(feature = "cuda"))]
#[derive(Debug, Clone)]
pub enum GpuError {
    NotAvailable,
}
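
/// Tracks which copy of an engram's data is authoritative.
///
/// `HostOnly`/`DeviceOnly` mean only one side holds the data, `Synced` means
/// both copies match, and `HostDirty`/`DeviceDirty` mean one side has been
/// modified since the last transfer and the other is stale.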
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum CoherencyState {
    HostOnly,
    DeviceOnly,
    Synced,
    HostDirty,
    DeviceDirty,
}

impl CoherencyState {
    pub fn host_is_current(&self) -> bool {
        matches!(
            self,
            CoherencyState::HostOnly | CoherencyState::Synced | CoherencyState::HostDirty
        )
    }

    pub fn device_is_current(&self) -> bool {
        matches!(
            self,
            CoherencyState::DeviceOnly | CoherencyState::Synced | CoherencyState::DeviceDirty
        )
    }

    pub fn needs_sync(&self) -> bool {
        matches!(
            self,
            CoherencyState::HostDirty | CoherencyState::DeviceDirty
        )
    }
}
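
/// An engram whose backing bytes may live on the host, on the GPU, or both,
/// with a `CoherencyState` recording which copy is current and an atomic
/// version counter bumped on every mutation.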
#[derive(Debug)]
pub struct CoherentEngram {
    host_data: Vec<u8>,
    #[cfg(feature = "cuda")]
    device_handle: Option<VramHandle>,
    #[cfg(not(feature = "cuda"))]
    device_handle: Option<()>,
    state: CoherencyState,
    version: AtomicU64,
}

impl CoherentEngram {
    pub fn new(data: Vec<u8>) -> Self {
        Self {
            host_data: data,
            device_handle: None,
            state: CoherencyState::HostOnly,
            version: AtomicU64::new(0),
        }
    }
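
    /// Serializes a `SparseVec` into the engram's byte layout: two
    /// little-endian `u32` counts (positive then negative dimensions),
    /// followed by each dimension index as a little-endian `u32`.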
    pub fn from_sparse_vec(vec: &SparseVec) -> Self {
        let mut data = Vec::new();

        let pos_len = vec.pos.len() as u32;
        let neg_len = vec.neg.len() as u32;
        data.extend_from_slice(&pos_len.to_le_bytes());
        data.extend_from_slice(&neg_len.to_le_bytes());

        for &dim in &vec.pos {
            data.extend_from_slice(&(dim as u32).to_le_bytes());
        }

        for &dim in &vec.neg {
            data.extend_from_slice(&(dim as u32).to_le_bytes());
        }

        Self::new(data)
    }
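
    /// Deserializes the host bytes back into a `SparseVec`, returning `None`
    /// if the buffer is too short for the lengths it declares.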
    pub fn to_sparse_vec(&self) -> Option<SparseVec> {
        if self.host_data.len() < 8 {
            return None;
        }

        let pos_len = u32::from_le_bytes([
            self.host_data[0],
            self.host_data[1],
            self.host_data[2],
            self.host_data[3],
        ]) as usize;
        let neg_len = u32::from_le_bytes([
            self.host_data[4],
            self.host_data[5],
            self.host_data[6],
            self.host_data[7],
        ]) as usize;

        let expected_size = 8 + (pos_len + neg_len) * 4;
        if self.host_data.len() < expected_size {
            return None;
        }

        let mut pos = Vec::with_capacity(pos_len);
        let mut offset = 8;
        for _ in 0..pos_len {
            let dim = u32::from_le_bytes([
                self.host_data[offset],
                self.host_data[offset + 1],
                self.host_data[offset + 2],
                self.host_data[offset + 3],
            ]) as usize;
            pos.push(dim);
            offset += 4;
        }

        let mut neg = Vec::with_capacity(neg_len);
        for _ in 0..neg_len {
            let dim = u32::from_le_bytes([
                self.host_data[offset],
                self.host_data[offset + 1],
                self.host_data[offset + 2],
                self.host_data[offset + 3],
            ]) as usize;
            neg.push(dim);
            offset += 4;
        }

        Some(SparseVec { pos, neg })
    }

    pub fn state(&self) -> CoherencyState {
        self.state
    }

    pub fn version(&self) -> u64 {
        self.version.load(Ordering::Acquire)
    }

    fn bump_version(&self) {
        self.version.fetch_add(1, Ordering::AcqRel);
    }

    pub fn host_data(&self) -> &[u8] {
        &self.host_data
    }
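
    /// Grants mutable access to the host bytes. Unless the engram is
    /// host-only, this marks the host copy dirty and bumps the version,
    /// since the device copy can no longer be assumed to match.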
    pub fn host_data_mut(&mut self) -> &mut Vec<u8> {
        if self.state != CoherencyState::HostOnly {
            self.state = CoherencyState::HostDirty;
        }
        self.bump_version();
        &mut self.host_data
    }

    pub fn is_on_device(&self) -> bool {
        self.device_handle.is_some()
    }
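
    /// Copies the host bytes into VRAM, allocating a handle from the pool on
    /// first use, and marks the engram as `Synced`.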
    #[cfg(feature = "cuda")]
    pub fn upload_to_device(&mut self, pool: &VramPool) -> Result<(), GpuError> {
        let handle = if let Some(h) = self.device_handle {
            h
        } else {
            let h = pool.allocate(self.host_data.len())?;
            self.device_handle = Some(h);
            h
        };

        pool.upload(&handle, &self.host_data)?;

        self.state = CoherencyState::Synced;
        Ok(())
    }

    #[cfg(feature = "cuda")]
    pub fn download_to_host(&mut self, pool: &VramPool) -> Result<(), GpuError> {
        let handle = self
            .device_handle
            .ok_or_else(|| GpuError::InvalidValue("No device handle".to_string()))?;

        self.host_data = pool.download(&handle)?;
        self.state = CoherencyState::Synced;
        self.bump_version();
        Ok(())
    }
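
    /// Resolves a dirty state in the appropriate direction: a dirty host copy
    /// is uploaded (or the engram falls back to host-only if it was never
    /// resident on the device), and a dirty device copy is downloaded.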
    #[cfg(feature = "cuda")]
    pub fn sync(&mut self, pool: &VramPool) -> Result<(), GpuError> {
        match self.state {
            CoherencyState::HostDirty => {
                if self.device_handle.is_some() {
                    self.upload_to_device(pool)?;
                } else {
                    self.state = CoherencyState::HostOnly;
                }
            }
            CoherencyState::DeviceDirty => {
                self.download_to_host(pool)?;
            }
            _ => {}
        }
        Ok(())
    }

    pub fn mark_device_dirty(&mut self) {
        if self.device_handle.is_some() {
            self.state = CoherencyState::DeviceDirty;
            self.bump_version();
        }
    }
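
    /// Frees the VRAM handle, first downloading the device copy if it holds
    /// unsynced writes, and returns the engram to `HostOnly`.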
    #[cfg(feature = "cuda")]
    pub fn release_device(&mut self, pool: &VramPool) -> Result<(), GpuError> {
        if let Some(handle) = self.device_handle.take() {
            if self.state == CoherencyState::DeviceDirty {
                let data = pool.download(&handle)?;
                self.host_data = data;
            }
            pool.free(handle)?;
            self.state = CoherencyState::HostOnly;
        }
        Ok(())
    }

    #[cfg(feature = "cuda")]
    pub fn device_handle(&self) -> Option<VramHandle> {
        self.device_handle
    }
}

#[cfg(not(feature = "cuda"))]
impl CoherentEngram {
    pub fn upload_to_device(&mut self, _pool: &()) -> Result<(), GpuError> {
        Err(GpuError::NotAvailable)
    }

    pub fn download_to_host(&mut self, _pool: &()) -> Result<(), GpuError> {
        Err(GpuError::NotAvailable)
    }

    pub fn sync(&mut self, _pool: &()) -> Result<(), GpuError> {
        Err(GpuError::NotAvailable)
    }

    pub fn release_device(&mut self, _pool: &()) -> Result<(), GpuError> {
        Ok(())
    }
}
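
/// Owns a collection of `CoherentEngram`s keyed by a monotonically increasing
/// id, and exposes bulk synchronization and residency statistics.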
#[derive(Default)]
pub struct CoherencyManager {
    engrams: std::collections::HashMap<u64, CoherentEngram>,
    next_id: AtomicU64,
}

impl CoherencyManager {
    pub fn new() -> Self {
        Self {
            engrams: std::collections::HashMap::new(),
            next_id: AtomicU64::new(1),
        }
    }

    pub fn register(&mut self, engram: CoherentEngram) -> u64 {
        let id = self.next_id.fetch_add(1, Ordering::SeqCst);
        self.engrams.insert(id, engram);
        id
    }

    pub fn get(&self, id: u64) -> Option<&CoherentEngram> {
        self.engrams.get(&id)
    }

    pub fn get_mut(&mut self, id: u64) -> Option<&mut CoherentEngram> {
        self.engrams.get_mut(&id)
    }

    pub fn remove(&mut self, id: u64) -> Option<CoherentEngram> {
        self.engrams.remove(&id)
    }

    #[cfg(feature = "cuda")]
    pub fn sync_all(&mut self, pool: &VramPool) -> Result<(), GpuError> {
        for engram in self.engrams.values_mut() {
            if engram.state().needs_sync() {
                engram.sync(pool)?;
            }
        }
        Ok(())
    }

    pub fn stats(&self) -> CoherencyStats {
        let total = self.engrams.len();
        let host_only = self
            .engrams
            .values()
            .filter(|e| e.state() == CoherencyState::HostOnly)
            .count();
        let device_only = self
            .engrams
            .values()
            .filter(|e| e.state() == CoherencyState::DeviceOnly)
            .count();
        let synced = self
            .engrams
            .values()
            .filter(|e| e.state() == CoherencyState::Synced)
            .count();
        let dirty = self
            .engrams
            .values()
            .filter(|e| e.state().needs_sync())
            .count();

        CoherencyStats {
            total,
            host_only,
            device_only,
            synced,
            dirty,
        }
    }
}

#[derive(Clone, Debug, Default)]
pub struct CoherencyStats {
    pub total: usize,
    pub host_only: usize,
    pub device_only: usize,
    pub synced: usize,
    pub dirty: usize,
}
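
/// Storage tiers ordered from fastest to slowest; the discriminant doubles as
/// the priority used when picking the fastest valid copy.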
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum Tier {
    Vram = 0,
    Host = 1,
    Disk = 2,
}

impl Tier {
    pub fn priority(&self) -> u8 {
        match self {
            Tier::Vram => 0,
            Tier::Host => 1,
            Tier::Disk => 2,
        }
    }
}
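
/// Bit set over the three tiers, used to track which tiers hold a valid or
/// dirty copy of a block.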
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct TierMask(u8);

impl TierMask {
    pub const NONE: TierMask = TierMask(0);
    pub const VRAM: TierMask = TierMask(1 << 0);
    pub const HOST: TierMask = TierMask(1 << 1);
    pub const DISK: TierMask = TierMask(1 << 2);

    pub fn from_tier(tier: Tier) -> Self {
        match tier {
            Tier::Vram => Self::VRAM,
            Tier::Host => Self::HOST,
            Tier::Disk => Self::DISK,
        }
    }

    pub fn has(&self, tier: Tier) -> bool {
        (self.0 & Self::from_tier(tier).0) != 0
    }

    pub fn add(&mut self, tier: Tier) {
        self.0 |= TierMask::from_tier(tier).0;
    }

    pub fn remove(&mut self, tier: Tier) {
        self.0 &= !TierMask::from_tier(tier).0;
    }

    pub fn union(&self, other: TierMask) -> TierMask {
        TierMask(self.0 | other.0)
    }

    pub fn count(&self) -> u32 {
        self.0.count_ones()
    }

    pub fn any(&self) -> bool {
        self.0 != 0
    }

    pub fn fastest(&self) -> Option<Tier> {
        if self.has(Tier::Vram) {
            Some(Tier::Vram)
        } else if self.has(Tier::Host) {
            Some(Tier::Host)
        } else if self.has(Tier::Disk) {
            Some(Tier::Disk)
        } else {
            None
        }
    }

    pub fn iter(&self) -> impl Iterator<Item = Tier> {
        let mask = *self;
        [Tier::Vram, Tier::Host, Tier::Disk]
            .into_iter()
            .filter(move |&t| mask.has(t))
    }
}
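
/// How a write at one tier propagates to the others: `WriteBack` leaves the
/// written tier dirty until an explicit sync, `WriteThrough` also updates the
/// block's home tier immediately, and `WriteAll` refreshes every tier that
/// already held a copy.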
#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)]
pub enum WritePolicy {
    #[default]
    WriteBack,
    WriteThrough,
    WriteAll,
}
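
/// Per-block coherency metadata across the VRAM/host/disk hierarchy: which
/// tiers hold valid copies, which are dirty, the current owning tier, the
/// block's home tier, and a monotonically increasing write epoch.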
#[derive(Clone, Debug)]
pub struct TieredState {
    valid: TierMask,
    dirty: TierMask,
    owner: Option<Tier>,
    home: Tier,
    epoch: u64,
}

impl TieredState {
    pub fn new(tier: Tier, home: Tier) -> Self {
        Self {
            valid: TierMask::from_tier(tier),
            dirty: TierMask::NONE,
            owner: Some(tier),
            home,
            epoch: 0,
        }
    }

    pub fn host_resident() -> Self {
        Self::new(Tier::Host, Tier::Host)
    }

    pub fn disk_resident() -> Self {
        Self::new(Tier::Disk, Tier::Disk)
    }

    pub fn is_valid(&self, tier: Tier) -> bool {
        self.valid.has(tier)
    }

    pub fn is_dirty(&self, tier: Tier) -> bool {
        self.dirty.has(tier)
    }

    pub fn needs_sync(&self) -> bool {
        self.dirty.any()
    }

    pub fn fastest_valid(&self) -> Option<Tier> {
        self.valid.fastest()
    }

    pub fn owner(&self) -> Option<Tier> {
        self.owner
    }

    pub fn epoch(&self) -> u64 {
        self.epoch
    }

    pub fn has_valid_copy(&self, tier: Tier) -> bool {
        self.valid.has(tier)
    }

    /// Records a write performed at `tier` under the given policy, updating
    /// the valid/dirty masks, transferring ownership, and advancing the epoch.
    pub fn record_write(&mut self, tier: Tier, policy: WritePolicy) {
        match policy {
            WritePolicy::WriteBack => {
                // Only the written tier is current; it stays dirty until a
                // later sync propagates it.
                self.valid = TierMask::from_tier(tier);
                self.dirty = TierMask::from_tier(tier);
            }
            WritePolicy::WriteThrough => {
                // The write reaches the home tier as well, so nothing is left
                // dirty.
                self.valid = TierMask::from_tier(tier);
                self.valid.add(self.home);
                self.dirty = TierMask::NONE;
            }
            WritePolicy::WriteAll => {
                // Every tier that already held a copy is refreshed in place.
                self.valid.add(tier);
                self.dirty = TierMask::NONE;
            }
        }
        self.owner = Some(tier);
        self.epoch += 1;
    }

    pub fn mark_synced(&mut self, tier: Tier) {
        self.valid.add(tier);
        self.dirty.remove(tier);
    }

    pub fn invalidate(&mut self, tier: Tier) {
        self.valid.remove(tier);
        self.dirty.remove(tier);
        if self.owner == Some(tier) {
            self.owner = self.valid.fastest();
        }
    }

    pub fn sync_to(&mut self, target: Tier) -> bool {
        if !self.needs_sync() || self.is_valid(target) {
            return false;
        }
        self.valid.add(target);
        if target == self.home {
            self.dirty = TierMask::NONE;
        }
        true
    }

    pub fn tiers_needing_sync(&self) -> Vec<Tier> {
        if !self.needs_sync() {
            return vec![];
        }
        if !self.is_valid(self.home) {
            vec![self.home]
        } else {
            vec![]
        }
    }
}
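
/// A block of data tracked by the tiered coherency protocol, pairing an id
/// and size with its `TieredState` and a version counter.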
#[derive(Debug)]
pub struct TieredBlock {
    pub id: u64,
    pub size: usize,
    state: TieredState,
    version: AtomicU64,
}

impl TieredBlock {
    pub fn new_host(id: u64, size: usize) -> Self {
        Self {
            id,
            size,
            state: TieredState::host_resident(),
            version: AtomicU64::new(0),
        }
    }

    pub fn new_disk(id: u64, size: usize) -> Self {
        Self {
            id,
            size,
            state: TieredState::disk_resident(),
            version: AtomicU64::new(0),
        }
    }

    pub fn state(&self) -> &TieredState {
        &self.state
    }

    pub fn state_mut(&mut self) -> &mut TieredState {
        &mut self.state
    }

    pub fn version(&self) -> u64 {
        self.version.load(Ordering::Acquire)
    }

    pub fn bump_version(&self) {
        self.version.fetch_add(1, Ordering::AcqRel);
    }

    pub fn read(&self, tier: Tier) -> bool {
        self.state.has_valid_copy(tier)
    }

    pub fn write(&mut self, tier: Tier, policy: WritePolicy) {
        self.state.record_write(tier, policy);
        self.bump_version();
    }

    pub fn needs_sync(&self) -> bool {
        self.state.needs_sync()
    }
}
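
/// Coordinates synchronization across tiers: a global epoch counter, the
/// default write policy, and a queue of pending (block, from, to) transfers.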
#[derive(Debug, Default)]
pub struct SyncProtocol {
    epoch: AtomicU64,
    policy: WritePolicy,
    pending: std::sync::RwLock<Vec<(u64, Tier, Tier)>>,
}

impl SyncProtocol {
    pub fn new() -> Self {
        Self {
            epoch: AtomicU64::new(0),
            policy: WritePolicy::default(),
            pending: std::sync::RwLock::new(Vec::new()),
        }
    }

    pub fn with_policy(policy: WritePolicy) -> Self {
        Self {
            epoch: AtomicU64::new(0),
            policy,
            pending: std::sync::RwLock::new(Vec::new()),
        }
    }

    pub fn epoch(&self) -> u64 {
        self.epoch.load(Ordering::Acquire)
    }

    /// Advances the global epoch, returning the previous value.
    pub fn advance_epoch(&self) -> u64 {
        self.epoch.fetch_add(1, Ordering::AcqRel)
    }

    pub fn policy(&self) -> WritePolicy {
        self.policy
    }

    pub fn queue_sync(&self, block_id: u64, from: Tier, to: Tier) {
        let mut pending = self
            .pending
            .write()
            .expect("SyncProtocol pending lock poisoned in queue_sync");
        pending.push((block_id, from, to));
    }

    pub fn drain_pending(&self) -> Vec<(u64, Tier, Tier)> {
        let mut pending = self
            .pending
            .write()
            .expect("SyncProtocol pending lock poisoned in drain_pending");
        std::mem::take(&mut *pending)
    }

    pub fn has_pending(&self) -> bool {
        let pending = self
            .pending
            .read()
            .expect("SyncProtocol pending lock poisoned in has_pending");
        !pending.is_empty()
    }

    pub fn pending_count(&self) -> usize {
        let pending = self
            .pending
            .read()
            .expect("SyncProtocol pending lock poisoned in pending_count");
        pending.len()
    }
}
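
/// Aggregate counters describing how blocks are distributed across tiers and
/// how much synchronization work is outstanding.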
#[derive(Clone, Debug, Default)]
pub struct TieredCoherencyStats {
    pub total_blocks: usize,
    pub vram_copies: usize,
    pub host_copies: usize,
    pub disk_copies: usize,
    pub dirty_blocks: usize,
    pub sync_count: u64,
    pub epoch: u64,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_coherency_state_checks() {
        assert!(CoherencyState::HostOnly.host_is_current());
        assert!(!CoherencyState::HostOnly.device_is_current());

        assert!(CoherencyState::Synced.host_is_current());
        assert!(CoherencyState::Synced.device_is_current());

        assert!(CoherencyState::HostDirty.needs_sync());
        assert!(CoherencyState::DeviceDirty.needs_sync());
        assert!(!CoherencyState::Synced.needs_sync());
    }

    #[test]
    fn test_coherent_engram_new() {
        let data = vec![1, 2, 3, 4];
        let engram = CoherentEngram::new(data.clone());

        assert_eq!(engram.state(), CoherencyState::HostOnly);
        assert_eq!(engram.host_data(), &data);
        assert!(!engram.is_on_device());
    }

    #[test]
    fn test_coherent_engram_sparse_vec_roundtrip() {
        let vec = SparseVec {
            pos: vec![1, 5, 10, 100],
            neg: vec![2, 7, 50],
        };

        let engram = CoherentEngram::from_sparse_vec(&vec);
        let recovered = engram.to_sparse_vec().unwrap();

        assert_eq!(recovered.pos, vec.pos);
        assert_eq!(recovered.neg, vec.neg);
    }

    #[test]
    fn test_coherent_engram_modify_marks_dirty() {
        let mut engram = CoherentEngram::new(vec![1, 2, 3]);
        assert_eq!(engram.state(), CoherencyState::HostOnly);

        engram.state = CoherencyState::Synced;
        let _ = engram.host_data_mut();
        assert_eq!(engram.state(), CoherencyState::HostDirty);
    }
940
941 #[test]
942 fn test_coherency_manager() {
943 let mut manager = CoherencyManager::new();
944
945 let e1 = CoherentEngram::new(vec![1, 2, 3]);
946 let e2 = CoherentEngram::new(vec![4, 5, 6]);
947
948 let id1 = manager.register(e1);
949 let id2 = manager.register(e2);
950
951 assert!(manager.get(id1).is_some());
952 assert!(manager.get(id2).is_some());
953 assert!(manager.get(999).is_none());
954
955 let stats = manager.stats();
956 assert_eq!(stats.total, 2);
957 assert_eq!(stats.host_only, 2);
958 }
959
960 #[test]
963 fn test_tier_priority() {
964 assert_eq!(Tier::Vram.priority(), 0);
965 assert_eq!(Tier::Host.priority(), 1);
966 assert_eq!(Tier::Disk.priority(), 2);
967 }
968
969 #[test]
970 fn test_tier_mask_operations() {
971 let mut mask = TierMask::NONE;
972 assert!(!mask.any());
973 assert_eq!(mask.count(), 0);
974
975 mask.add(Tier::Host);
976 assert!(mask.has(Tier::Host));
977 assert!(!mask.has(Tier::Vram));
978 assert_eq!(mask.count(), 1);
979
980 mask.add(Tier::Vram);
981 assert!(mask.has(Tier::Host));
982 assert!(mask.has(Tier::Vram));
983 assert_eq!(mask.count(), 2);
984
985 mask.remove(Tier::Host);
986 assert!(!mask.has(Tier::Host));
987 assert!(mask.has(Tier::Vram));
988 assert_eq!(mask.count(), 1);
989 }
990
991 #[test]
992 fn test_tier_mask_fastest() {
993 let mut mask = TierMask::NONE;
994 assert_eq!(mask.fastest(), None);
995
996 mask.add(Tier::Disk);
997 assert_eq!(mask.fastest(), Some(Tier::Disk));
998
999 mask.add(Tier::Host);
1000 assert_eq!(mask.fastest(), Some(Tier::Host));
1001
1002 mask.add(Tier::Vram);
1003 assert_eq!(mask.fastest(), Some(Tier::Vram));
1004 }
1005
1006 #[test]
1007 fn test_tier_mask_union() {
1008 let a = TierMask::HOST;
1009 let b = TierMask::DISK;
1010 let union = a.union(b);
1011
1012 assert!(union.has(Tier::Host));
1013 assert!(union.has(Tier::Disk));
1014 assert!(!union.has(Tier::Vram));
1015 }
1016
1017 #[test]
1018 fn test_tiered_state_new() {
1019 let state = TieredState::host_resident();
1020 assert!(state.is_valid(Tier::Host));
1021 assert!(!state.is_valid(Tier::Vram));
1022 assert!(!state.is_valid(Tier::Disk));
1023 assert!(!state.needs_sync());
1024 }
1025
1026 #[test]
1027 fn test_tiered_state_write_back() {
1028 let mut state = TieredState::host_resident();
1029
1030 state.record_write(Tier::Vram, WritePolicy::WriteBack);
1032
1033 assert!(state.is_valid(Tier::Vram));
1034 assert!(!state.is_valid(Tier::Host)); assert!(state.is_dirty(Tier::Vram));
1036 assert!(state.needs_sync());
1037 assert_eq!(state.owner(), Some(Tier::Vram));
1038 }
1039
1040 #[test]
1041 fn test_tiered_state_write_through() {
1042 let mut state = TieredState::host_resident();
1043
1044 state.record_write(Tier::Vram, WritePolicy::WriteThrough);
1046
1047 assert!(state.is_valid(Tier::Vram));
1048 assert!(state.is_valid(Tier::Host)); assert!(!state.needs_sync()); }
1051
1052 #[test]
1053 fn test_tiered_state_write_all() {
1054 let mut state = TieredState::host_resident();
1055
1056 state.mark_synced(Tier::Vram);
1058 state.mark_synced(Tier::Disk);
1059 assert!(state.is_valid(Tier::Host));
1060 assert!(state.is_valid(Tier::Vram));
1061 assert!(state.is_valid(Tier::Disk));
1062
1063 state.record_write(Tier::Host, WritePolicy::WriteAll);
1065
1066 assert!(state.is_valid(Tier::Host));
1068 assert!(!state.needs_sync()); assert_eq!(state.owner(), Some(Tier::Host));
1072 }
1073
1074 #[test]
1075 fn test_has_valid_copy() {
1076 let state = TieredState::host_resident();
1077
1078 assert!(state.has_valid_copy(Tier::Host));
1079 assert!(!state.has_valid_copy(Tier::Vram));
1080 assert!(!state.has_valid_copy(Tier::Disk));
1081 }
1082
1083 #[test]
1084 fn test_tiered_state_sync() {
1085 let mut state = TieredState::host_resident();
1086 state.record_write(Tier::Vram, WritePolicy::WriteBack);
1087
1088 let synced = state.sync_to(Tier::Host);
1090 assert!(synced);
1091 assert!(state.is_valid(Tier::Host));
1092 assert!(state.is_valid(Tier::Vram));
1093 assert!(!state.needs_sync());
1094 }
1095
1096 #[test]
1097 fn test_tiered_state_invalidate() {
1098 let mut state = TieredState::host_resident();
1099 state.mark_synced(Tier::Vram);
1100 assert!(state.is_valid(Tier::Vram));
1101
1102 state.invalidate(Tier::Vram);
1103 assert!(!state.is_valid(Tier::Vram));
1104 assert!(state.is_valid(Tier::Host));
1105 }
1106
1107 #[test]
1108 fn test_tiered_block() {
1109 let mut block = TieredBlock::new_host(1, 1024);
1110 assert_eq!(block.id, 1);
1111 assert_eq!(block.size, 1024);
1112 assert_eq!(block.version(), 0);
1113
1114 block.write(Tier::Host, WritePolicy::WriteBack);
1115 assert_eq!(block.version(), 1);
1116 assert!(block.needs_sync());
1117 }
1118
1119 #[test]
1120 fn test_sync_protocol() {
1121 let protocol = SyncProtocol::new();
1122 assert_eq!(protocol.epoch(), 0);
1123 assert!(!protocol.has_pending());
1124
1125 protocol.queue_sync(1, Tier::Vram, Tier::Host);
1126 protocol.queue_sync(2, Tier::Host, Tier::Disk);
1127 assert!(protocol.has_pending());
1128 assert_eq!(protocol.pending_count(), 2);
1129
1130 let pending = protocol.drain_pending();
1131 assert_eq!(pending.len(), 2);
1132 assert!(!protocol.has_pending());
1133
1134 let new_epoch = protocol.advance_epoch();
1135 assert_eq!(new_epoch, 0);
1136 assert_eq!(protocol.epoch(), 1);
1137 }
1138
1139 #[test]
1140 fn test_sync_protocol_policy() {
1141 let default = SyncProtocol::new();
1142 assert_eq!(default.policy(), WritePolicy::WriteBack);
1143
1144 let writethrough = SyncProtocol::with_policy(WritePolicy::WriteThrough);
1145 assert_eq!(writethrough.policy(), WritePolicy::WriteThrough);
1146 }
1147}