scirs2_core/memory_efficient/prefetch.rs

//! Prefetching for compressed memory-mapped arrays.
//!
//! Tracks block access patterns (sequential, strided, random), predicts which
//! blocks are likely to be needed next, and optionally preloads them from a
//! background thread.

use std::collections::{HashSet, VecDeque};
#[cfg(feature = "memory_compression")]
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

#[cfg(feature = "memory_compression")]
use super::compressed_memmap::CompressedMemMappedArray;
use crate::error::CoreResult;
#[cfg(feature = "memory_compression")]
use crate::error::{CoreError, ErrorContext};

/// A detected block access pattern.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessPattern {
    /// Consecutive blocks, each one past the previous.
    Sequential,

    /// Blocks separated by a fixed stride.
    Strided(usize),

    /// No detectable pattern.
    Random,

    /// A caller-defined pattern.
    Custom,
}

/// Configuration for block prefetching.
#[derive(Debug, Clone)]
pub struct PrefetchConfig {
    /// Whether prefetching is enabled.
    pub enabled: bool,

    /// Number of blocks to prefetch ahead of the current access.
    pub prefetch_count: usize,

    /// Number of recent accesses kept for pattern detection.
    pub history_size: usize,

    /// Minimum history length before a pattern is inferred.
    pub min_pattern_length: usize,

    /// Whether to prefetch from a background thread.
    pub async_prefetch: bool,

    /// Receive timeout for the background prefetch thread.
    pub prefetch_timeout: Duration,
}

impl Default for PrefetchConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            prefetch_count: 2,
            history_size: 32,
            min_pattern_length: 4,
            async_prefetch: true,
            prefetch_timeout: Duration::from_millis(100),
        }
    }
}

/// Builder for [`PrefetchConfig`].
#[derive(Debug, Clone, Default)]
pub struct PrefetchConfigBuilder {
    config: PrefetchConfig,
}

impl PrefetchConfigBuilder {
    /// Creates a builder initialized with the default configuration.
    pub fn new() -> Self {
        Self::default()
    }

    /// Enables or disables prefetching.
    pub const fn enabled(mut self, enabled: bool) -> Self {
        self.config.enabled = enabled;
        self
    }

    /// Sets the number of blocks to prefetch ahead.
    pub const fn prefetch_count(mut self, count: usize) -> Self {
        self.config.prefetch_count = count;
        self
    }

    /// Sets the access-history capacity.
    pub const fn history_size(mut self, size: usize) -> Self {
        self.config.history_size = size;
        self
    }

    /// Sets the minimum history length required for pattern detection.
    pub const fn min_pattern_length(mut self, length: usize) -> Self {
        self.config.min_pattern_length = length;
        self
    }

    /// Enables or disables background (asynchronous) prefetching.
    pub const fn async_prefetch(mut self, async_prefetch: bool) -> Self {
        self.config.async_prefetch = async_prefetch;
        self
    }

    /// Sets the background thread's receive timeout.
    pub const fn prefetch_timeout(mut self, timeout: Duration) -> Self {
        self.config.prefetch_timeout = timeout;
        self
    }

    /// Consumes the builder and returns the finished configuration.
    pub fn build(self) -> PrefetchConfig {
        self.config
    }
}

/// Tracks block accesses and predicts future accesses.
pub trait AccessPatternTracker: std::fmt::Debug {
    /// Records an access to the given block.
    fn record_access(&mut self, block_idx: usize);

    /// Predicts up to `count` blocks likely to be accessed next.
    fn predict_next_blocks(&self, count: usize) -> Vec<usize>;

    /// Returns the currently detected access pattern.
    fn current_pattern(&self) -> AccessPattern;

    /// Clears the recorded access history.
    fn clear_history(&mut self);
}

/// Default [`AccessPatternTracker`] that detects sequential and strided
/// patterns from a sliding window of recent block accesses.
#[derive(Debug)]
pub struct BlockAccessTracker {
    /// Prefetching configuration.
    config: PrefetchConfig,

    /// Sliding window of recently accessed block indices.
    history: VecDeque<usize>,

    /// Most recently detected pattern.
    current_pattern: AccessPattern,

    /// Detected stride, if the pattern is strided.
    stride: Option<usize>,

    /// Time of the most recent recorded access.
    last_update: Instant,
}

impl BlockAccessTracker {
    /// Creates a new tracker with the given configuration.
    pub fn new(config: PrefetchConfig) -> Self {
        let history_size = config.history_size;
        Self {
            config,
            history: VecDeque::with_capacity(history_size),
            current_pattern: AccessPattern::Random,
            stride: None,
            last_update: Instant::now(),
        }
    }

    /// Re-examines the history and updates the detected pattern.
    fn detect_pattern(&mut self) {
        if self.history.len() < self.config.min_pattern_length {
            self.current_pattern = AccessPattern::Random;
            return;
        }

        // Check for a sequential pattern: each block follows the previous.
        let mut is_sequential = true;
        let mut prev = *self.history.front().expect("history is non-empty");

        for &block_idx in self.history.iter().skip(1) {
            if block_idx != prev + 1 {
                is_sequential = false;
                break;
            }
            prev = block_idx;
        }

        if is_sequential {
            self.current_pattern = AccessPattern::Sequential;
            return;
        }

        // Check for a fixed-stride pattern. A descending or zero stride
        // cannot be represented by `Strided(usize)` (and the original
        // unchecked subtraction would underflow), so treat it as random.
        let (first, second) = match (self.history.front(), self.history.get(1)) {
            (Some(&f), Some(&s)) => (f, s),
            _ => {
                self.current_pattern = AccessPattern::Random;
                return;
            }
        };
        let stride = match second.checked_sub(first) {
            Some(s) if s > 0 => s,
            _ => {
                self.current_pattern = AccessPattern::Random;
                return;
            }
        };

        let mut is_strided = true;
        let mut prev = first;
        for &block_idx in self.history.iter().skip(1) {
            if block_idx != prev + stride {
                is_strided = false;
                break;
            }
            prev = block_idx;
        }

        if is_strided {
            self.current_pattern = AccessPattern::Strided(stride);
            self.stride = Some(stride);
            return;
        }

        // No recognizable pattern.
        self.current_pattern = AccessPattern::Random;
    }
}

impl AccessPatternTracker for BlockAccessTracker {
    fn record_access(&mut self, block_idx: usize) {
        self.history.push_back(block_idx);

        // Keep the history bounded to the configured window size.
        if self.history.len() > self.config.history_size {
            self.history.pop_front();
        }

        if self.history.len() >= self.config.min_pattern_length {
            self.detect_pattern();
        }

        self.last_update = Instant::now();
    }

    fn predict_next_blocks(&self, count: usize) -> Vec<usize> {
        if self.history.is_empty() {
            return Vec::new();
        }

        let mut predictions = Vec::with_capacity(count);
        let latest = *self.history.back().expect("history is non-empty");

        match self.current_pattern {
            AccessPattern::Sequential => {
                for i in 1..=count {
                    predictions.push(latest + i);
                }
            }
            AccessPattern::Strided(stride) => {
                for i in 1..=count {
                    predictions.push(latest + stride * i);
                }
            }
            _ => {
                // No pattern detected: guess the blocks nearest the latest
                // access, alternating below and above it.
                if latest > 0 {
                    predictions.push(latest - 1);
                }
                predictions.push(latest + 1);

                let mut offset = 2;
                while predictions.len() < count {
                    if latest >= offset {
                        predictions.push(latest - offset);
                    }
                    predictions.push(latest + offset);
                    offset += 1;
                }

                predictions.truncate(count);
            }
        }

        predictions
    }

    fn current_pattern(&self) -> AccessPattern {
        self.current_pattern
    }

    fn clear_history(&mut self) {
        self.history.clear();
        self.current_pattern = AccessPattern::Random;
        self.stride = None;
    }
}

/// Shared prefetching state: the pattern tracker plus bookkeeping for blocks
/// that are being or have been prefetched.
#[derive(Debug)]
#[allow(dead_code)]
pub struct PrefetchingState {
    /// Prefetching configuration.
    config: PrefetchConfig,

    /// Access-pattern tracker used for prediction.
    tracker: Box<dyn AccessPatternTracker + Send + Sync>,

    /// Blocks currently being prefetched.
    prefetching: HashSet<usize>,

    /// Blocks already prefetched but not yet accessed.
    prefetched: HashSet<usize>,

    /// Prefetching statistics.
    #[allow(dead_code)]
    stats: PrefetchStats,
}

/// Statistics about prefetching effectiveness.
#[derive(Debug, Default, Clone)]
pub struct PrefetchStats {
    /// Total number of blocks prefetched.
    pub prefetch_count: usize,

    /// Accesses that hit a prefetched block.
    pub prefetch_hits: usize,

    /// Accesses that missed the prefetched set.
    pub prefetch_misses: usize,

    /// `prefetch_hits / (prefetch_hits + prefetch_misses)`.
    pub hit_rate: f64,
}

impl PrefetchingState {
    /// Creates prefetching state with the given configuration.
    #[allow(dead_code)]
    pub fn new(config: PrefetchConfig) -> Self {
        Self {
            tracker: Box::new(BlockAccessTracker::new(config.clone())),
            config,
            prefetching: HashSet::new(),
            prefetched: HashSet::new(),
            stats: PrefetchStats::default(),
        }
    }

    /// Records an access to a block and updates hit/miss statistics.
    #[allow(dead_code)]
    pub fn record_access(&mut self, block_idx: usize) {
        self.tracker.record_access(block_idx);

        if self.prefetched.contains(&block_idx) {
            self.stats.prefetch_hits += 1;
            self.prefetched.remove(&block_idx);
        } else {
            self.stats.prefetch_misses += 1;
        }

        let total = self.stats.prefetch_hits + self.stats.prefetch_misses;
        if total > 0 {
            self.stats.hit_rate = self.stats.prefetch_hits as f64 / total as f64;
        }
    }

    /// Returns the predicted blocks that still need prefetching.
    #[allow(dead_code)]
    pub fn get_blocks_to_prefetch(&self) -> Vec<usize> {
        if !self.config.enabled {
            return Vec::new();
        }

        let predicted = self.tracker.predict_next_blocks(self.config.prefetch_count);

        // Skip blocks that are already prefetched or in flight.
        predicted
            .into_iter()
            .filter(|&block_idx| {
                !self.prefetched.contains(&block_idx) && !self.prefetching.contains(&block_idx)
            })
            .collect()
    }

    /// Marks a block as currently being prefetched.
    #[allow(dead_code)]
    pub fn mark_prefetching(&mut self, block_idx: usize) {
        self.prefetching.insert(block_idx);
    }

    /// Marks a block as prefetched and available.
    #[allow(dead_code)]
    pub fn mark_prefetched(&mut self, block_idx: usize) {
        self.prefetching.remove(&block_idx);
        self.prefetched.insert(block_idx);
        self.stats.prefetch_count += 1;
    }

    /// Returns a snapshot of the prefetching statistics.
    #[allow(dead_code)]
    pub fn stats(&self) -> PrefetchStats {
        self.stats.clone()
    }
}

/// Prefetching controls for block-based arrays.
pub trait Prefetching {
    /// Enables prefetching with the given configuration.
    fn enable_prefetching(&mut self, config: PrefetchConfig) -> CoreResult<()>;

    /// Disables prefetching and stops any background work.
    fn disable_prefetching(&mut self) -> CoreResult<()>;

    /// Returns current prefetching statistics.
    fn prefetch_stats(&self) -> CoreResult<PrefetchStats>;

    /// Prefetches a single block by index.
    fn prefetch_block(&mut self, block_idx: usize) -> CoreResult<()>;

    /// Prefetches several blocks by index.
    fn prefetch_indices(&mut self, indices: &[usize]) -> CoreResult<()>;

    /// Clears all prefetch bookkeeping and history.
    fn clear_prefetch_state(&mut self) -> CoreResult<()>;
}

/// A [`CompressedMemMappedArray`] wrapper that transparently prefetches
/// blocks based on the observed access pattern.
#[cfg(feature = "memory_compression")]
#[derive(Debug)]
pub struct PrefetchingCompressedArray<A: Clone + Copy + 'static + Send + Sync> {
    /// The wrapped array.
    array: CompressedMemMappedArray<A>,

    /// Shared prefetching state.
    prefetch_state: Arc<Mutex<PrefetchingState>>,

    /// Whether prefetching is currently enabled.
    prefetching_enabled: bool,

    /// Handle to the background prefetch thread, if running.
    #[allow(dead_code)]
    prefetch_thread: Option<std::thread::JoinHandle<()>>,

    /// Channel used to send commands to the background thread.
    #[allow(dead_code)]
    prefetch_sender: Option<std::sync::mpsc::Sender<PrefetchCommand>>,
}

/// Commands understood by the background prefetch thread.
#[cfg(feature = "memory_compression")]
enum PrefetchCommand {
    /// Prefetch the block with the given index.
    Prefetch(usize),

    /// Shut the thread down.
    Stop,
}

#[cfg(feature = "memory_compression")]
impl<A: Clone + Copy + 'static + Send + Sync> PrefetchingCompressedArray<A> {
    /// Wraps an array with prefetching disabled by default.
    pub fn new(array: CompressedMemMappedArray<A>) -> Self {
        let prefetch_state = Arc::new(Mutex::new(PrefetchingState::new(PrefetchConfig::default())));

        Self {
            array,
            prefetch_state,
            prefetching_enabled: false,
            prefetch_thread: None,
            prefetch_sender: None,
        }
    }

    /// Wraps an array and immediately enables prefetching with `config`.
    pub fn new_with_config(
        array: CompressedMemMappedArray<A>,
        config: PrefetchConfig,
    ) -> CoreResult<Self> {
        let mut prefetching_array = Self::new(array);
        prefetching_array.enable_prefetching(config)?;
        Ok(prefetching_array)
    }

    /// Spawns the background thread that services prefetch requests.
    fn start_background_prefetching(
        &mut self,
        state: Arc<Mutex<PrefetchingState>>,
    ) -> CoreResult<()> {
        let (sender, receiver) = std::sync::mpsc::channel();
        self.prefetch_sender = Some(sender);

        let array = self.array.clone();
        let prefetch_state = state.clone();

        let timeout = {
            let guard = self.prefetch_state.lock().map_err(|_| {
                CoreError::MutexError(ErrorContext::new(
                    "Failed to lock prefetch state".to_string(),
                ))
            })?;
            guard.config.prefetch_timeout
        };

        let thread = std::thread::spawn(move || {
            loop {
                match receiver.recv_timeout(timeout) {
                    Ok(PrefetchCommand::Prefetch(block_idx)) => {
                        // Record the block as in flight.
                        if let Ok(mut guard) = prefetch_state.lock() {
                            guard.mark_prefetching(block_idx);
                        }

                        // Load the block; on success, mark it prefetched.
                        if array.preload_block(block_idx).is_ok() {
                            if let Ok(mut guard) = prefetch_state.lock() {
                                guard.mark_prefetched(block_idx);
                            }
                        }
                    }
                    Ok(PrefetchCommand::Stop) => {
                        break;
                    }
                    Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
                        // No explicit request arrived: prefetch whatever the
                        // tracker currently predicts.
                        if let Ok(guard) = prefetch_state.lock() {
                            let blocks = guard.get_blocks_to_prefetch();

                            if !blocks.is_empty() {
                                // Release the lock while loading blocks.
                                drop(guard);

                                for &block_idx in &blocks {
                                    if let Ok(mut guard) = prefetch_state.lock() {
                                        guard.mark_prefetching(block_idx);
                                    }

                                    if array.preload_block(block_idx).is_ok() {
                                        if let Ok(mut guard) = prefetch_state.lock() {
                                            guard.mark_prefetched(block_idx);
                                        }
                                    }
                                }
                            }
                        }
                    }
                    Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => {
                        break;
                    }
                }
            }
        });

        self.prefetch_thread = Some(thread);
        Ok(())
    }

    /// Stops the background prefetch thread, if one is running.
    fn stop_prefetch_thread(&mut self) -> CoreResult<()> {
        if let Some(sender) = self.prefetch_sender.take() {
            sender.send(PrefetchCommand::Stop).map_err(|_| {
                CoreError::ThreadError(ErrorContext::new("Failed to send stop command".to_string()))
            })?;

            if let Some(thread) = self.prefetch_thread.take() {
                thread.join().map_err(|_| {
                    CoreError::ThreadError(ErrorContext::new(
                        "Failed to join prefetch thread".to_string(),
                    ))
                })?;
            }
        }

        Ok(())
    }

    /// Returns a reference to the wrapped array.
    pub const fn inner(&self) -> &CompressedMemMappedArray<A> {
        &self.array
    }

    /// Returns a mutable reference to the wrapped array.
    pub fn inner_mut(&mut self) -> &mut CompressedMemMappedArray<A> {
        &mut self.array
    }

    /// Sends an asynchronous prefetch request for the given block.
    fn request_prefetch(&self, block_idx: usize) -> CoreResult<()> {
        if let Some(sender) = &self.prefetch_sender {
            sender
                .send(PrefetchCommand::Prefetch(block_idx))
                .map_err(|_| {
                    CoreError::ThreadError(ErrorContext::new(
                        "Failed to send prefetch command".to_string(),
                    ))
                })?;
        }

        Ok(())
    }
}

#[cfg(feature = "memory_compression")]
impl<A: Clone + Copy + 'static + Send + Sync> Prefetching for PrefetchingCompressedArray<A> {
    fn enable_prefetching(&mut self, config: PrefetchConfig) -> CoreResult<()> {
        if self.prefetching_enabled {
            let current_config = {
                let guard = self.prefetch_state.lock().map_err(|_| {
                    CoreError::MutexError(ErrorContext::new(
                        "Failed to lock prefetch state".to_string(),
                    ))
                })?;
                guard.config.clone()
            };

            // If the settings that shape the tracker and background thread
            // are unchanged, just swap in the new configuration.
            if current_config.async_prefetch == config.async_prefetch
                && current_config.prefetch_count == config.prefetch_count
                && current_config.history_size == config.history_size
            {
                let mut guard = self.prefetch_state.lock().map_err(|_| {
                    CoreError::MutexError(ErrorContext::new(
                        "Failed to lock prefetch state".to_string(),
                    ))
                })?;
                guard.config = config;
                return Ok(());
            }

            // Otherwise tear everything down and rebuild below.
            self.disable_prefetching()?;
        }

        let prefetch_state = Arc::new(Mutex::new(PrefetchingState::new(config.clone())));
        self.prefetch_state = prefetch_state.clone();

        if config.async_prefetch {
            self.start_background_prefetching(prefetch_state)?;
        }

        self.prefetching_enabled = true;
        Ok(())
    }

    fn disable_prefetching(&mut self) -> CoreResult<()> {
        if self.prefetching_enabled {
            self.stop_prefetch_thread()?;

            let mut guard = self.prefetch_state.lock().map_err(|_| {
                CoreError::MutexError(ErrorContext::new(
                    "Failed to lock prefetch state".to_string(),
                ))
            })?;

            guard.config.enabled = false;

            self.prefetching_enabled = false;
        }

        Ok(())
    }

    fn prefetch_stats(&self) -> CoreResult<PrefetchStats> {
        let guard = self.prefetch_state.lock().map_err(|_| {
            CoreError::MutexError(ErrorContext::new(
                "Failed to lock prefetch state".to_string(),
            ))
        })?;

        Ok(guard.stats())
    }

    fn prefetch_block(&mut self, block_idx: usize) -> CoreResult<()> {
        if !self.prefetching_enabled {
            return Ok(());
        }

        // Skip blocks that are already prefetched or in flight.
        let should_prefetch = {
            let guard = self.prefetch_state.lock().map_err(|_| {
                CoreError::MutexError(ErrorContext::new(
                    "Failed to lock prefetch state".to_string(),
                ))
            })?;

            !guard.prefetched.contains(&block_idx) && !guard.prefetching.contains(&block_idx)
        };

        if should_prefetch {
            let is_async = {
                let guard = self.prefetch_state.lock().map_err(|_| {
                    CoreError::MutexError(ErrorContext::new(
                        "Failed to lock prefetch state".to_string(),
                    ))
                })?;

                guard.config.async_prefetch
            };

            if is_async {
                // Hand the request off to the background thread.
                self.request_prefetch(block_idx)?;
            } else {
                // Synchronous path: load the block on this thread.
                {
                    let mut guard = self.prefetch_state.lock().map_err(|_| {
                        CoreError::MutexError(ErrorContext::new(
                            "Failed to lock prefetch state".to_string(),
                        ))
                    })?;

                    guard.mark_prefetching(block_idx);
                }

                self.array.preload_block(block_idx)?;

                let mut guard = self.prefetch_state.lock().map_err(|_| {
                    CoreError::MutexError(ErrorContext::new(
                        "Failed to lock prefetch state".to_string(),
                    ))
                })?;

                guard.mark_prefetched(block_idx);
            }
        }

        Ok(())
    }

    fn prefetch_indices(&mut self, indices: &[usize]) -> CoreResult<()> {
        if !self.prefetching_enabled {
            return Ok(());
        }

        for &block_idx in indices {
            self.prefetch_block(block_idx)?;
        }

        Ok(())
    }

    fn clear_prefetch_state(&mut self) -> CoreResult<()> {
        let mut guard = self.prefetch_state.lock().map_err(|_| {
            CoreError::MutexError(ErrorContext::new(
                "Failed to lock prefetch state".to_string(),
            ))
        })?;

        guard.prefetched.clear();
        guard.prefetching.clear();
        guard.tracker.clear_history();

        Ok(())
    }
}

#[cfg(feature = "memory_compression")]
impl<A: Clone + Copy + 'static + Send + Sync> CompressedMemMappedArray<A> {
    /// Wraps this array in a [`PrefetchingCompressedArray`] with defaults.
    pub fn with_prefetching(self) -> PrefetchingCompressedArray<A> {
        PrefetchingCompressedArray::new(self)
    }

    /// Wraps this array with prefetching enabled using `config`.
    pub fn with_prefetching_config(
        self,
        config: PrefetchConfig,
    ) -> CoreResult<PrefetchingCompressedArray<A>> {
        PrefetchingCompressedArray::new_with_config(self, config)
    }
}
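
// Usage sketch (illustrative, not from the original source): given some
// `array: CompressedMemMappedArray<f64>` constructed elsewhere, prefetching
// is layered on with the extension methods above:
//
//     let mut prefetching = array.with_prefetching();
//     prefetching.enable_prefetching(PrefetchConfig::default())?;
//     let value = prefetching.get(&[0, 0])?; // records access, prefetches ahead
//     let stats = prefetching.prefetch_stats()?;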

#[cfg(feature = "memory_compression")]
impl<A> std::ops::Deref for PrefetchingCompressedArray<A>
where
    A: Clone + Copy + 'static + Send + Sync,
{
    type Target = CompressedMemMappedArray<A>;

    fn deref(&self) -> &Self::Target {
        &self.array
    }
}

#[cfg(feature = "memory_compression")]
impl<A: Clone + Copy + 'static + Send + Sync> PrefetchingCompressedArray<A> {
    /// Returns the element at `indices`, recording the access and issuing
    /// prefetch requests for the predicted next blocks.
    pub fn get(&self, indices: &[usize]) -> CoreResult<A> {
        let flat_index = self.calculate_flat_index(indices)?;
        let block_idx = flat_index / self.metadata().block_size;

        if self.prefetching_enabled {
            let mut guard = self.prefetch_state.lock().map_err(|_| {
                CoreError::MutexError(ErrorContext::new(
                    "Failed to lock prefetch state".to_string(),
                ))
            })?;

            guard.record_access(block_idx);

            let to_prefetch = guard.get_blocks_to_prefetch();

            drop(guard);

            // Best-effort: queue the predicted blocks; failures are ignored.
            for &predicted_idx in &to_prefetch {
                let _ = self.request_prefetch(predicted_idx);
            }
        }

        self.array.get(indices)
    }

    /// Converts multi-dimensional `indices` into a flat (row-major) index.
    fn calculate_flat_index(&self, indices: &[usize]) -> CoreResult<usize> {
        if indices.len() != self.metadata().shape.len() {
            return Err(CoreError::DimensionError(ErrorContext::new(format!(
                "Expected {} indices, got {}",
                self.metadata().shape.len(),
                indices.len()
            ))));
        }

        // Bounds-check each index against its own dimension.
        for (dim, &idx) in indices.iter().enumerate() {
            if idx >= self.metadata().shape[dim] {
                return Err(CoreError::IndexError(ErrorContext::new(format!(
                    "Index {} out of bounds for dimension {} (max {})",
                    idx,
                    dim,
                    self.metadata().shape[dim] - 1
                ))));
            }
        }

        // Accumulate the row-major flat index.
        let mut flat_index = 0;
        let mut stride = 1;
        for i in (0..indices.len()).rev() {
            flat_index += indices[i] * stride;
            if i > 0 {
                stride *= self.metadata().shape[i];
            }
        }

        Ok(flat_index)
    }

    /// Returns a slice of the array, recording the touched blocks and issuing
    /// prefetch requests for the predicted next blocks.
    pub fn slice(
        &self,
        ranges: &[(usize, usize)],
    ) -> CoreResult<crate::ndarray::Array<A, crate::ndarray::IxDyn>> {
        if self.prefetching_enabled {
            let blocks = self.calculate_blocks_for_slice(ranges)?;

            let mut guard = self.prefetch_state.lock().map_err(|_| {
                CoreError::MutexError(ErrorContext::new(
                    "Failed to lock prefetch state".to_string(),
                ))
            })?;

            for &block_idx in &blocks {
                guard.record_access(block_idx);
            }

            let to_prefetch = guard.get_blocks_to_prefetch();

            drop(guard);

            // Best-effort: queue the predicted blocks; failures are ignored.
            for &predicted_idx in &to_prefetch {
                let _ = self.request_prefetch(predicted_idx);
            }
        }

        self.array.slice(ranges)
    }

    /// Computes the set of block indices touched by a slice.
    fn calculate_blocks_for_slice(&self, ranges: &[(usize, usize)]) -> CoreResult<HashSet<usize>> {
        if ranges.len() != self.metadata().shape.len() {
            return Err(CoreError::DimensionError(ErrorContext::new(format!(
                "Expected {} ranges, got {}",
                self.metadata().shape.len(),
                ranges.len()
            ))));
        }

        // Validate each range against its own dimension.
        let mut result_shape = Vec::with_capacity(ranges.len());
        for (dim, &(start, end)) in ranges.iter().enumerate() {
            if start >= end {
                return Err(CoreError::ValueError(ErrorContext::new(format!(
                    "Invalid range for dimension {}: {}..{}",
                    dim, start, end
                ))));
            }
            if end > self.metadata().shape[dim] {
                return Err(CoreError::IndexError(ErrorContext::new(format!(
                    "Range {}..{} out of bounds for dimension {} (max {})",
                    start,
                    end,
                    dim,
                    self.metadata().shape[dim]
                ))));
            }
            result_shape.push(end - start);
        }

        // Row-major strides for the full array shape.
        let mut strides = Vec::with_capacity(self.metadata().shape.len());
        let mut stride = 1;
        for i in (0..self.metadata().shape.len()).rev() {
            strides.push(stride);
            if i > 0 {
                stride *= self.metadata().shape[i];
            }
        }
        strides.reverse();

        let mut blocks = HashSet::new();
        let block_size = self.metadata().block_size;

        // Enumerate the 2^n corners of the slice hyper-rectangle.
        let mut corners = Vec::with_capacity(1 << ranges.len());
        corners.push(vec![0; ranges.len()]);

        for dim in 0..ranges.len() {
            let mut new_corners = Vec::new();
            for corner in &corners {
                let mut corner1 = corner.clone();
                let mut corner2 = corner.clone();
                corner1[dim] = 0;
                corner2[dim] = result_shape[dim] - 1;
                new_corners.push(corner1);
                new_corners.push(corner2);
            }
            corners = new_corners;
        }

        // Map each corner to its containing block.
        for corner in corners {
            let mut flat_index = 0;
            for (dim, &offset) in corner.iter().enumerate() {
                flat_index += (ranges[dim].0 + offset) * strides[dim];
            }

            let block_idx = flat_index / block_size;
            blocks.insert(block_idx);
        }

        // Conservatively include every block between the extremes, since the
        // slice may touch interior blocks that no corner lands in.
        if blocks.len() > 1 {
            let min_block = *blocks.iter().min().expect("blocks is non-empty");
            let max_block = *blocks.iter().max().expect("blocks is non-empty");

            for block_idx in min_block..=max_block {
                blocks.insert(block_idx);
            }
        }

        Ok(blocks)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_access_pattern_detection_sequential() {
        let config = PrefetchConfig {
            min_pattern_length: 4,
            ..Default::default()
        };

        let mut tracker = BlockAccessTracker::new(config);

        for i in 0..10 {
            tracker.record_access(i);
        }

        assert_eq!(tracker.current_pattern(), AccessPattern::Sequential);

        let predictions = tracker.predict_next_blocks(3);
        assert_eq!(predictions, vec![10, 11, 12]);
    }
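
    // Added check (not in the original tests): a non-monotonic access
    // sequence should be classified as Random, and prediction should fall
    // back to the blocks neighboring the latest access.
    #[test]
    fn test_access_pattern_detection_random() {
        let config = PrefetchConfig {
            min_pattern_length: 4,
            ..Default::default()
        };

        let mut tracker = BlockAccessTracker::new(config);

        for &block in &[10, 3, 7, 22, 5] {
            tracker.record_access(block);
        }

        assert_eq!(tracker.current_pattern(), AccessPattern::Random);

        // Neighborhood fallback around the latest access (block 5).
        let predictions = tracker.predict_next_blocks(3);
        assert_eq!(predictions, vec![4, 6, 3]);
    }
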
    #[test]
    fn test_access_pattern_detection_strided() {
        let config = PrefetchConfig {
            min_pattern_length: 4,
            ..Default::default()
        };

        let mut tracker = BlockAccessTracker::new(config);

        for i in (0..30).step_by(3) {
            tracker.record_access(i);
        }

        assert_eq!(tracker.current_pattern(), AccessPattern::Strided(3));

        let predictions = tracker.predict_next_blocks(3);
        assert_eq!(predictions, vec![30, 33, 36]);
    }
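
    // Added check (not in the original tests): the builder should produce a
    // config carrying exactly the values that were set.
    #[test]
    fn test_config_builder() {
        let config = PrefetchConfigBuilder::new()
            .enabled(true)
            .prefetch_count(4)
            .history_size(64)
            .min_pattern_length(8)
            .async_prefetch(false)
            .prefetch_timeout(Duration::from_millis(50))
            .build();

        assert!(config.enabled);
        assert_eq!(config.prefetch_count, 4);
        assert_eq!(config.history_size, 64);
        assert_eq!(config.min_pattern_length, 8);
        assert!(!config.async_prefetch);
        assert_eq!(config.prefetch_timeout, Duration::from_millis(50));
    }
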
    #[test]
    fn test_prefetching_state() {
        let config = PrefetchConfig {
            prefetch_count: 3,
            ..Default::default()
        };

        let mut state = PrefetchingState::new(config);

        // Sequential accesses; nothing was prefetched, so all are misses.
        for i in 0..5 {
            state.record_access(i);
        }

        let to_prefetch = state.get_blocks_to_prefetch();
        assert_eq!(to_prefetch, vec![5, 6, 7]);

        for &block in &to_prefetch {
            state.prefetching.insert(block);
        }

        // Complete the prefetch of block 5, then access it: one hit.
        state.prefetched.insert(5);
        state.prefetching.remove(&5);

        state.record_access(5);

        let stats = state.stats();
        assert_eq!(stats.prefetch_hits, 1);
        assert_eq!(stats.prefetch_misses, 5);
        assert!(stats.hit_rate > 0.0);
    }
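
    // Added check (not in the original tests): clearing history resets the
    // detected pattern and disables prediction until new accesses arrive.
    #[test]
    fn test_clear_history() {
        let mut tracker = BlockAccessTracker::new(PrefetchConfig::default());

        for i in 0..10 {
            tracker.record_access(i);
        }
        assert_eq!(tracker.current_pattern(), AccessPattern::Sequential);

        tracker.clear_history();
        assert_eq!(tracker.current_pattern(), AccessPattern::Random);
        assert!(tracker.predict_next_blocks(3).is_empty());
    }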
}