use crate::core::error::{Error, Result};
use crate::storage::unified_memory::*;
use std::collections::{HashMap, VecDeque};
use std::fs::File;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};
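/// Tuning knobs for the memory-mapped storage strategy; the defaults below
/// assume 4 KiB pages, read-ahead enabled, and up to 1000 mapped files.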
#[derive(Debug, Clone)]
pub struct MemoryMappedConfig {
pub page_size: usize,
pub prefetch_ahead: bool,
pub prefetch_distance: u8,
pub use_huge_pages: bool,
pub bypass_cache: bool,
pub column_aligned_pages: bool,
pub numa_awareness: bool,
pub max_mapped_files: usize,
}
impl Default for MemoryMappedConfig {
fn default() -> Self {
Self {
            page_size: 4096,
            prefetch_ahead: true,
            prefetch_distance: 4,
            use_huge_pages: false,
            bypass_cache: false,
column_aligned_pages: true,
numa_awareness: true,
max_mapped_files: 1000,
}
}
}
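/// Read-ahead policy used by the `PrefetchEngine`. `Adaptive` (the default)
/// lets the fault predictor drive prefetch decisions.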
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PrefetchStrategy {
None,
Sequential,
Adaptive,
Streaming,
Columnar,
}
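/// Summary statistics extracted from a window of recent accesses and fed to
/// a `PredictionModel`. Ratio and locality fields are normalized to [0, 1].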
#[derive(Debug, Clone)]
pub struct AccessPatternFeatures {
pub recent_accesses: Vec<u64>,
pub access_intervals: Vec<Duration>,
pub stride_pattern: Vec<i64>,
pub frequency_distribution: HashMap<u64, u32>,
pub sequential_ratio: f64,
pub random_ratio: f64,
pub temporal_locality: f64,
pub spatial_locality: f64,
}
impl AccessPatternFeatures {
pub fn new() -> Self {
Self {
recent_accesses: Vec::new(),
access_intervals: Vec::new(),
stride_pattern: Vec::new(),
frequency_distribution: HashMap::new(),
sequential_ratio: 0.0,
random_ratio: 0.0,
temporal_locality: 0.0,
spatial_locality: 0.0,
}
}
}
#[derive(Debug, Clone)]
pub struct PredictionResult {
pub predicted_offsets: Vec<u64>,
pub confidence_scores: Vec<f64>,
pub prefetch_ranges: Vec<ChunkRange>,
}
#[derive(Debug, Clone)]
pub struct TrainingSample {
pub features: AccessPatternFeatures,
pub actual_access: u64,
pub accuracy: f64,
}
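/// A model that predicts future access offsets from extracted features.
/// Implementations must be `Send + Sync` so the predictor can be shared
/// behind a mutex across reader threads.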
pub trait PredictionModel: Send + Sync {
fn predict(&self, features: &AccessPatternFeatures) -> Option<PredictionResult>;
fn add_training_sample(&mut self, sample: TrainingSample);
fn retrain(&mut self) -> Result<()>;
fn accuracy(&self) -> f64;
}
pub struct RuleBasedPredictionModel {
training_samples: Vec<TrainingSample>,
accuracy: f64,
config: ModelConfig,
}
#[derive(Debug, Clone)]
pub struct ModelConfig {
pub max_training_samples: usize,
pub min_confidence_threshold: f64,
pub max_prefetch_distance: u64,
}
impl Default for ModelConfig {
fn default() -> Self {
Self {
max_training_samples: 10000,
min_confidence_threshold: 0.6,
            max_prefetch_distance: 1024 * 1024,
        }
}
}
impl RuleBasedPredictionModel {
pub fn new(config: ModelConfig) -> Self {
Self {
training_samples: Vec::new(),
accuracy: 0.0,
config,
}
}
fn analyze_pattern(&self, features: &AccessPatternFeatures) -> PredictionResult {
let mut predicted_offsets = Vec::new();
let mut confidence_scores = Vec::new();
if !features.recent_accesses.is_empty() {
            let last_access = *features.recent_accesses.last().expect("checked non-empty above");
            if features.sequential_ratio > 0.7 {
                // Extrapolate the most recent stride (falling back to 1 KiB)
                // for the next few sequential accesses.
                let stride = features.stride_pattern.last().copied().unwrap_or(1024).unsigned_abs();
                for i in 1..=4u64 {
                    predicted_offsets.push(last_access + i * stride);
                    confidence_scores.push(features.sequential_ratio);
                }
            }
            if features.stride_pattern.len() >= 2 {
                // One more prediction at the average stride, which captures
                // regular non-unit stride patterns.
                let avg_stride = features.stride_pattern.iter().sum::<i64>() / features.stride_pattern.len() as i64;
                if avg_stride != 0 {
                    predicted_offsets.push((last_access as i64 + avg_stride).max(0) as u64);
                    confidence_scores.push(0.8);
                }
            }
let mut frequent_offsets: Vec<_> = features.frequency_distribution.iter()
.filter(|(_, &count)| count > 2)
.map(|(&offset, &count)| (offset, count))
.collect();
frequent_offsets.sort_by_key(|(_, count)| std::cmp::Reverse(*count));
for (offset, count) in frequent_offsets.iter().take(2) {
                if *offset != last_access {
predicted_offsets.push(*offset);
confidence_scores.push((*count as f64 / features.recent_accesses.len() as f64).min(1.0));
}
}
}
let prefetch_ranges = predicted_offsets.iter()
.zip(confidence_scores.iter())
.filter(|(_, &confidence)| confidence >= self.config.min_confidence_threshold)
.map(|(&offset, _)| ChunkRange::new(offset as usize, (offset + 4096) as usize))
.collect();
PredictionResult {
predicted_offsets,
confidence_scores,
prefetch_ranges,
}
}
}
impl PredictionModel for RuleBasedPredictionModel {
fn predict(&self, features: &AccessPatternFeatures) -> Option<PredictionResult> {
if features.recent_accesses.is_empty() {
return None;
}
Some(self.analyze_pattern(features))
}
fn add_training_sample(&mut self, sample: TrainingSample) {
self.training_samples.push(sample);
if self.training_samples.len() > self.config.max_training_samples {
self.training_samples.remove(0);
}
}
fn retrain(&mut self) -> Result<()> {
if self.training_samples.is_empty() {
return Ok(());
}
let recent_samples = &self.training_samples[self.training_samples.len().saturating_sub(100)..];
let total_accuracy: f64 = recent_samples.iter().map(|s| s.accuracy).sum();
self.accuracy = total_accuracy / recent_samples.len() as f64;
Ok(())
}
fn accuracy(&self) -> f64 {
self.accuracy
}
}
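/// Ties together feature extraction, the prediction model, and training-data
/// collection, retraining the model whenever the update trigger fires.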
pub struct PageFaultPredictor {
feature_extractor: AccessPatternFeatureExtractor,
model: Box<dyn PredictionModel>,
training_data: TrainingDataCollector,
update_trigger: ModelUpdateTrigger,
}
impl PageFaultPredictor {
pub fn new() -> Self {
Self {
feature_extractor: AccessPatternFeatureExtractor::new(),
model: Box::new(RuleBasedPredictionModel::new(ModelConfig::default())),
training_data: TrainingDataCollector::new(),
update_trigger: ModelUpdateTrigger::new(),
}
}
pub fn initialize_for_mapping(&mut self, _mapping: &MemoryMapping, _pattern: &AccessPattern) -> Result<()> {
self.training_data.reset();
Ok(())
}
pub fn predict_next_accesses(
&self,
access_tracker: &AccessTracker,
current_range: &ChunkRange
) -> Option<Vec<ChunkRange>> {
let features = self.feature_extractor.extract_features(access_tracker, current_range);
        self.model.predict(&features).map(|prediction| prediction.prefetch_ranges)
}
pub fn record_actual_access(&mut self, predicted: &[ChunkRange], actual: &ChunkRange) {
let accuracy = self.calculate_prediction_accuracy(predicted, actual);
        // Placeholder: an empty feature snapshot is stored with the sample; a
        // fuller implementation would capture the features behind the prediction.
        let features = AccessPatternFeatures::new();
        let sample = TrainingSample {
features,
actual_access: actual.start as u64,
accuracy,
};
self.training_data.add_sample(sample.clone());
self.model.add_training_sample(sample);
if self.update_trigger.should_update() {
let _ = self.model.retrain();
}
}
fn calculate_prediction_accuracy(&self, predicted: &[ChunkRange], actual: &ChunkRange) -> f64 {
if predicted.is_empty() {
return 0.0;
}
for pred_range in predicted {
if ranges_overlap(pred_range, actual) {
return 1.0;
}
}
        // Partial credit: decay toward zero with the distance (in KiB) to the
        // nearest predicted range.
        let min_distance = predicted.iter()
            .map(|pred| distance_between_ranges(pred, actual))
            .min_by(|a, b| a.partial_cmp(b).expect("range distances are never NaN"))
            .unwrap_or(f64::MAX);
        1.0 / (1.0 + min_distance / 1024.0)
}
}
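/// Ranges are treated as half-open `[start, end)`: two ranges overlap iff
/// each one starts before the other ends.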
fn ranges_overlap(range1: &ChunkRange, range2: &ChunkRange) -> bool {
range1.start < range2.end && range2.start < range1.end
}
fn distance_between_ranges(range1: &ChunkRange, range2: &ChunkRange) -> f64 {
if ranges_overlap(range1, range2) {
0.0
} else {
let gap = if range1.end <= range2.start {
range2.start - range1.end
} else {
range1.start - range2.end
};
gap as f64
}
}
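/// Derives `AccessPatternFeatures` from the last `window_size` accesses
/// recorded by an `AccessTracker`.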
pub struct AccessPatternFeatureExtractor {
window_size: usize,
}
impl AccessPatternFeatureExtractor {
pub fn new() -> Self {
Self {
window_size: 100,
}
}
    pub fn extract_features(&self, access_tracker: &AccessTracker, _current_range: &ChunkRange) -> AccessPatternFeatures {
        // get_recent_accesses returns newest-first; restore chronological
        // order so strides and intervals carry the right sign.
        let mut accesses = access_tracker.get_recent_accesses(self.window_size);
        accesses.reverse();
        let mut features = AccessPatternFeatures::new();
        features.recent_accesses = accesses.iter().map(|a| a.offset).collect();
        for window in accesses.windows(2) {
            // Instant::duration_since returns a Duration directly (not a
            // Result), saturating to zero if the ordering were reversed.
            let interval = window[1].timestamp.duration_since(window[0].timestamp);
            features.access_intervals.push(interval);
        }
for window in features.recent_accesses.windows(2) {
let stride = window[1] as i64 - window[0] as i64;
features.stride_pattern.push(stride);
}
for &offset in &features.recent_accesses {
*features.frequency_distribution.entry(offset).or_insert(0) += 1;
}
features.sequential_ratio = self.calculate_sequential_ratio(&features.stride_pattern);
features.random_ratio = 1.0 - features.sequential_ratio;
features.temporal_locality = self.calculate_temporal_locality(&accesses);
features.spatial_locality = self.calculate_spatial_locality(&features.recent_accesses);
features
}
fn calculate_sequential_ratio(&self, strides: &[i64]) -> f64 {
if strides.is_empty() {
return 0.0;
}
        // Strides within two 4 KiB pages of each other count as sequential.
        let sequential_threshold = 8192;
        let sequential_count = strides.iter()
.filter(|&&stride| stride.abs() <= sequential_threshold)
.count();
sequential_count as f64 / strides.len() as f64
}
fn calculate_temporal_locality(&self, accesses: &[AccessRecord]) -> f64 {
if accesses.len() < 2 {
return 0.0;
}
let recent_window = Duration::from_secs(1);
let mut locality_score = 0.0;
for i in 1..accesses.len() {
            let time_diff = accesses[i].timestamp.duration_since(accesses[i - 1].timestamp);
if time_diff < recent_window {
locality_score += 1.0;
}
}
locality_score / (accesses.len() - 1) as f64
}
fn calculate_spatial_locality(&self, offsets: &[u64]) -> f64 {
if offsets.len() < 2 {
return 0.0;
}
        // Accesses within 64 KiB of the previous one count as spatially local.
        let nearby_threshold = 64 * 1024;
        let mut locality_score = 0.0;
        for i in 1..offsets.len() {
            let distance = offsets[i].abs_diff(offsets[i - 1]);
if distance <= nearby_threshold {
locality_score += 1.0;
}
}
locality_score / (offsets.len() - 1) as f64
}
}
pub struct TrainingDataCollector {
samples: Vec<TrainingSample>,
max_samples: usize,
}
impl TrainingDataCollector {
pub fn new() -> Self {
Self {
samples: Vec::new(),
max_samples: 10000,
}
}
pub fn add_sample(&mut self, sample: TrainingSample) {
self.samples.push(sample);
if self.samples.len() > self.max_samples {
self.samples.remove(0);
}
}
pub fn reset(&mut self) {
self.samples.clear();
}
pub fn sample_count(&self) -> usize {
self.samples.len()
}
}
pub struct ModelUpdateTrigger {
samples_since_update: usize,
update_interval: usize,
last_update: Instant,
min_update_interval: Duration,
}
impl ModelUpdateTrigger {
pub fn new() -> Self {
Self {
samples_since_update: 0,
update_interval: 1000,
last_update: Instant::now(),
min_update_interval: Duration::from_secs(60),
}
}
pub fn should_update(&mut self) -> bool {
self.samples_since_update += 1;
let should_update = self.samples_since_update >= self.update_interval &&
self.last_update.elapsed() >= self.min_update_interval;
if should_update {
self.samples_since_update = 0;
self.last_update = Instant::now();
}
should_update
}
}
#[derive(Debug, Clone)]
pub struct AccessRecord {
pub offset: u64,
pub size: usize,
pub timestamp: Instant,
pub access_type: AccessType,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessType {
Read,
Write,
Prefetch,
}
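/// Bounded FIFO log of accesses; the oldest records are evicted once
/// `max_records` is exceeded.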
pub struct AccessTracker {
accesses: VecDeque<AccessRecord>,
max_records: usize,
}
impl AccessTracker {
pub fn new() -> Self {
Self {
accesses: VecDeque::new(),
max_records: 10000,
}
}
pub fn record_access(&mut self, range: &ChunkRange) {
let record = AccessRecord {
offset: range.start as u64,
size: range.len(),
timestamp: Instant::now(),
access_type: AccessType::Read,
};
self.accesses.push_back(record);
while self.accesses.len() > self.max_records {
self.accesses.pop_front();
}
}
pub fn get_recent_accesses(&self, count: usize) -> Vec<AccessRecord> {
self.accesses.iter()
.rev()
.take(count)
.cloned()
.collect()
}
}
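/// A simulated memory mapping over a file; `base_address` is a fixed
/// placeholder and no actual `mmap` call is made.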
#[derive(Debug)]
pub struct MemoryMapping {
pub file: File,
pub size: usize,
pub base_address: u64,
pub page_size: usize,
}
impl MemoryMapping {
pub fn new(file: File, size: usize, page_size: usize) -> Self {
Self {
file,
size,
            // Simulated base address; no real mmap is performed.
            base_address: 0x0100_0000,
            page_size,
}
}
pub fn base_ptr(&self) -> *const u8 {
self.base_address as *const u8
}
}
pub struct PageManager {
config: MemoryMappedConfig,
mappings: HashMap<u64, MemoryMapping>,
next_mapping_id: u64,
}
impl PageManager {
pub fn new(config: MemoryMappedConfig) -> Self {
Self {
config,
mappings: HashMap::new(),
next_mapping_id: 1,
}
}
    pub fn create_mapping(&mut self, file_size: usize, page_size: usize, _numa_node: Option<u32>) -> Result<MemoryMapping> {
        let file = tempfile::tempfile()
            .map_err(|e| Error::IoError(format!("Failed to create temp file: {}", e)))?;
        // Track a second handle to the *same* file instead of creating an
        // unrelated temp file for the returned mapping.
        let tracked = file.try_clone()
            .map_err(|e| Error::IoError(format!("Failed to clone file handle: {}", e)))?;
        let mapping_id = self.next_mapping_id;
        self.next_mapping_id += 1;
        self.mappings.insert(mapping_id, MemoryMapping::new(tracked, file_size, page_size));
        Ok(MemoryMapping::new(file, file_size, page_size))
    }
pub fn ensure_pages_loaded(&self, _mapping: &MemoryMapping, _range: &ChunkRange) -> Result<()> {
Ok(())
}
}
pub struct NumaAwareAllocator {
available_nodes: Vec<u32>,
node_assignments: HashMap<u64, u32>,
}
impl NumaAwareAllocator {
pub fn new() -> Self {
Self {
            available_nodes: vec![0], // single-node fallback; no NUMA probing yet
            node_assignments: HashMap::new(),
}
}
pub fn select_optimal_numa_node(&self) -> Option<u32> {
self.available_nodes.first().copied()
}
}
pub struct PrefetchEngine {
strategy: PrefetchStrategy,
pending_requests: Vec<ChunkRange>,
config: MemoryMappedConfig,
}
impl PrefetchEngine {
pub fn new(config: MemoryMappedConfig) -> Self {
Self {
strategy: PrefetchStrategy::Adaptive,
pending_requests: Vec::new(),
config,
}
}
pub fn set_strategy(&mut self, strategy: PrefetchStrategy) {
self.strategy = strategy;
}
    pub fn schedule_prefetch(&mut self, ranges: &[ChunkRange]) -> Result<()> {
        // Cap the queue so a burst of predictions cannot grow it unboundedly.
        const MAX_PENDING_REQUESTS: usize = 100;
        for range in ranges {
            if self.pending_requests.len() < MAX_PENDING_REQUESTS {
                self.pending_requests.push(range.clone());
            }
        }
        Ok(())
    }
}
#[derive(Debug)]
pub struct MemoryMappedHandle {
pub mapping: MemoryMapping,
pub access_tracker: AccessTracker,
pub prefetch_state: PrefetchState,
}
#[derive(Debug)]
pub struct PrefetchState {
pub prefetched_ranges: Vec<ChunkRange>,
pub hit_rate: f64,
}
impl PrefetchState {
pub fn new() -> Self {
Self {
prefetched_ranges: Vec::new(),
hit_rate: 0.0,
}
}
}
pub struct IntelligentMemoryMappedStrategy {
page_manager: PageManager,
mapping_tracker: HashMap<u64, MemoryMapping>,
fault_predictor: Arc<Mutex<PageFaultPredictor>>,
numa_allocator: NumaAwareAllocator,
prefetch_engine: Arc<Mutex<PrefetchEngine>>,
config: MemoryMappedConfig,
next_handle_id: std::sync::atomic::AtomicU64,
}
impl IntelligentMemoryMappedStrategy {
pub fn new(config: MemoryMappedConfig) -> Self {
Self {
page_manager: PageManager::new(config.clone()),
mapping_tracker: HashMap::new(),
fault_predictor: Arc::new(Mutex::new(PageFaultPredictor::new())),
numa_allocator: NumaAwareAllocator::new(),
prefetch_engine: Arc::new(Mutex::new(PrefetchEngine::new(config.clone()))),
config,
next_handle_id: std::sync::atomic::AtomicU64::new(1),
}
}
fn estimate_file_size(&self, config: &StorageConfig) -> Result<usize> {
Ok(config.requirements.estimated_size.max(self.config.page_size))
}
fn determine_optimal_page_size(&self, _config: &StorageConfig) -> Result<usize> {
Ok(self.config.page_size)
}
fn select_optimal_numa_node(&self) -> Result<Option<u32>> {
Ok(self.numa_allocator.select_optimal_numa_node())
}
}
impl StorageStrategy for IntelligentMemoryMappedStrategy {
type Handle = MemoryMappedHandle;
type Error = Error;
type Metadata = MemoryMappedMetadata;
fn name(&self) -> &'static str {
"IntelligentMemoryMapped"
}
fn create_storage(&mut self, config: &StorageConfig) -> Result<Self::Handle> {
let file_size = self.estimate_file_size(config)?;
let page_size = self.determine_optimal_page_size(config)?;
let numa_node = self.select_optimal_numa_node()?;
let mapping = self.page_manager.create_mapping(file_size, page_size, numa_node)?;
if let Ok(mut predictor) = self.fault_predictor.lock() {
predictor.initialize_for_mapping(&mapping, &config.expected_access_pattern)?;
}
let handle = MemoryMappedHandle {
mapping,
access_tracker: AccessTracker::new(),
prefetch_state: PrefetchState::new(),
};
Ok(handle)
}
    fn read_chunk(&self, handle: &Self::Handle, range: ChunkRange) -> Result<DataChunk> {
        let access_tracker = &handle.access_tracker;
        if let Ok(predictor) = self.fault_predictor.lock() {
            if let Some(prefetch_ranges) = predictor.predict_next_accesses(access_tracker, &range) {
                if let Ok(mut prefetch_engine) = self.prefetch_engine.lock() {
                    let _ = prefetch_engine.schedule_prefetch(&prefetch_ranges);
                }
            }
        }
        self.page_manager.ensure_pages_loaded(&handle.mapping, &range)?;
        // Simulated read: zero-filled; a real implementation would copy from
        // the mapped region.
        let data = vec![0u8; range.len()];
        Ok(DataChunk::from_slice(&data))
    }
fn write_chunk(&mut self, handle: &Self::Handle, chunk: DataChunk) -> Result<()> {
if chunk.len() > handle.mapping.size {
return Err(Error::InvalidOperation("Chunk too large for mapping".to_string()));
}
        // Simulated write: a real implementation would copy into the mapped
        // region and mark affected pages dirty.
        Ok(())
}
fn append_chunk(&mut self, handle: &Self::Handle, chunk: DataChunk) -> Result<()> {
self.write_chunk(handle, chunk)
}
fn flush(&mut self, _handle: &Self::Handle) -> Result<()> {
Ok(())
}
fn delete_storage(&mut self, _handle: &Self::Handle) -> Result<()> {
Ok(())
}
fn can_handle(&self, requirements: &StorageRequirements) -> StrategyCapability {
let can_handle = match requirements.access_pattern {
AccessPattern::Sequential | AccessPattern::Streaming => true,
            // Large files are worth mapping even without sequential access.
            _ => requirements.estimated_size > 10 * 1024 * 1024,
        };
let confidence = if can_handle { 0.85 } else { 0.2 };
let performance_score = match requirements.performance_priority {
PerformancePriority::Speed => 0.9,
PerformancePriority::Memory => 0.7,
PerformancePriority::Balanced => 0.8,
_ => 0.6,
};
StrategyCapability {
can_handle,
confidence,
performance_score,
resource_cost: ResourceCost {
                memory: requirements.estimated_size,
                cpu: 5.0,
                disk: requirements.estimated_size,
                network: 0,
},
}
}
fn performance_profile(&self) -> PerformanceProfile {
PerformanceProfile {
read_speed: Speed::VeryFast,
write_speed: Speed::Fast,
memory_efficiency: Efficiency::Good,
            compression_ratio: 1.0, // memory mapping applies no compression
            query_optimization: QueryOptimization::Basic,
parallel_scalability: ParallelScalability::Good,
}
}
fn storage_stats(&self) -> StorageStats {
StorageStats::default()
}
fn optimize_for_pattern(&mut self, pattern: AccessPattern) -> Result<()> {
match pattern {
AccessPattern::Sequential => {
self.config.prefetch_ahead = true;
self.config.prefetch_distance = 8;
if let Ok(mut prefetch_engine) = self.prefetch_engine.lock() {
prefetch_engine.set_strategy(PrefetchStrategy::Sequential);
}
},
AccessPattern::Random => {
self.config.prefetch_ahead = false;
if let Ok(mut prefetch_engine) = self.prefetch_engine.lock() {
prefetch_engine.set_strategy(PrefetchStrategy::Adaptive);
}
},
AccessPattern::Streaming => {
self.config.bypass_cache = true;
self.config.use_huge_pages = true;
if let Ok(mut prefetch_engine) = self.prefetch_engine.lock() {
prefetch_engine.set_strategy(PrefetchStrategy::Streaming);
}
},
AccessPattern::Columnar => {
self.config.column_aligned_pages = true;
if let Ok(mut prefetch_engine) = self.prefetch_engine.lock() {
prefetch_engine.set_strategy(PrefetchStrategy::Columnar);
}
},
_ => {
if let Ok(mut prefetch_engine) = self.prefetch_engine.lock() {
prefetch_engine.set_strategy(PrefetchStrategy::Adaptive);
}
}
}
Ok(())
}
fn compact(&mut self, _handle: &Self::Handle) -> Result<CompactionResult> {
let start_time = Instant::now();
Ok(CompactionResult {
size_before: 0,
size_after: 0,
duration: start_time.elapsed(),
})
}
}
#[derive(Debug, Clone)]
pub struct MemoryMappedMetadata {
pub file_size: usize,
pub page_size: usize,
pub numa_node: Option<u32>,
pub access_pattern: AccessPattern,
pub prefetch_stats: PrefetchStats,
}
#[derive(Debug, Clone)]
pub struct PrefetchStats {
pub prefetch_requests: u64,
pub prefetch_hits: u64,
pub prefetch_misses: u64,
pub bytes_prefetched: u64,
}
impl PrefetchStats {
pub fn hit_rate(&self) -> f64 {
if self.prefetch_requests == 0 {
0.0
} else {
self.prefetch_hits as f64 / self.prefetch_requests as f64
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_access_pattern_features() {
let mut features = AccessPatternFeatures::new();
features.recent_accesses = vec![0, 1024, 2048, 3072];
features.sequential_ratio = 0.8;
features.random_ratio = 0.2;
assert_eq!(features.recent_accesses.len(), 4);
assert!(features.sequential_ratio > features.random_ratio);
}
#[test]
fn test_prediction_model() {
let model = RuleBasedPredictionModel::new(ModelConfig::default());
let features = AccessPatternFeatures {
recent_accesses: vec![0, 1024, 2048],
sequential_ratio: 0.9,
stride_pattern: vec![1024, 1024],
..AccessPatternFeatures::new()
};
let prediction = model.predict(&features);
assert!(prediction.is_some());
        let result = prediction.expect("non-empty access history yields a prediction");
assert!(!result.predicted_offsets.is_empty());
}
#[test]
fn test_access_tracker() {
let mut tracker = AccessTracker::new();
let range = ChunkRange::new(0, 1024);
tracker.record_access(&range);
let recent = tracker.get_recent_accesses(10);
assert_eq!(recent.len(), 1);
assert_eq!(recent[0].offset, 0);
assert_eq!(recent[0].size, 1024);
}
#[test]
fn test_memory_mapped_strategy() {
let config = MemoryMappedConfig::default();
let mut strategy = IntelligentMemoryMappedStrategy::new(config);
let storage_config = StorageConfig {
requirements: StorageRequirements {
                estimated_size: 10 * 1024 * 1024,
                access_pattern: AccessPattern::Sequential,
..Default::default()
},
..Default::default()
};
        let handle = strategy.create_storage(&storage_config).expect("storage creation should succeed");
assert_eq!(handle.mapping.size, 10 * 1024 * 1024);
}
#[test]
fn test_capability_assessment() {
let config = MemoryMappedConfig::default();
let strategy = IntelligentMemoryMappedStrategy::new(config);
let requirements = StorageRequirements {
            estimated_size: 50 * 1024 * 1024,
            access_pattern: AccessPattern::Sequential,
performance_priority: PerformancePriority::Speed,
..Default::default()
};
let capability = strategy.can_handle(&requirements);
assert!(capability.can_handle);
assert!(capability.confidence > 0.8);
}
#[test]
fn test_range_utilities() {
let range1 = ChunkRange::new(0, 100);
let range2 = ChunkRange::new(50, 150);
let range3 = ChunkRange::new(200, 300);
assert!(ranges_overlap(&range1, &range2));
assert!(!ranges_overlap(&range1, &range3));
assert_eq!(distance_between_ranges(&range1, &range2), 0.0);
assert_eq!(distance_between_ranges(&range1, &range3), 100.0);
}
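    // Additional checks, sketched against the public behavior defined above:
    // the trigger's sample/time gating and PrefetchStats::hit_rate edge cases.
    #[test]
    fn test_model_update_trigger() {
        let mut trigger = ModelUpdateTrigger::new();
        // Fewer samples than update_interval (1000) never trigger an update.
        for _ in 0..999 {
            assert!(!trigger.should_update());
        }
        // The 1000th sample alone is still not enough, because less than
        // min_update_interval (60 s) has elapsed since construction.
        assert!(!trigger.should_update());
    }
    #[test]
    fn test_prefetch_stats_hit_rate() {
        let stats = PrefetchStats {
            prefetch_requests: 10,
            prefetch_hits: 7,
            prefetch_misses: 3,
            bytes_prefetched: 4096,
        };
        assert!((stats.hit_rate() - 0.7).abs() < 1e-9);
        // Zero requests must not divide by zero.
        let empty = PrefetchStats {
            prefetch_requests: 0,
            prefetch_hits: 0,
            prefetch_misses: 0,
            bytes_prefetched: 0,
        };
        assert_eq!(empty.hit_rate(), 0.0);
    }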
}