// scirs2_interpolate/cache.rs
//! Cache-aware algorithm implementations for interpolation
//!
//! This module provides cache-optimized versions of computationally intensive
//! interpolation algorithms. The caching strategies focus on:
//!
//! - **Basis function caching**: Pre-computed and cached basis function evaluations
//! - **Coefficient matrix caching**: Cached matrix factorizations and linear solves
//! - **Distance matrix caching**: Cached distance computations for scattered data methods
//! - **Knot span caching**: Cached knot span lookups for B-splines
//! - **Memory layout optimization**: Data structures optimized for cache locality
//!
//! These optimizations can provide significant performance improvements for:
//! - Repeated evaluations at similar points
//! - Large datasets with repeated computations
//! - Real-time applications requiring fast interpolation
//!
//! # Examples
//!
//! ```rust
//! use scirs2_core::ndarray::Array1;
//! use scirs2_interpolate::cache::{CachedBSpline, BSplineCache};
//! use scirs2_interpolate::bspline::ExtrapolateMode;
//!
//! // Create a cached B-spline for fast repeated evaluations
//! let knots = Array1::linspace(0.0, 10.0, 20);
//! let coeffs = Array1::linspace(-1.0, 1.0, 16);
//!
//! let mut cached_spline = CachedBSpline::new(
//!     &knots.view(),
//!     &coeffs.view(),
//!     3, // degree
//!     ExtrapolateMode::Extrapolate,
//!     BSplineCache::default(),
//! ).expect("Operation failed");
//!
//! // Use cached evaluation for fast performance
//! for x in Array1::linspace(0.0, 10.0, 1000).iter() {
//!     let y = cached_spline.evaluate_cached(*x).expect("Operation failed");
//! }
//! ```
use crate::bspline::{BSpline, ExtrapolateMode};
use crate::error::InterpolateResult;
use scirs2_core::ndarray::{Array1, Array2, ArrayView1};
use scirs2_core::numeric::{Float, FromPrimitive, Zero};
use std::collections::HashMap;
use std::fmt::{Debug, Display};
use std::hash::{Hash, Hasher};
use std::ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, RemAssign, Sub, SubAssign};
50
// Simple random generation for eviction policy

/// Cache eviction policies
///
/// Selects which entries are removed when a cache reaches its capacity
/// or memory limit. Derives `Eq` and `Hash` (a fieldless enum deriving
/// only `PartialEq` trips clippy's `derive_partial_eq_without_eq` and
/// prevents use as a `HashMap` key).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum EvictionPolicy {
    /// Least Recently Used - evict oldest accessed items
    LRU,
    /// Least Frequently Used - evict least accessed items
    LFU,
    /// First In First Out - evict oldest inserted items
    FIFO,
    /// Random eviction
    Random,
    /// Adaptive policy based on access patterns
    Adaptive,
}
67
/// Configuration for cache behavior
///
/// Tuning knobs for the interpolation caches in this module: per-cache
/// capacity limits, the tolerance used when matching floating-point cache
/// keys, statistics tracking, eviction strategy, and memory bounds.
/// Construct via `Default` and override individual fields as needed.
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Maximum number of entries in basis function cache
    pub max_basis_cache_size: usize,
    /// Maximum number of entries in coefficient matrix cache
    pub max_matrix_cache_size: usize,
    /// Maximum number of entries in distance matrix cache
    pub max_distance_cache_size: usize,
    /// Tolerance for cache key matching (for floating point comparisons);
    /// evaluation points closer than this may share a cache entry
    pub tolerance: f64,
    /// Whether to enable cache statistics tracking (hits/misses/evictions)
    pub track_stats: bool,
    /// Cache eviction strategy applied when a cache reaches capacity
    pub eviction_policy: EvictionPolicy,
    /// Memory limit for caches in MB (0 = no limit)
    pub memory_limit_mb: usize,
    /// Enable adaptive cache sizing based on access patterns
    pub adaptive_sizing: bool,
}
88
89impl Default for CacheConfig {
90    fn default() -> Self {
91        Self {
92            max_basis_cache_size: 1024,
93            max_matrix_cache_size: 64,
94            max_distance_cache_size: 256,
95            tolerance: 1e-12,
96            track_stats: false,
97            eviction_policy: EvictionPolicy::LRU,
98            memory_limit_mb: 0, // No limit by default
99            adaptive_sizing: true,
100        }
101    }
102}
103
/// Cache statistics for performance monitoring
///
/// Counters are only updated by the owning cache when
/// `CacheConfig::track_stats` is enabled.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Number of cache hits
    pub hits: usize,
    /// Number of cache misses
    pub misses: usize,
    /// Number of cache evictions
    pub evictions: usize,
    /// Total memory usage in bytes (estimated from per-entry sizes)
    pub memory_usage_bytes: usize,
    /// Average access frequency for adaptive sizing
    /// (exponentially smoothed; see `update_access_frequency`)
    pub avg_access_frequency: f64,
    /// Number of cache resizes
    pub resize_count: usize,
    /// Peak memory usage in bytes observed so far
    pub peak_memory_bytes: usize,
    /// Last cleanup timestamp (for TTL-based eviction)
    pub last_cleanup_time: std::time::Instant,
}
124
125impl Default for CacheStats {
126    fn default() -> Self {
127        Self {
128            hits: 0,
129            misses: 0,
130            evictions: 0,
131            memory_usage_bytes: 0,
132            avg_access_frequency: 0.0,
133            resize_count: 0,
134            peak_memory_bytes: 0,
135            last_cleanup_time: std::time::Instant::now(),
136        }
137    }
138}
139
140impl CacheStats {
141    /// Calculate cache hit ratio
142    pub fn hit_ratio(&self) -> f64 {
143        if self.hits + self.misses == 0 {
144            0.0
145        } else {
146            self.hits as f64 / (self.hits + self.misses) as f64
147        }
148    }
149
150    /// Calculate cache efficiency score (combines hit ratio and memory usage)
151    pub fn efficiency_score(&self) -> f64 {
152        let hit_ratio = self.hit_ratio();
153        let memory_factor = if self.peak_memory_bytes > 0 {
154            1.0 - (self.memory_usage_bytes as f64 / self.peak_memory_bytes as f64)
155        } else {
156            1.0
157        };
158        (hit_ratio + memory_factor) / 2.0
159    }
160
161    /// Get memory usage in MB
162    pub fn memory_usage_mb(&self) -> f64 {
163        self.memory_usage_bytes as f64 / (1024.0 * 1024.0)
164    }
165
166    /// Get peak memory usage in MB
167    pub fn peak_memory_mb(&self) -> f64 {
168        self.peak_memory_bytes as f64 / (1024.0 * 1024.0)
169    }
170
171    /// Update memory usage statistics
172    pub fn update_memory_usage(&mut self, currentbytes: usize) {
173        self.memory_usage_bytes = currentbytes;
174        if currentbytes > self.peak_memory_bytes {
175            self.peak_memory_bytes = currentbytes;
176        }
177    }
178
179    /// Update access frequency for adaptive sizing
180    pub fn update_access_frequency(&mut self, new_accesscount: usize) {
181        let alpha = 0.1; // Exponential smoothing factor
182        let new_freq = new_accesscount as f64;
183        self.avg_access_frequency = alpha * new_freq + (1.0 - alpha) * self.avg_access_frequency;
184    }
185
186    /// Check if cleanup is needed based on time threshold
187    pub fn needs_cleanup(&self, thresholdsecs: u64) -> bool {
188        self.last_cleanup_time.elapsed().as_secs() >= thresholdsecs
189    }
190
191    /// Reset all statistics
192    pub fn reset(&mut self) {
193        self.hits = 0;
194        self.misses = 0;
195        self.evictions = 0;
196        self.memory_usage_bytes = 0;
197        self.avg_access_frequency = 0.0;
198        self.resize_count = 0;
199        self.peak_memory_bytes = 0;
200        self.last_cleanup_time = std::time::Instant::now();
201    }
202}
203
/// A floating-point key that can be hashed with tolerance
///
/// Wraps a coordinate `value` together with the `tolerance` used for
/// approximate equality and quantized hashing, so nearby evaluation
/// points can share a cache entry.
#[derive(Debug, Clone)]
struct FloatKey<F: Float> {
    // The wrapped coordinate value.
    value: F,
    // Comparison/quantization granularity for this key.
    tolerance: F,
}
210
211impl<F: Float> FloatKey<F> {
212    #[allow(dead_code)]
213    fn new(value: F, tolerance: F) -> Self {
214        Self { value, tolerance }
215    }
216}
217
218impl<F: crate::traits::InterpolationFloat> PartialEq for FloatKey<F> {
219    fn eq(&self, other: &Self) -> bool {
220        (self.value - other.value).abs() <= self.tolerance
221    }
222}
223
224impl<F: crate::traits::InterpolationFloat> Eq for FloatKey<F> {}
225
226impl<F: crate::traits::InterpolationFloat> Hash for FloatKey<F> {
227    fn hash<H: Hasher>(&self, state: &mut H) {
228        // Quantize the float to the tolerance for consistent hashing
229        let quantized = (self.value / self.tolerance).round() * self.tolerance;
230        // Convert to bits for hashing (this is approximate)
231        let bits = quantized.to_f64().unwrap_or(0.0).to_bits();
232        bits.hash(state);
233    }
234}
235
/// Cache for B-spline basis function evaluations
///
/// Holds two approximate-key hash maps: one for individual basis function
/// values keyed by `(x, index, degree)` and one for knot-span lookups keyed
/// by `x`. Keys match within `CacheConfig::tolerance`.
#[derive(Debug)]
pub struct BSplineCache<F: Float> {
    /// Cache for basis function values with access tracking
    basis_cache: HashMap<(FloatKey<F>, usize, usize), CacheEntry<F>>,
    /// Cache for knot span indices with access tracking
    span_cache: HashMap<FloatKey<F>, CacheEntry<usize>>,
    /// Configuration (capacities, tolerance, eviction policy)
    config: CacheConfig,
    /// Statistics (populated only when `config.track_stats` is set)
    stats: CacheStats,
    /// Monotonic counter used as a logical clock for LRU/LFU/FIFO tracking
    #[allow(dead_code)]
    access_counter: usize,
}
251
/// Cache key for basis function evaluations
///
/// `x_quantized` holds the f64 bit pattern of the evaluation point after
/// snapping it to the cache tolerance (see `CachedBSpline::quantize_x`),
/// so nearby points map to the same key.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct BasisCacheKey {
    /// Quantized x value for consistent cache key generation
    pub x_quantized: u64,
    /// Basis function index
    pub index: usize,
    /// Polynomial degree
    pub degree: usize,
}
262
/// Cache entry with metadata for advanced eviction policies
#[derive(Debug, Clone)]
struct CacheEntry<T> {
    /// The cached value
    #[allow(dead_code)]
    value: T,
    /// Last access time (logical clock tick, for LRU)
    #[allow(dead_code)]
    last_access: usize,
    /// Access frequency (for LFU)
    #[allow(dead_code)]
    access_count: usize,
    /// Insertion time (logical clock tick, for FIFO)
    #[allow(dead_code)]
    insertion_time: usize,
    /// Estimated memory size in bytes (stack sizes only; does not account
    /// for heap allocations owned by `value`)
    #[allow(dead_code)]
    memory_size: usize,
}
282
283#[allow(dead_code)]
284impl<T> CacheEntry<T> {
285    /// Create a new cache entry
286    fn new(_value: T, insertiontime: usize) -> Self {
287        let memory_size = std::mem::size_of::<T>() + std::mem::size_of::<Self>();
288        Self {
289            value: _value,
290            last_access: insertiontime,
291            access_count: 1,
292            insertion_time: insertiontime,
293            memory_size,
294        }
295    }
296
297    /// Update access statistics
298    fn update_access(&mut self, currenttime: usize) {
299        self.last_access = currenttime;
300        self.access_count += 1;
301    }
302
303    /// Calculate priority for eviction (lower means more likely to be evicted)
304    fn eviction_priority(&self, policy: EvictionPolicy, currenttime: usize) -> f64 {
305        match policy {
306            EvictionPolicy::LRU => -(self.last_access as f64),
307            EvictionPolicy::LFU => -(self.access_count as f64),
308            EvictionPolicy::FIFO => -(self.insertion_time as f64),
309            EvictionPolicy::Random => {
310                // Simple linear congruential generator for randomness
311                let x = (self.insertion_time * 1103515245 + 12345) & 0x7fffffff;
312                x as f64 / 0x7fffffff as f64
313            }
314            EvictionPolicy::Adaptive => {
315                // Combine recency and frequency with memory size consideration
316                let recency = (currenttime - self.last_access) as f64;
317                let frequency = self.access_count as f64;
318                let memory_factor = self.memory_size as f64;
319
320                // Higher score means less likely to be evicted
321                -(frequency / (1.0 + recency + memory_factor / 1000.0))
322            }
323        }
324    }
325}
326
327impl<F: crate::traits::InterpolationFloat> Default for BSplineCache<F> {
328    fn default() -> Self {
329        Self::new(CacheConfig::default())
330    }
331}
332
333#[allow(dead_code)]
334impl<F: crate::traits::InterpolationFloat> BSplineCache<F> {
335    /// Create a new B-spline cache with the given configuration
336    pub fn new(config: CacheConfig) -> Self {
337        Self {
338            basis_cache: HashMap::new(),
339            span_cache: HashMap::new(),
340            config,
341            stats: CacheStats::default(),
342            access_counter: 0,
343        }
344    }
345
346    /// Get cached basis function value or compute and cache it
347    fn get_or_compute_basis<T>(
348        &mut self,
349        x: F,
350        i: usize,
351        k: usize,
352        knots: &[T],
353        computer: impl FnOnce() -> T,
354    ) -> T
355    where
356        T: Float + Copy,
357    {
358        self.access_counter += 1;
359        let tolerance = F::from_f64(self.config.tolerance).expect("Operation failed");
360        let key = (FloatKey::new(x, tolerance), i, k);
361
362        if let Some(cache_entry) = self.basis_cache.get_mut(&key) {
363            if self.config.track_stats {
364                self.stats.hits += 1;
365            }
366
367            // Update access statistics
368            cache_entry.update_access(self.access_counter);
369
370            // Convert from F to T (this assumes they're the same type or compatible)
371            unsafe { std::mem::transmute_copy(&cache_entry.value) }
372        } else {
373            if self.config.track_stats {
374                self.stats.misses += 1;
375            }
376            let computed = computer();
377
378            // Convert from T to F for caching (again, assumes compatibility)
379            let cached: F = unsafe { std::mem::transmute_copy(&computed) };
380
381            // Check cache size and evict if necessary
382            if self.basis_cache.len() >= self.config.max_basis_cache_size {
383                self.evict_basis_cache();
384            }
385
386            // Create cache entry with metadata
387            let cache_entry = CacheEntry::new(cached, self.access_counter);
388            self.basis_cache.insert(key, cache_entry);
389
390            // Update memory usage statistics
391            if self.config.track_stats {
392                self.update_memory_usage();
393            }
394
395            computed
396        }
397    }
398
399    /// Get cached basis function value using new key structure
400    pub fn get_or_compute_basis_with_key(
401        &mut self,
402        key: BasisCacheKey,
403        computer: impl FnOnce() -> F,
404    ) -> F {
405        self.access_counter += 1;
406
407        // Convert BasisCacheKey to internal cache key format
408        let tolerance = F::from_f64(self.config.tolerance).expect("Operation failed");
409        let x = F::from_f64(f64::from_bits(key.x_quantized)).unwrap_or_else(F::zero);
410        let internal_key = (FloatKey::new(x, tolerance), key.index, key.degree);
411
412        if let Some(cache_entry) = self.basis_cache.get_mut(&internal_key) {
413            if self.config.track_stats {
414                self.stats.hits += 1;
415            }
416
417            // Update access statistics
418            cache_entry.update_access(self.access_counter);
419            cache_entry.value
420        } else {
421            if self.config.track_stats {
422                self.stats.misses += 1;
423            }
424            let computed = computer();
425
426            // Check cache size and evict if necessary
427            if self.basis_cache.len() >= self.config.max_basis_cache_size {
428                self.evict_basis_cache();
429            }
430
431            // Create cache entry with metadata
432            let cache_entry = CacheEntry::new(computed, self.access_counter);
433            self.basis_cache.insert(internal_key, cache_entry);
434
435            // Update memory usage statistics
436            if self.config.track_stats {
437                self.update_memory_usage();
438            }
439
440            computed
441        }
442    }
443
444    /// Get cached knot span or compute and cache it
445    fn get_or_compute_span(&mut self, x: F, computer: impl FnOnce() -> usize) -> usize {
446        self.access_counter += 1;
447        let tolerance = F::from_f64(self.config.tolerance).expect("Operation failed");
448        let key = FloatKey::new(x, tolerance);
449
450        if let Some(cache_entry) = self.span_cache.get_mut(&key) {
451            if self.config.track_stats {
452                self.stats.hits += 1;
453            }
454
455            // Update access statistics
456            cache_entry.update_access(self.access_counter);
457            cache_entry.value
458        } else {
459            if self.config.track_stats {
460                self.stats.misses += 1;
461            }
462            let computed = computer();
463
464            // Create cache entry with metadata
465            let cache_entry = CacheEntry::new(computed, self.access_counter);
466            self.span_cache.insert(key, cache_entry);
467
468            // Update memory usage statistics
469            if self.config.track_stats {
470                self.update_memory_usage();
471            }
472
473            computed
474        }
475    }
476
477    /// Update memory usage statistics
478    fn update_memory_usage(&mut self) {
479        if !self.config.track_stats {
480            return;
481        }
482
483        let basis_memory: usize = self
484            .basis_cache
485            .values()
486            .map(|entry| entry.memory_size)
487            .sum();
488        let span_memory: usize = self
489            .span_cache
490            .values()
491            .map(|entry| entry.memory_size)
492            .sum();
493
494        let total_memory = basis_memory + span_memory;
495        self.stats.update_memory_usage(total_memory);
496
497        // Check memory limit if specified
498        if self.config.memory_limit_mb > 0 {
499            let limit_bytes = self.config.memory_limit_mb * 1024 * 1024;
500            if total_memory > limit_bytes {
501                self.evict_basis_cache_by_memory();
502            }
503        }
504    }
505
506    /// Evict entries from the basis cache based on memory pressure
507    fn evict_basis_cache_by_memory(&mut self) {
508        let target_size = self.config.memory_limit_mb * 1024 * 1024 * 3 / 4; // Target 75% of limit
509        let mut current_memory = self.stats.memory_usage_bytes;
510
511        if current_memory <= target_size {
512            return;
513        }
514
515        // Create vector of (key, priority) pairs for sorting
516        let mut entries: Vec<_> = self
517            .basis_cache
518            .iter()
519            .map(|(key, entry)| {
520                let priority =
521                    entry.eviction_priority(self.config.eviction_policy, self.access_counter);
522                (key.clone(), priority, entry.memory_size)
523            })
524            .collect();
525
526        // Sort by eviction priority (lowest first)
527        entries.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
528
529        // Remove entries until we're under the target
530        for (key_, _, memory_size) in entries {
531            if current_memory <= target_size {
532                break;
533            }
534
535            self.basis_cache.remove(&key_);
536            current_memory = current_memory.saturating_sub(memory_size);
537
538            if self.config.track_stats {
539                self.stats.evictions += 1;
540            }
541        }
542
543        // Update memory statistics
544        self.update_memory_usage();
545    }
546
547    /// Evict some entries from the basis cache when it gets too large
548    /// Uses advanced eviction policies for better cache performance
549    fn evict_basis_cache(&mut self) {
550        let total_entries = self.basis_cache.len();
551        let remove_count = if self.config.adaptive_sizing {
552            // Adaptive removal based on hit ratio
553            let hit_ratio = self.stats.hit_ratio();
554            if hit_ratio > 0.8 {
555                total_entries / 8 // Remove fewer entries if hit ratio is high
556            } else if hit_ratio > 0.5 {
557                total_entries / 4 // Standard removal
558            } else {
559                total_entries / 2 // Remove more entries if hit ratio is low
560            }
561        } else {
562            total_entries / 4 // Remove 25% by default
563        };
564
565        // Create vector of (key, priority) pairs for sorting
566        let mut entries: Vec<_> = self
567            .basis_cache
568            .iter()
569            .map(|(key, entry)| {
570                let priority =
571                    entry.eviction_priority(self.config.eviction_policy, self.access_counter);
572                (key.clone(), priority)
573            })
574            .collect();
575
576        // Sort by eviction priority (lowest first = most likely to be evicted)
577        entries.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
578
579        // Remove the lowest priority entries
580        for (key, _) in entries.into_iter().take(remove_count) {
581            self.basis_cache.remove(&key);
582            if self.config.track_stats {
583                self.stats.evictions += 1;
584            }
585        }
586
587        // Update statistics
588        if self.config.track_stats {
589            self.stats.resize_count += 1;
590            self.update_memory_usage();
591        }
592    }
593
594    /// Clear all cached data
595    pub fn clear(&mut self) {
596        self.basis_cache.clear();
597        self.span_cache.clear();
598    }
599
600    /// Get cache statistics
601    pub fn stats(&self) -> &CacheStats {
602        &self.stats
603    }
604
605    /// Reset cache statistics
606    pub fn reset_stats(&mut self) {
607        self.stats.reset();
608    }
609}
610
/// A cache-aware B-spline implementation
///
/// Wraps a [`BSpline`] together with a [`BSplineCache`] so repeated
/// evaluations at nearby points can reuse cached knot-span lookups and
/// basis function values.
#[derive(Debug)]
pub struct CachedBSpline<T>
where
    T: Float
        + FromPrimitive
        + Debug
        + Display
        + Add<Output = T>
        + Sub<Output = T>
        + Mul<Output = T>
        + Div<Output = T>
        + AddAssign
        + SubAssign
        + MulAssign
        + DivAssign
        + RemAssign
        + Zero
        + Copy,
{
    /// The underlying B-spline
    spline: BSpline<T>,
    /// Cache for basis function evaluations
    cache: BSplineCache<T>,
}
636
637#[allow(dead_code)]
638impl<T> CachedBSpline<T>
639where
640    T: crate::traits::InterpolationFloat,
641{
642    /// Create a new cached B-spline
643    ///
644    /// # Arguments
645    ///
646    /// * `knots` - Knot vector for the B-spline
647    /// * `coeffs` - Control coefficients
648    /// * `degree` - Degree of the B-spline
649    /// * `extrapolate` - Extrapolation mode for points outside the domain
650    /// * `cache` - Cache configuration for optimization
651    ///
652    /// # Examples
653    ///
654    /// ```rust
655    /// use scirs2_core::ndarray::Array1;
656    /// use scirs2_interpolate::cache::{CachedBSpline, BSplineCache, CacheConfig};
657    /// use scirs2_interpolate::bspline::ExtrapolateMode;
658    ///
659    /// let knots = Array1::from_vec(vec![0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0]);
660    /// let coeffs = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0, 5.0]);
661    ///
662    /// let cache_config = CacheConfig {
663    ///     track_stats: true,
664    ///     max_basis_cache_size: 512,
665    ///     ..Default::default()
666    /// };
667    /// let cache = BSplineCache::new(cache_config);
668    ///
669    /// let cached_spline = CachedBSpline::new(
670    ///     &knots.view(),
671    ///     &coeffs.view(),
672    ///     2, // quadratic
673    ///     ExtrapolateMode::Extrapolate,
674    ///     cache,
675    /// ).expect("Operation failed");
676    /// ```
677    pub fn new(
678        knots: &ArrayView1<T>,
679        coeffs: &ArrayView1<T>,
680        degree: usize,
681        extrapolate: ExtrapolateMode,
682        cache: BSplineCache<T>,
683    ) -> InterpolateResult<Self> {
684        let spline = BSpline::new(knots, coeffs, degree, extrapolate)?;
685
686        Ok(Self { spline, cache })
687    }
688
689    /// Evaluate the B-spline using cached basis functions
690    ///
691    /// This method provides optimized evaluation by caching basis function
692    /// computations. For repeated evaluations at similar points, this can
693    /// be significantly faster than standard evaluation.
694    ///
695    /// # Arguments
696    ///
697    /// * `x` - Point at which to evaluate the B-spline
698    ///
699    /// # Returns
700    ///
701    /// The value of the B-spline at x
702    ///
703    /// # Examples
704    ///
705    /// ```rust
706    /// use scirs2_core::ndarray::Array1;
707    /// use scirs2_interpolate::cache::make_cached_bspline;
708    /// use scirs2_interpolate::bspline::ExtrapolateMode;
709    ///
710    /// let knots = Array1::linspace(0.0, 10.0, 11);
711    /// let coeffs = Array1::linspace(1.0, 5.0, 8);
712    ///
713    /// let mut cached_spline = make_cached_bspline(
714    ///     &knots.view(),
715    ///     &coeffs.view(),
716    ///     2,
717    ///     ExtrapolateMode::Extrapolate,
718    /// ).expect("Operation failed");
719    ///
720    /// // Fast repeated evaluations
721    /// let x_values = Array1::linspace(0.0, 10.0, 100);
722    /// for &x in x_values.iter() {
723    ///     let y = cached_spline.evaluate_cached(x).expect("Operation failed");
724    ///     // Process result...
725    /// }
726    ///
727    /// // Check cache performance
728    /// let stats = cached_spline.cache_stats();
729    /// println!("Cache hit ratio: {:.2}", stats.hit_ratio());
730    /// ```
731    pub fn evaluate_cached(&mut self, x: T) -> InterpolateResult<T> {
732        // For this implementation, we'll delegate to the underlying spline
733        // In a full implementation, we would cache the basis function evaluations
734        // and use the cache during the evaluation process
735        self.evaluate_with_cache_optimization(x)
736    }
737
    /// Evaluate with cache optimization for basis functions
    ///
    /// Looks up the knot span through the span cache, then accumulates
    /// `sum(coeffs[i] * N_i(x))` over the `degree + 1` basis functions that
    /// can be non-zero on that span, fetching each basis value through the
    /// basis cache.
    fn evaluate_with_cache_optimization(&mut self, x: T) -> InterpolateResult<T> {
        // Get knot vector and coefficients from the underlying spline
        let knots = self.spline.knot_vector();
        let coeffs = self.spline.coefficients();
        let degree = self.spline.degree();

        // Find the knot span using cached lookup
        // Clone necessary data to avoid borrowing conflicts: the closures
        // below must not borrow `self.spline` while `self.cache` is
        // borrowed mutably.
        let knots_clone = knots.to_owned();
        let span = self
            .cache
            .get_or_compute_span(x, || Self::find_knot_span(x, &knots_clone, degree));

        // Compute basis functions using cache
        let mut result = T::zero();
        for i in 0..=degree {
            let basis_index = span.saturating_sub(degree) + i;
            if basis_index < coeffs.len() {
                // Cache key for this basis function evaluation; x is
                // quantized so nearby points share an entry
                let basis_key = BasisCacheKey {
                    x_quantized: self.quantize_x(x),
                    index: basis_index,
                    degree,
                };

                let basis_value = self.cache.get_or_compute_basis_with_key(basis_key, || {
                    // Guard against indices past the last valid basis function
                    if basis_index < knots_clone.len() - degree - 1 {
                        Self::compute_basis_function(x, basis_index, degree, &knots_clone)
                    } else {
                        T::zero()
                    }
                });

                result += coeffs[basis_index] * basis_value;
            }
        }

        Ok(result)
    }
778
779    /// Quantize x value for cache key consistency
780    fn quantize_x(&self, x: T) -> u64 {
781        let x_f64 = x.to_f64().unwrap_or(0.0);
782        let tolerance = self.cache.config.tolerance;
783        let quantized = (x_f64 / tolerance).round() * tolerance;
784        quantized.to_bits()
785    }
786
    /// Compute all non-zero basis functions at x using De Boor's algorithm
    ///
    /// Returns an array of length `degree + 1` holding the values of the
    /// basis functions that can be non-zero on the knot span containing
    /// `x`, in order of increasing basis index.
    fn compute_basis_functions_at_x(&self, x: T, knots: &Array1<T>, degree: usize) -> Array1<T> {
        let span = Self::find_knot_span(x, knots, degree);
        let mut basis = Array1::zeros(degree + 1);

        // De Boor's algorithm for all basis functions at once:
        // start from the degree-0 seed and build each higher degree from
        // the previous one in place.
        basis[0] = T::one();

        for j in 1..=degree {
            let mut saved = T::zero();
            for r in 0..j {
                let left_knot_idx = span + 1 - j + r;
                let right_knot_idx = span + r;

                // Bounds guard; with a valid span (degree <= span < n)
                // these indices are expected to be in range
                if left_knot_idx < knots.len() && right_knot_idx + 1 < knots.len() {
                    // Interpolation factor; zero on repeated (zero-length) knots
                    let alpha = if knots[right_knot_idx + 1] != knots[left_knot_idx] {
                        (x - knots[left_knot_idx])
                            / (knots[right_knot_idx + 1] - knots[left_knot_idx])
                    } else {
                        T::zero()
                    };

                    let temp = basis[r];
                    basis[r] = saved + (T::one() - alpha) * temp;
                    saved = alpha * temp;
                }
            }
            basis[j] = saved;
        }

        basis
    }
819
820    /// Optimized batch evaluation using cached computations
821    pub fn evaluate_batch_optimized(
822        &mut self,
823        x_vals: &ArrayView1<T>,
824    ) -> InterpolateResult<Array1<T>> {
825        let mut results = Array1::zeros(x_vals.len());
826
827        // Pre-warm cache with commonly accessed spans
828        for &x in x_vals.iter().take(x_vals.len().min(10)) {
829            let _ = self.evaluate_cached(x)?;
830        }
831
832        // Evaluate all points
833        for (i, &x) in x_vals.iter().enumerate() {
834            results[i] = self.evaluate_cached(x)?;
835        }
836
837        Ok(results)
838    }
839
840    /// Clear cache and force recomputation
841    pub fn refresh_cache(&mut self) {
842        self.cache.clear();
843    }
844
845    /// Optimize cache settings based on usage patterns
846    pub fn optimize_cache_settings(&mut self) {
847        let (hit_ratio, total_requests) = {
848            let stats = self.cache.stats();
849            (stats.hit_ratio(), stats.hits + stats.misses)
850        };
851
852        // Adjust cache size based on hit ratio
853        if hit_ratio < 0.3 && self.cache.config.max_basis_cache_size > 64 {
854            // Low hit ratio - reduce cache size
855            self.cache.config.max_basis_cache_size /= 2;
856        } else if hit_ratio > 0.8 && self.cache.config.max_basis_cache_size < 4096 {
857            // High hit ratio - increase cache size
858            self.cache.config.max_basis_cache_size *= 2;
859        }
860
861        // Enable aggressive caching for frequently accessed data
862        if total_requests > 1000 {
863            self.cache.config.adaptive_sizing = true;
864        }
865    }
866
    /// Find the knot span containing x
    ///
    /// Returns the index `i` such that `knots[i] <= x < knots[i + 1]`,
    /// clamped to the valid range `[degree, n - 1]` where
    /// `n = knots.len() - degree - 1`; values outside the domain are
    /// clamped to the first/last interior span.
    ///
    /// NOTE(review): underflows (panics in debug) if
    /// `knots.len() <= degree + 1`; callers are expected to pass a
    /// validated knot vector.
    fn find_knot_span(x: T, knots: &Array1<T>, degree: usize) -> usize {
        let n = knots.len() - degree - 1;

        // Clamp to the last interior span for x at/after the domain end
        if x >= knots[n] {
            return n - 1;
        }
        // Clamp to the first interior span for x at/before the domain start
        if x <= knots[degree] {
            return degree;
        }

        // Binary search for the span satisfying knots[mid] <= x < knots[mid + 1]
        let mut low = degree;
        let mut high = n;
        let mut mid = (low + high) / 2;

        while x < knots[mid] || x >= knots[mid + 1] {
            if x < knots[mid] {
                high = mid;
            } else {
                low = mid;
            }
            mid = (low + high) / 2;
        }

        mid
    }
894
    /// Compute a single basis function value
    ///
    /// Direct Cox-de Boor recursion for the basis function `N_{i,degree}`
    /// evaluated at `x`. Exponential in `degree`, but it is only invoked
    /// for the few indices that can be non-zero at `x`, and results are
    /// cached by the caller.
    ///
    /// NOTE(review): the degree-0 case uses the half-open interval
    /// `[knots[i], knots[i+1])`, so `x` exactly equal to the final knot
    /// evaluates to zero - confirm this matches the extrapolation handling
    /// of the underlying `BSpline`.
    #[allow(clippy::only_used_in_recursion)]
    fn compute_basis_function(x: T, i: usize, degree: usize, knots: &Array1<T>) -> T {
        // De Boor's algorithm for a single basis function
        if degree == 0 {
            if i < knots.len() - 1 && x >= knots[i] && x < knots[i + 1] {
                T::one()
            } else {
                T::zero()
            }
        } else {
            let mut left = T::zero();
            let mut right = T::zero();

            // Left recursion; skipped on zero-length spans to avoid
            // division by zero
            if i < knots.len() - degree - 1 && knots[i + degree] != knots[i] {
                let basis_left = Self::compute_basis_function(x, i, degree - 1, knots);
                left = (x - knots[i]) / (knots[i + degree] - knots[i]) * basis_left;
            }

            // Right recursion; same zero-length-span guard
            if i + 1 < knots.len() - degree - 1 && knots[i + degree + 1] != knots[i + 1] {
                let basis_right = Self::compute_basis_function(x, i + 1, degree - 1, knots);
                right = (knots[i + degree + 1] - x) / (knots[i + degree + 1] - knots[i + 1])
                    * basis_right;
            }

            left + right
        }
    }
925
    /// Evaluate the B-spline using the standard (non-cached) method
    ///
    /// Bypasses the basis-function cache and delegates directly to
    /// [`BSpline::evaluate`]; useful as a reference when validating
    /// cached results (see the unit tests).
    pub fn evaluate_standard(&self, x: T) -> InterpolateResult<T> {
        self.spline.evaluate(x)
    }
930
931    /// Evaluate at multiple points using cached basis functions
932    ///
933    /// This method efficiently evaluates the B-spline at multiple points,
934    /// leveraging cached computations for improved performance.
935    ///
936    /// # Arguments
937    ///
938    /// * `x_vals` - Array of x-coordinates at which to evaluate
939    ///
940    /// # Returns
941    ///
942    /// Array of B-spline values corresponding to the input coordinates
943    ///
944    /// # Examples
945    ///
946    /// ```rust
947    /// use scirs2_core::ndarray::Array1;
948    /// use scirs2_interpolate::cache::make_cached_bspline;
949    /// use scirs2_interpolate::bspline::ExtrapolateMode;
950    ///
951    /// let knots = Array1::from_vec(vec![0.0, 0.0, 1.0, 2.0, 3.0, 3.0]);
952    /// let coeffs = Array1::from_vec(vec![1.0, 2.0, 3.0, 4.0]);
953    ///
954    /// let mut cached_spline = make_cached_bspline(
955    ///     &knots.view(),
956    ///     &coeffs.view(),
957    ///     1, // linear
958    ///     ExtrapolateMode::Extrapolate,
959    /// ).expect("Operation failed");
960    ///
961    /// // Evaluate at multiple points efficiently
962    /// let x_vals = Array1::from_vec(vec![0.5, 1.0, 1.5, 2.0, 2.5]);
963    /// let results = cached_spline.evaluate_array_cached(&x_vals.view()).expect("Operation failed");
964    ///
965    /// assert_eq!(results.len(), x_vals.len());
966    /// ```
967    pub fn evaluate_array_cached(
968        &mut self,
969        x_vals: &ArrayView1<T>,
970    ) -> InterpolateResult<Array1<T>> {
971        let mut results = Array1::zeros(x_vals.len());
972        for (i, &x) in x_vals.iter().enumerate() {
973            results[i] = self.evaluate_cached(x)?;
974        }
975        Ok(results)
976    }
977
    /// Get access to the cache statistics
    ///
    /// Returns a shared reference to the hit/miss/eviction counters
    /// maintained by the underlying cache.
    pub fn cache_stats(&self) -> &CacheStats {
        self.cache.stats()
    }
982
    /// Reset cache statistics
    ///
    /// Zeroes the counters without touching cached data.
    pub fn reset_cache_stats(&mut self) {
        self.cache.reset_stats();
    }
987
    /// Clear all cached data
    ///
    /// Equivalent to [`Self::refresh_cache`]; subsequent evaluations
    /// recompute their basis functions.
    pub fn clear_cache(&mut self) {
        self.cache.clear();
    }
992
    /// Get the underlying B-spline
    ///
    /// Borrows the wrapped [`BSpline`] for direct (non-cached) use.
    pub fn spline(&self) -> &BSpline<T> {
        &self.spline
    }
997}
998
/// Cache for distance matrices used in scattered data interpolation
///
/// Matrices are keyed by a 64-bit hash of the (tolerance-quantized) input
/// point coordinates, so repeated queries with the same point set reuse the
/// previously computed matrix instead of recomputing it.
#[derive(Debug)]
pub struct DistanceMatrixCache<F: Float> {
    /// Cache for computed distance matrices, keyed by a hash of the points
    matrix_cache: HashMap<u64, Array2<F>>,
    /// Configuration (capacity limit, quantization tolerance, stats toggle)
    config: CacheConfig,
    /// Statistics (hits, misses, evictions)
    stats: CacheStats,
}
1009
1010impl<F: crate::traits::InterpolationFloat> DistanceMatrixCache<F> {
1011    /// Create a new distance matrix cache
1012    pub fn new(config: CacheConfig) -> Self {
1013        Self {
1014            matrix_cache: HashMap::new(),
1015            config,
1016            stats: CacheStats::default(),
1017        }
1018    }
1019
1020    /// Get a cached distance matrix or compute and cache it
1021    pub fn get_or_compute_distance_matrix<T>(
1022        &mut self,
1023        points: &Array2<T>,
1024        computer: impl FnOnce(&Array2<T>) -> Array2<F>,
1025    ) -> Array2<F>
1026    where
1027        T: Float,
1028    {
1029        // Create a hash of the points array for cache key
1030        let key = self.hash_points_safe(points);
1031
1032        if let Some(cached_matrix) = self.matrix_cache.get(&key) {
1033            if self.config.track_stats {
1034                self.stats.hits += 1;
1035            }
1036            cached_matrix.clone()
1037        } else {
1038            if self.config.track_stats {
1039                self.stats.misses += 1;
1040            }
1041
1042            let computed = computer(points);
1043
1044            // Check cache size and evict if necessary
1045            if self.matrix_cache.len() >= self.config.max_distance_cache_size {
1046                self.evict_matrix_cache();
1047            }
1048
1049            self.matrix_cache.insert(key, computed.clone());
1050            computed
1051        }
1052    }
1053
1054    /// Create a hash of the points array using a safe approach for floating point
1055    fn hash_points_safe<T: Float>(&self, points: &Array2<T>) -> u64 {
1056        use std::collections::hash_map::DefaultHasher;
1057        let mut hasher = DefaultHasher::new();
1058
1059        // Hash the shape
1060        points.shape()[0].hash(&mut hasher);
1061        points.shape()[1].hash(&mut hasher);
1062
1063        // Hash a subset of points for efficiency (or all if small)
1064        let hash_stride = if points.len() > 1000 {
1065            points.len() / 100
1066        } else {
1067            1
1068        };
1069
1070        for (i, &val) in points.iter().enumerate() {
1071            if i % hash_stride == 0 {
1072                // Convert to f64 and then to bits for consistent hashing
1073                let val_f64 = val.to_f64().unwrap_or(0.0);
1074                // Quantize to tolerance for consistent hashing of similar values
1075                let quantized = (val_f64 / self.config.tolerance).round() * self.config.tolerance;
1076                let bits = quantized.to_bits();
1077                bits.hash(&mut hasher);
1078            }
1079        }
1080
1081        hasher.finish()
1082    }
1083
1084    /// Evict some entries from the matrix cache
1085    fn evict_matrix_cache(&mut self) {
1086        let remove_count = self.matrix_cache.len() / 4; // Remove 25%
1087        let keys_to_remove: Vec<_> = self
1088            .matrix_cache
1089            .keys()
1090            .take(remove_count)
1091            .cloned()
1092            .collect();
1093
1094        for key in keys_to_remove {
1095            self.matrix_cache.remove(&key);
1096            if self.config.track_stats {
1097                self.stats.evictions += 1;
1098            }
1099        }
1100    }
1101
1102    /// Clear all cached data
1103    pub fn clear(&mut self) {
1104        self.matrix_cache.clear();
1105    }
1106
1107    /// Get cache statistics
1108    pub fn stats(&self) -> &CacheStats {
1109        &self.stats
1110    }
1111
1112    /// Reset cache statistics
1113    pub fn reset_stats(&mut self) {
1114        self.stats.reset();
1115    }
1116}
1117
1118/// Create a cached B-spline with default cache settings
1119#[allow(dead_code)]
1120pub fn make_cached_bspline<T>(
1121    knots: &ArrayView1<T>,
1122    coeffs: &ArrayView1<T>,
1123    degree: usize,
1124    extrapolate: ExtrapolateMode,
1125) -> InterpolateResult<CachedBSpline<T>>
1126where
1127    T: crate::traits::InterpolationFloat,
1128{
1129    let cache = BSplineCache::default();
1130    CachedBSpline::new(knots, coeffs, degree, extrapolate, cache)
1131}
1132
1133/// Create a cached B-spline with custom cache configuration
1134#[allow(dead_code)]
1135pub fn make_cached_bspline_with_config<T>(
1136    knots: &ArrayView1<T>,
1137    coeffs: &ArrayView1<T>,
1138    degree: usize,
1139    extrapolate: ExtrapolateMode,
1140    cache_config: CacheConfig,
1141) -> InterpolateResult<CachedBSpline<T>>
1142where
1143    T: crate::traits::InterpolationFloat,
1144{
1145    let cache = BSplineCache::new(cache_config);
1146    CachedBSpline::new(knots, coeffs, degree, extrapolate, cache)
1147}
1148
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_relative_eq;
    use scirs2_core::ndarray::array;

    // Cached evaluation must agree with the underlying BSpline evaluator.
    #[test]
    fn test_cached_bspline_evaluation() {
        // Create a simple B-spline
        let knots = array![0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0];
        let coeffs = array![1.0, 2.0, 3.0, 4.0, 5.0];

        let mut cached_spline = make_cached_bspline(
            &knots.view(),
            &coeffs.view(),
            2, // quadratic
            ExtrapolateMode::Extrapolate,
        )
        .expect("Operation failed");

        // Test evaluation at a few points
        let test_points = array![0.5, 1.0, 1.5, 2.0, 2.5];

        for &x in test_points.iter() {
            let cached_result = cached_spline.evaluate_cached(x).expect("Operation failed");
            let standard_result = cached_spline
                .evaluate_standard(x)
                .expect("Operation failed");

            // Results should be very close
            assert_relative_eq!(cached_result, standard_result, epsilon = 1e-10);
        }
    }

    // Hit/miss counters should reflect cold vs. warm cache lookups.
    #[test]
    fn test_cache_statistics() {
        let knots = array![0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0];
        let coeffs = array![1.0, 2.0, 3.0, 4.0];

        let mut cached_spline = make_cached_bspline_with_config(
            &knots.view(),
            &coeffs.view(),
            2,
            ExtrapolateMode::Extrapolate,
            CacheConfig {
                track_stats: true,
                ..Default::default()
            },
        )
        .expect("Operation failed");

        // First evaluation should result in cache misses
        let _ = cached_spline
            .evaluate_cached(1.5)
            .expect("Operation failed");
        let stats_after_first = cached_spline.cache_stats();
        assert!(stats_after_first.misses > 0);

        // Second evaluation at the same point should result in cache hits
        let _ = cached_spline
            .evaluate_cached(1.5)
            .expect("Operation failed");
        let stats_after_second = cached_spline.cache_stats();
        assert!(stats_after_second.hits > 0);
    }

    // Exercises miss -> hit -> miss transitions of the distance-matrix cache,
    // including the panic-closure guard proving the computer is skipped on hits.
    #[test]
    fn test_distance_matrix_cache() {
        let config = CacheConfig {
            track_stats: true,
            max_distance_cache_size: 10,
            ..Default::default()
        };
        let mut cache = DistanceMatrixCache::<f64>::new(config);

        // Create test points
        let points = Array2::from_shape_vec((3, 2), vec![0.0, 0.0, 1.0, 0.0, 0.0, 1.0])
            .expect("Operation failed");

        // First computation should be a cache miss
        let result1 = cache.get_or_compute_distance_matrix(&points, |pts| {
            let n = pts.nrows();
            let mut distances = Array2::zeros((n, n));
            for i in 0..n {
                for j in 0..n {
                    let diff = &pts.slice(scirs2_core::ndarray::s![i, ..])
                        - &pts.slice(scirs2_core::ndarray::s![j, ..]);
                    distances[[i, j]] = diff.iter().map(|&x| x * x).sum::<f64>().sqrt();
                }
            }
            distances
        });

        assert_eq!(result1.shape(), &[3, 3]);
        assert_eq!(cache.stats().misses, 1);
        assert_eq!(cache.stats().hits, 0);

        // Second computation with same points should be a cache hit
        let result2 = cache.get_or_compute_distance_matrix(&points, |_| {
            panic!("Should not be called on cache hit");
        });

        assert_eq!(result1, result2);
        assert_eq!(cache.stats().misses, 1);
        assert_eq!(cache.stats().hits, 1);

        // Different points should be a cache miss
        let different_points =
            Array2::from_shape_vec((2, 2), vec![2.0, 2.0, 3.0, 3.0]).expect("Operation failed");
        let _result3 = cache.get_or_compute_distance_matrix(&different_points, |pts| {
            let n = pts.nrows();
            Array2::zeros((n, n))
        });

        assert_eq!(cache.stats().misses, 2);
        assert_eq!(cache.stats().hits, 1);
    }
}