Skip to main content

oxirs_core/query/
plan_cache.rs

1//! Query plan caching for improved performance
2//!
3//! This module provides an LRU cache for compiled query plans with persistence support.
4
5use crate::query::plan::ExecutionPlan;
6use crate::OxirsError;
7use lru::LruCache;
8use scirs2_core::metrics::Counter;
9use serde::{Deserialize, Serialize};
10use std::collections::hash_map::DefaultHasher;
11use std::hash::{Hash, Hasher};
12use std::num::NonZeroUsize;
13use std::path::Path;
14use std::sync::{Arc, RwLock};
15use std::time::Instant;
16
/// Query plan cache with LRU eviction
///
/// Caches compiled execution plans to avoid repeated query compilation
/// and optimization overhead.
///
/// This is the full-featured LRU implementation backed by the `lru` crate and
/// SciRS2 metrics.  For a simpler, self-contained cache see [`QueryPlanCache`] below.
///
/// All shared state is behind `Arc<RwLock<...>>` so the cache can be cloned
/// into and used from multiple threads; the locks serialize access to the
/// underlying LRU map and to the statistics snapshot.
pub struct LruQueryPlanCache {
    /// LRU cache for execution plans, keyed by the hash of the query string
    cache: Arc<RwLock<LruCache<u64, CachedPlan>>>,
    /// Cache statistics (hits, misses, evictions, size)
    stats: Arc<RwLock<CacheStatistics>>,
    /// Metrics counter recording cache hits
    hit_counter: Counter,
    /// Metrics counter recording cache misses
    miss_counter: Counter,
    /// Metrics counter recording evictions
    eviction_counter: Counter,
    /// Cache configuration (capacity, TTL, persistence)
    config: CacheConfig,
}
36
/// Cached execution plan with metadata
///
/// Fully serializable so the whole cache contents can be persisted to disk
/// and reloaded (see `LruQueryPlanCache::persist` / `load`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CachedPlan {
    /// The compiled execution plan, in its serializable form
    pub plan: SerializablePlan,
    /// Query signature (hash of the original query string)
    pub signature: u64,
    /// When the plan was cached, in milliseconds
    pub cached_at_ms: u128,
    /// How many times this plan was accessed since it was cached
    pub access_count: u64,
    /// Last access time in milliseconds (same time base as `cached_at_ms`)
    pub last_accessed_ms: u128,
    /// Estimated execution cost (lower is better)
    pub estimated_cost: f64,
    /// Average actual execution time (milliseconds); maintained as an
    /// exponential moving average, 0.0 until the first measurement
    pub avg_execution_time_ms: f64,
}
55
/// Serializable representation of execution plan
///
/// A lossy, human-readable mirror of `ExecutionPlan`: the operator tree is
/// preserved structurally, but patterns, filter expressions, and variables
/// are stored as `Debug`-formatted strings.  A persisted plan therefore
/// describes the shape of a query rather than being directly executable.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SerializablePlan {
    /// Triple scan operation (pattern stored as a debug string)
    TripleScan { pattern_desc: String },
    /// Hash join operation over the shared `join_vars`
    HashJoin {
        left: Box<SerializablePlan>,
        right: Box<SerializablePlan>,
        join_vars: Vec<String>,
    },
    /// Filter operation (filter expression stored as a debug string)
    Filter {
        input: Box<SerializablePlan>,
        expr_desc: String,
    },
    /// Projection onto the named variables
    Project {
        input: Box<SerializablePlan>,
        variables: Vec<String>,
    },
    /// Union of two sub-plans
    Union {
        left: Box<SerializablePlan>,
        right: Box<SerializablePlan>,
    },
    /// Empty plan; also the fallback for plan variants that have no
    /// serializable mapping
    Empty,
}
85
/// Cache configuration
#[derive(Debug, Clone)]
pub struct CacheConfig {
    /// Maximum number of cached plans (a value of 0 falls back to 1000
    /// in `LruQueryPlanCache::new`)
    pub max_size: usize,
    /// Enable persistence to disk; when false, `persist`/`load` are no-ops
    pub enable_persistence: bool,
    /// Path for the persisted cache file (JSON); required when
    /// `enable_persistence` is true
    pub persistence_path: Option<String>,
    /// Time-to-live for cached plans in milliseconds; `None` disables expiry
    pub ttl_ms: Option<u128>,
}
98
99impl Default for CacheConfig {
100    fn default() -> Self {
101        Self {
102            max_size: 1000,
103            enable_persistence: false,
104            persistence_path: None,
105            ttl_ms: Some(3_600_000), // 1 hour
106        }
107    }
108}
109
/// Cache statistics
///
/// A plain snapshot type: cloned out of the cache by
/// `LruQueryPlanCache::statistics`, and serializable for reporting.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct CacheStatistics {
    /// Total cache hits
    pub hits: u64,
    /// Total cache misses (TTL expirations are also counted as misses)
    pub misses: u64,
    /// Total evictions caused by capacity pressure
    pub evictions: u64,
    /// Current cache size (number of entries)
    pub current_size: usize,
    /// Total plans cached over the lifetime of the cache
    pub total_cached: u64,
}
124
125impl LruQueryPlanCache {
126    /// Create a new query plan cache
127    pub fn new(config: CacheConfig) -> Self {
128        let capacity = NonZeroUsize::new(config.max_size)
129            .unwrap_or(NonZeroUsize::new(1000).expect("1000 is non-zero"));
130
131        Self {
132            cache: Arc::new(RwLock::new(LruCache::new(capacity))),
133            stats: Arc::new(RwLock::new(CacheStatistics::default())),
134            hit_counter: Counter::new("plan_cache.hits".to_string()),
135            miss_counter: Counter::new("plan_cache.misses".to_string()),
136            eviction_counter: Counter::new("plan_cache.evictions".to_string()),
137            config,
138        }
139    }
140
141    /// Get a cached plan for a query string
142    pub fn get(&self, query: &str) -> Option<CachedPlan> {
143        let signature = Self::compute_signature(query);
144
145        let start = Instant::now();
146
147        let result = {
148            let mut cache = self.cache.write().ok()?;
149            cache.get_mut(&signature).cloned()
150        };
151
152        let _elapsed = start.elapsed(); // Track elapsed time for future use
153
154        if let Some(mut plan) = result {
155            // Update access metrics
156            plan.access_count += 1;
157            plan.last_accessed_ms = Instant::now().elapsed().as_millis();
158
159            // Check TTL
160            if let Some(ttl) = self.config.ttl_ms {
161                let age = plan.last_accessed_ms - plan.cached_at_ms;
162                if age > ttl {
163                    // Expired, remove from cache
164                    self.remove(query);
165                    self.record_miss();
166                    return None;
167                }
168            }
169
170            // Update cache with new access info
171            if let Ok(mut cache) = self.cache.write() {
172                cache.put(signature, plan.clone());
173            }
174
175            self.record_hit();
176            Some(plan)
177        } else {
178            self.record_miss();
179            None
180        }
181    }
182
183    /// Put a compiled plan into the cache
184    pub fn put(
185        &self,
186        query: &str,
187        plan: ExecutionPlan,
188        estimated_cost: f64,
189    ) -> Result<(), OxirsError> {
190        let signature = Self::compute_signature(query);
191        let serializable = Self::convert_to_serializable(&plan);
192
193        let cached_plan = CachedPlan {
194            plan: serializable,
195            signature,
196            cached_at_ms: Instant::now().elapsed().as_millis(),
197            access_count: 0,
198            last_accessed_ms: Instant::now().elapsed().as_millis(),
199            estimated_cost,
200            avg_execution_time_ms: 0.0,
201        };
202
203        let mut cache = self
204            .cache
205            .write()
206            .map_err(|e| OxirsError::Query(format!("Failed to write cache: {}", e)))?;
207
208        // Check if we're evicting an entry
209        let will_evict = cache.len() >= cache.cap().get();
210        if will_evict {
211            self.record_eviction();
212        }
213
214        cache.put(signature, cached_plan);
215
216        // Update statistics
217        let mut stats = self
218            .stats
219            .write()
220            .map_err(|e| OxirsError::Query(format!("Failed to write stats: {}", e)))?;
221        stats.current_size = cache.len();
222        stats.total_cached += 1;
223
224        Ok(())
225    }
226
227    /// Remove a cached plan
228    pub fn remove(&self, query: &str) -> Option<CachedPlan> {
229        let signature = Self::compute_signature(query);
230
231        self.cache.write().ok()?.pop(&signature)
232    }
233
234    /// Clear all cached plans
235    pub fn clear(&self) -> Result<(), OxirsError> {
236        let mut cache = self
237            .cache
238            .write()
239            .map_err(|e| OxirsError::Query(format!("Failed to write cache: {}", e)))?;
240        cache.clear();
241
242        let mut stats = self
243            .stats
244            .write()
245            .map_err(|e| OxirsError::Query(format!("Failed to write stats: {}", e)))?;
246        stats.current_size = 0;
247
248        Ok(())
249    }
250
251    /// Get cache statistics
252    pub fn statistics(&self) -> CacheStatistics {
253        self.stats
254            .read()
255            .ok()
256            .map(|s| s.clone())
257            .unwrap_or_default()
258    }
259
260    /// Get cache hit rate
261    pub fn hit_rate(&self) -> f64 {
262        let stats = self.statistics();
263        let total = stats.hits + stats.misses;
264        if total == 0 {
265            return 0.0;
266        }
267        stats.hits as f64 / total as f64
268    }
269
270    /// Persist cache to disk
271    pub fn persist(&self) -> Result<(), OxirsError> {
272        if !self.config.enable_persistence {
273            return Ok(());
274        }
275
276        let path = self
277            .config
278            .persistence_path
279            .as_ref()
280            .ok_or_else(|| OxirsError::Io("No persistence path configured".to_string()))?;
281
282        let cache = self
283            .cache
284            .read()
285            .map_err(|e| OxirsError::Query(format!("Failed to read cache: {}", e)))?;
286
287        // Convert cache to serializable format
288        let entries: Vec<(u64, CachedPlan)> = cache.iter().map(|(k, v)| (*k, v.clone())).collect();
289
290        let json = serde_json::to_string_pretty(&entries)
291            .map_err(|e| OxirsError::Serialize(e.to_string()))?;
292
293        std::fs::write(path, json).map_err(|e| OxirsError::Io(e.to_string()))?;
294
295        tracing::info!("Persisted {} cached plans to {}", entries.len(), path);
296
297        Ok(())
298    }
299
300    /// Load cache from disk
301    pub fn load(&self) -> Result<(), OxirsError> {
302        if !self.config.enable_persistence {
303            return Ok(());
304        }
305
306        let path = self
307            .config
308            .persistence_path
309            .as_ref()
310            .ok_or_else(|| OxirsError::Io("No persistence path configured".to_string()))?;
311
312        if !Path::new(path).exists() {
313            return Ok(()); // No cached data to load
314        }
315
316        let json = std::fs::read_to_string(path).map_err(|e| OxirsError::Io(e.to_string()))?;
317
318        let entries: Vec<(u64, CachedPlan)> =
319            serde_json::from_str(&json).map_err(|e| OxirsError::Parse(e.to_string()))?;
320
321        let mut cache = self
322            .cache
323            .write()
324            .map_err(|e| OxirsError::Query(format!("Failed to write cache: {}", e)))?;
325
326        for (sig, plan) in entries {
327            cache.put(sig, plan);
328        }
329
330        tracing::info!("Loaded {} cached plans from {}", cache.len(), path);
331
332        Ok(())
333    }
334
335    /// Update execution time for a cached plan
336    pub fn update_execution_time(
337        &self,
338        query: &str,
339        execution_time_ms: f64,
340    ) -> Result<(), OxirsError> {
341        let signature = Self::compute_signature(query);
342
343        let mut cache = self
344            .cache
345            .write()
346            .map_err(|e| OxirsError::Query(format!("Failed to write cache: {}", e)))?;
347
348        if let Some(plan) = cache.get_mut(&signature) {
349            // Update average execution time with exponential moving average
350            let alpha = 0.3; // Weight for new measurement
351            if plan.avg_execution_time_ms == 0.0 {
352                plan.avg_execution_time_ms = execution_time_ms;
353            } else {
354                plan.avg_execution_time_ms =
355                    alpha * execution_time_ms + (1.0 - alpha) * plan.avg_execution_time_ms;
356            }
357        }
358
359        Ok(())
360    }
361
362    // Private helper methods
363
364    fn compute_signature(query: &str) -> u64 {
365        let mut hasher = DefaultHasher::new();
366        query.hash(&mut hasher);
367        hasher.finish()
368    }
369
370    fn convert_to_serializable(plan: &ExecutionPlan) -> SerializablePlan {
371        match plan {
372            ExecutionPlan::TripleScan { pattern } => SerializablePlan::TripleScan {
373                pattern_desc: format!("{:?}", pattern),
374            },
375            ExecutionPlan::HashJoin {
376                left,
377                right,
378                join_vars,
379            } => SerializablePlan::HashJoin {
380                left: Box::new(Self::convert_to_serializable(left)),
381                right: Box::new(Self::convert_to_serializable(right)),
382                join_vars: join_vars.iter().map(|v| format!("{:?}", v)).collect(),
383            },
384            ExecutionPlan::Filter { input, condition } => SerializablePlan::Filter {
385                input: Box::new(Self::convert_to_serializable(input)),
386                expr_desc: format!("{:?}", condition),
387            },
388            ExecutionPlan::Project { input, vars } => SerializablePlan::Project {
389                input: Box::new(Self::convert_to_serializable(input)),
390                variables: vars.iter().map(|v| format!("{:?}", v)).collect(),
391            },
392            ExecutionPlan::Union { left, right } => SerializablePlan::Union {
393                left: Box::new(Self::convert_to_serializable(left)),
394                right: Box::new(Self::convert_to_serializable(right)),
395            },
396            _ => SerializablePlan::Empty,
397        }
398    }
399
400    fn record_hit(&self) {
401        self.hit_counter.add(1);
402        if let Ok(mut stats) = self.stats.write() {
403            stats.hits += 1;
404        }
405    }
406
407    fn record_miss(&self) {
408        self.miss_counter.add(1);
409        if let Ok(mut stats) = self.stats.write() {
410            stats.misses += 1;
411        }
412    }
413
414    fn record_eviction(&self) {
415        self.eviction_counter.add(1);
416        if let Ok(mut stats) = self.stats.write() {
417            stats.evictions += 1;
418        }
419    }
420}
421
#[cfg(test)]
mod tests {
    use super::*;

    // These tests construct real `ExecutionPlan`s via the project's
    // `TriplePattern`, so they exercise the full put/get/convert path.

    // A fresh cache starts with zeroed statistics.
    #[test]
    fn test_cache_creation() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let stats = cache.statistics();
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 0);
    }

    // A plan that was put can be retrieved, with its cost intact.
    #[test]
    fn test_cache_put_get() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let query = "SELECT ?s ?p ?o WHERE { ?s ?p ?o }";
        let plan = ExecutionPlan::TripleScan {
            pattern: crate::model::pattern::TriplePattern::new(None, None, None),
        };

        cache
            .put(query, plan, 100.0)
            .expect("cache put should succeed");

        let cached = cache.get(query);
        assert!(cached.is_some());
        assert_eq!(
            cached.expect("cached value should exist").estimated_cost,
            100.0
        );
    }

    // Looking up an unknown query returns None and counts a miss.
    #[test]
    fn test_cache_miss() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let result = cache.get("SELECT ?s WHERE { ?s ?p ?o }");
        assert!(result.is_none());

        let stats = cache.statistics();
        assert_eq!(stats.misses, 1);
    }

    // Removing an entry makes subsequent lookups miss.
    #[test]
    fn test_cache_remove() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let query = "SELECT ?s WHERE { ?s ?p ?o }";
        let plan = ExecutionPlan::TripleScan {
            pattern: crate::model::pattern::TriplePattern::new(None, None, None),
        };

        cache
            .put(query, plan, 50.0)
            .expect("cache put should succeed");
        assert!(cache.get(query).is_some());

        cache.remove(query);
        assert!(cache.get(query).is_none());
    }

    // clear() empties the cache and resets the tracked size.
    #[test]
    fn test_cache_clear() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let plan = ExecutionPlan::TripleScan {
            pattern: crate::model::pattern::TriplePattern::new(None, None, None),
        };

        cache
            .put("query1", plan.clone(), 50.0)
            .expect("cache put should succeed");
        cache
            .put("query2", plan, 75.0)
            .expect("cache put should succeed");

        cache.clear().expect("cache clear should succeed");

        let stats = cache.statistics();
        assert_eq!(stats.current_size, 0);
    }

    // One hit plus one miss yields a 50% hit rate.
    #[test]
    fn test_hit_rate() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let plan = ExecutionPlan::TripleScan {
            pattern: crate::model::pattern::TriplePattern::new(None, None, None),
        };

        let query = "SELECT * WHERE { ?s ?p ?o }";
        cache
            .put(query, plan, 100.0)
            .expect("cache put should succeed");

        // One hit
        cache.get(query);
        // One miss
        cache.get("SELECT * WHERE { ?x ?y ?z }");

        let hit_rate = cache.hit_rate();
        assert!((hit_rate - 0.5).abs() < 0.01); // 50% hit rate
    }

    // Filling a capacity-2 cache with a third entry evicts the oldest one.
    #[test]
    fn test_lru_eviction() {
        let config = CacheConfig {
            max_size: 2, // Small cache for testing
            ..Default::default()
        };

        let cache = LruQueryPlanCache::new(config);

        let plan = ExecutionPlan::TripleScan {
            pattern: crate::model::pattern::TriplePattern::new(None, None, None),
        };

        cache
            .put("query1", plan.clone(), 10.0)
            .expect("cache put should succeed");
        cache
            .put("query2", plan.clone(), 20.0)
            .expect("cache put should succeed");
        cache
            .put("query3", plan, 30.0)
            .expect("cache put should succeed"); // Should evict query1

        assert!(cache.get("query1").is_none()); // Evicted
        assert!(cache.get("query2").is_some());
        assert!(cache.get("query3").is_some());
    }

    // The first measurement seeds the average; later ones blend via EMA.
    #[test]
    fn test_execution_time_update() {
        let config = CacheConfig::default();
        let cache = LruQueryPlanCache::new(config);

        let query = "SELECT ?s WHERE { ?s ?p ?o }";
        let plan = ExecutionPlan::TripleScan {
            pattern: crate::model::pattern::TriplePattern::new(None, None, None),
        };

        cache
            .put(query, plan, 100.0)
            .expect("cache put should succeed");

        cache
            .update_execution_time(query, 50.0)
            .expect("update should succeed");

        let cached = cache.get(query).expect("cache get should succeed");
        assert_eq!(cached.avg_execution_time_ms, 50.0);

        cache
            .update_execution_time(query, 70.0)
            .expect("update should succeed");

        let cached2 = cache.get(query).expect("cache get should succeed");
        // Should be exponential moving average
        assert!(cached2.avg_execution_time_ms > 50.0 && cached2.avg_execution_time_ms < 70.0);
    }
}
592
593// ===========================================================================
594// Simpler, self-contained QueryPlanCache
595//
596// A lightweight alternative to LruQueryPlanCache that does not depend on the
597// `lru` crate or SciRS2 metrics and is easier to use in unit tests and in
598// parts of the system that only need basic LRU semantics.
599// ===========================================================================
600
601use std::collections::HashMap;
602
/// A compiled SPARQL query plan ready for execution.
#[derive(Debug, Clone)]
pub struct QueryPlan {
    /// FNV / DefaultHasher hash of the original query string.
    pub query_hash: u64,
    /// The raw SPARQL query string.
    pub original_query: String,
    /// Human-readable descriptions of the optimized pattern steps.
    pub optimized_patterns: Vec<String>,
    /// Estimated execution cost (lower is better).
    pub estimated_cost: f64,
    /// Unix timestamp (ms) when this plan was created.
    pub created_at_ms: i64,
}

impl QueryPlan {
    /// Milliseconds since the Unix epoch, or 0 if the system clock reports
    /// a time before the epoch.
    fn now_ms() -> i64 {
        use std::time::{SystemTime, UNIX_EPOCH};
        match SystemTime::now().duration_since(UNIX_EPOCH) {
            Ok(elapsed) => elapsed.as_millis() as i64,
            Err(_) => 0,
        }
    }

    /// Construct a new plan stamped with the current wall-clock time.
    pub fn new(
        query_hash: u64,
        original_query: impl Into<String>,
        optimized_patterns: Vec<String>,
        estimated_cost: f64,
    ) -> Self {
        let created_at_ms = Self::now_ms();
        Self {
            query_hash,
            original_query: original_query.into(),
            optimized_patterns,
            estimated_cost,
            created_at_ms,
        }
    }
}
643
/// Aggregated statistics for a [`QueryPlanCache`] instance.
///
/// `hits`, `misses`, and `evictions` are monotonically increasing counters;
/// only `size` is reset by [`QueryPlanCache::clear`].
#[derive(Debug, Clone, Default)]
pub struct PlanCacheStats {
    /// Number of successful cache lookups.
    pub hits: u64,
    /// Number of failed cache lookups.
    pub misses: u64,
    /// Number of plans evicted to make room for new entries.
    pub evictions: u64,
    /// Current number of entries in the cache.
    pub size: usize,
}
656
/// A simple LRU query-plan cache with O(n) eviction.
///
/// This is deliberately kept dependency-free (no `lru` crate, no SciRS2).
/// The access order vector has the most-recently-used entry at the **front**
/// (index 0) and the least-recently-used at the **back**.
///
/// Not thread-safe on its own: all mutating operations take `&mut self`,
/// so callers must provide their own synchronization if shared.
///
/// For the high-throughput LRU cache backed by the `lru` crate, see
/// [`LruQueryPlanCache`].
pub struct QueryPlanCache {
    /// Plans keyed by query hash; `access_order` holds the same hashes.
    plans: HashMap<u64, QueryPlan>,
    /// Most-recently-used at index 0; least-recently-used at the back.
    /// Invariant: each hash appears at most once.
    access_order: Vec<u64>,
    /// Maximum number of entries; always >= 1 (see `new`).
    max_size: usize,
    /// Running hit/miss/eviction/size statistics.
    stats: PlanCacheStats,
}
672
673impl QueryPlanCache {
674    /// Create a new cache that can hold at most `max_size` plans.
675    ///
676    /// If `max_size` is 0 it is treated as 1 to avoid degenerate behaviour.
677    pub fn new(max_size: usize) -> Self {
678        let max_size = max_size.max(1);
679        Self {
680            plans: HashMap::new(),
681            access_order: Vec::new(),
682            max_size,
683            stats: PlanCacheStats::default(),
684        }
685    }
686
687    /// Retrieve the plan for `query_hash`.
688    ///
689    /// On a hit the entry is moved to the front of the access-order list and
690    /// `stats.hits` is incremented.  On a miss `stats.misses` is incremented.
691    pub fn get(&mut self, query_hash: u64) -> Option<&QueryPlan> {
692        if self.plans.contains_key(&query_hash) {
693            // Move to front of access_order.
694            if let Some(pos) = self.access_order.iter().position(|&h| h == query_hash) {
695                self.access_order.remove(pos);
696            }
697            self.access_order.insert(0, query_hash);
698            self.stats.hits += 1;
699            self.plans.get(&query_hash)
700        } else {
701            self.stats.misses += 1;
702            None
703        }
704    }
705
706    /// Insert a plan into the cache.
707    ///
708    /// If the cache is at capacity, [`evict_lru`] is called first.  The new
709    /// entry is placed at the front of the access-order list.
710    ///
711    /// [`evict_lru`]: QueryPlanCache::evict_lru
712    pub fn insert(&mut self, plan: QueryPlan) {
713        let hash = plan.query_hash;
714
715        // If the hash is already present, remove it from access_order so it
716        // can be re-inserted at the front.
717        if self.plans.contains_key(&hash) {
718            if let Some(pos) = self.access_order.iter().position(|&h| h == hash) {
719                self.access_order.remove(pos);
720            }
721        } else if self.plans.len() >= self.max_size {
722            self.evict_lru();
723        }
724
725        self.plans.insert(hash, plan);
726        self.access_order.insert(0, hash);
727        self.stats.size = self.plans.len();
728    }
729
730    /// Remove and return the least-recently-used plan (the one at the back of
731    /// the access-order list).  Increments `stats.evictions`.
732    ///
733    /// Returns `None` if the cache is empty.
734    pub fn evict_lru(&mut self) -> Option<QueryPlan> {
735        let lru_hash = self.access_order.pop()?;
736        let plan = self.plans.remove(&lru_hash);
737        self.stats.evictions += 1;
738        self.stats.size = self.plans.len();
739        plan
740    }
741
742    /// Remove a specific plan by hash.  Returns `true` if it existed.
743    pub fn invalidate(&mut self, query_hash: u64) -> bool {
744        let existed = self.plans.remove(&query_hash).is_some();
745        if existed {
746            if let Some(pos) = self.access_order.iter().position(|&h| h == query_hash) {
747                self.access_order.remove(pos);
748            }
749            self.stats.size = self.plans.len();
750        }
751        existed
752    }
753
754    /// Remove all entries and reset size to zero (other statistics are kept).
755    pub fn clear(&mut self) {
756        self.plans.clear();
757        self.access_order.clear();
758        self.stats.size = 0;
759    }
760
761    /// Return a snapshot of the current statistics.
762    pub fn stats(&self) -> &PlanCacheStats {
763        &self.stats
764    }
765
766    /// Cache hit rate: `hits / (hits + misses)`, or `0.0` if no operations
767    /// have been performed yet.
768    pub fn hit_rate(&self) -> f64 {
769        let total = self.stats.hits + self.stats.misses;
770        if total == 0 {
771            0.0
772        } else {
773            self.stats.hits as f64 / total as f64
774        }
775    }
776
777    /// Current number of cached plans.
778    pub fn len(&self) -> usize {
779        self.plans.len()
780    }
781
782    /// Return `true` if the cache contains no plans.
783    pub fn is_empty(&self) -> bool {
784        self.plans.is_empty()
785    }
786}
787
788// ---------------------------------------------------------------------------
789// Tests for the simple QueryPlanCache
790// ---------------------------------------------------------------------------
791
#[cfg(test)]
mod simple_cache_tests {
    use super::*;

    /// Build a plan with a single "scan" pattern step.
    fn make_plan(hash: u64, query: &str, cost: f64) -> QueryPlan {
        QueryPlan::new(hash, query, vec!["scan".to_string()], cost)
    }

    #[test]
    fn test_simple_cache_new_is_empty() {
        let cache = QueryPlanCache::new(10);
        assert!(cache.is_empty());
        assert_eq!(cache.len(), 0);
    }

    #[test]
    fn test_simple_cache_insert_and_get_hit() {
        let mut cache = QueryPlanCache::new(10);
        cache.insert(make_plan(1, "SELECT ?s ?p ?o WHERE {?s ?p ?o}", 100.0));
        let plan = cache.get(1);
        assert!(plan.is_some());
        assert_eq!(plan.expect("plan should exist").query_hash, 1);
    }

    #[test]
    fn test_simple_cache_miss_increments_stat() {
        let mut cache = QueryPlanCache::new(10);
        assert!(cache.get(42).is_none());
        assert_eq!(cache.stats().misses, 1);
        assert_eq!(cache.stats().hits, 0);
    }

    #[test]
    fn test_simple_cache_hit_increments_stat() {
        let mut cache = QueryPlanCache::new(10);
        cache.insert(make_plan(7, "SELECT * WHERE {?s ?p ?o}", 50.0));
        cache.get(7);
        assert_eq!(cache.stats().hits, 1);
        assert_eq!(cache.stats().misses, 0);
    }

    #[test]
    fn test_simple_cache_hit_rate_zero_initially() {
        let cache = QueryPlanCache::new(5);
        assert_eq!(cache.hit_rate(), 0.0);
    }

    #[test]
    fn test_simple_cache_hit_rate_after_ops() {
        let mut cache = QueryPlanCache::new(5);
        cache.insert(make_plan(1, "q1", 10.0));
        cache.get(1); // hit
        cache.get(2); // miss
        let rate = cache.hit_rate();
        assert!((rate - 0.5).abs() < 1e-9);
    }

    #[test]
    fn test_simple_cache_evict_lru_on_full() {
        let mut cache = QueryPlanCache::new(2);
        cache.insert(make_plan(1, "q1", 1.0)); // LRU = q1
        cache.insert(make_plan(2, "q2", 2.0)); // LRU = q1, MRU = q2
        // Insert q3 — q1 should be evicted.
        cache.insert(make_plan(3, "q3", 3.0));
        assert_eq!(cache.len(), 2);
        assert!(cache.get(1).is_none(), "q1 should have been evicted");
        assert!(cache.get(2).is_some());
        assert!(cache.get(3).is_some());
    }

    #[test]
    fn test_simple_cache_evict_lru_access_order() {
        let mut cache = QueryPlanCache::new(2);
        cache.insert(make_plan(1, "q1", 1.0));
        cache.insert(make_plan(2, "q2", 2.0));
        // Access q1 to make it MRU; q2 becomes LRU.
        cache.get(1);
        // Insert q3 — q2 should be evicted.
        cache.insert(make_plan(3, "q3", 3.0));
        assert!(cache.get(2).is_none(), "q2 should have been evicted");
        assert!(cache.get(1).is_some());
        assert!(cache.get(3).is_some());
    }

    #[test]
    fn test_simple_cache_evict_lru_explicit() {
        let mut cache = QueryPlanCache::new(5);
        cache.insert(make_plan(1, "q1", 1.0));
        cache.insert(make_plan(2, "q2", 2.0));
        let evicted = cache.evict_lru();
        assert!(evicted.is_some());
        assert_eq!(cache.stats().evictions, 1);
        assert_eq!(cache.len(), 1);
    }

    #[test]
    fn test_simple_cache_evict_lru_empty_returns_none() {
        let mut cache = QueryPlanCache::new(5);
        assert!(cache.evict_lru().is_none());
    }

    #[test]
    fn test_simple_cache_invalidate_existing() {
        let mut cache = QueryPlanCache::new(5);
        cache.insert(make_plan(99, "q99", 9.0));
        let removed = cache.invalidate(99);
        assert!(removed);
        assert!(cache.is_empty());
    }

    #[test]
    fn test_simple_cache_invalidate_missing_returns_false() {
        let mut cache = QueryPlanCache::new(5);
        assert!(!cache.invalidate(404));
    }

    #[test]
    fn test_simple_cache_clear() {
        let mut cache = QueryPlanCache::new(5);
        cache.insert(make_plan(1, "q1", 1.0));
        cache.insert(make_plan(2, "q2", 2.0));
        cache.clear();
        assert!(cache.is_empty());
        assert_eq!(cache.stats().size, 0);
    }

    #[test]
    fn test_simple_cache_plan_fields() {
        let plan = QueryPlan::new(
            42,
            "SELECT * WHERE {?s ?p ?o}",
            vec!["index_scan".to_string()],
            2.5,
        );
        assert_eq!(plan.query_hash, 42);
        assert_eq!(plan.optimized_patterns, vec!["index_scan".to_string()]);
        assert!((plan.estimated_cost - 2.5).abs() < 1e-9);
        assert!(plan.created_at_ms >= 0);
    }

    #[test]
    fn test_simple_cache_stats_size_tracks_inserts() {
        let mut cache = QueryPlanCache::new(10);
        cache.insert(make_plan(1, "q1", 1.0));
        cache.insert(make_plan(2, "q2", 2.0));
        assert_eq!(cache.stats().size, 2);
    }

    #[test]
    fn test_simple_cache_reinsertion_updates_access_order() {
        let mut cache = QueryPlanCache::new(3);
        cache.insert(make_plan(1, "q1", 1.0));
        cache.insert(make_plan(2, "q2", 2.0));
        // Re-insert q1: the stored plan is replaced and q1 becomes MRU.
        // With max_size = 3 and only two distinct hashes present, no
        // eviction is needed.
        cache.insert(make_plan(1, "q1", 1.5)); // update q1, now MRU
        // Inserting q3 fills the cache exactly to capacity — still no eviction.
        cache.insert(make_plan(3, "q3", 3.0));
        assert_eq!(cache.len(), 3); // 1, 2, 3 all present
    }
}