1use parking_lot::Mutex;
32use std::collections::HashMap;
33use std::hash::Hash;
34use std::sync::atomic::{AtomicU64, Ordering};
35use std::time::Instant;
36
37use crate::query::plan::LogicalPlan;
38use crate::query::processor::QueryLanguage;
39
/// Key identifying a cached query plan.
///
/// Combines the whitespace-normalized query text with the query language so
/// that the same text in different languages occupies distinct cache slots.
/// Hash/Eq derive over both fields, making this usable as a `HashMap` key.
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct CacheKey {
    /// Normalized query text (see `normalize_query`: runs of whitespace
    /// collapsed to single spaces, leading/trailing whitespace removed).
    query: String,
    /// Language the query was written in.
    language: QueryLanguage,
}
48
49impl CacheKey {
50 #[must_use]
52 pub fn new(query: impl Into<String>, language: QueryLanguage) -> Self {
53 Self {
54 query: normalize_query(&query.into()),
55 language,
56 }
57 }
58
59 #[must_use]
61 pub fn query(&self) -> &str {
62 &self.query
63 }
64
65 #[must_use]
67 pub fn language(&self) -> QueryLanguage {
68 self.language
69 }
70}
71
/// Normalizes a query string for use as a cache key: collapses every run of
/// whitespace (spaces, tabs, newlines) into a single space and strips
/// leading/trailing whitespace.
///
/// Builds directly into a preallocated `String` instead of collecting the
/// tokens into an intermediate `Vec<&str>` and joining, avoiding one
/// allocation per call. The result is never longer than the input, so
/// `with_capacity(query.len())` guarantees at most one allocation.
fn normalize_query(query: &str) -> String {
    let mut normalized = String::with_capacity(query.len());
    for token in query.split_whitespace() {
        if !normalized.is_empty() {
            normalized.push(' ');
        }
        normalized.push_str(token);
    }
    normalized
}
79
/// A single cached value plus bookkeeping about how it has been used.
struct CacheEntry<T> {
    value: T,
    /// Number of times this entry has been returned by `access`.
    access_count: u64,
    /// Timestamp of the most recent access; `Instant` is unavailable on
    /// wasm32, so the field is compiled out there.
    #[cfg(not(target_arch = "wasm32"))]
    last_accessed: Instant,
}

impl<T: Clone> CacheEntry<T> {
    /// Wraps `value` with zeroed usage stats.
    fn new(value: T) -> Self {
        Self {
            value,
            access_count: 0,
            #[cfg(not(target_arch = "wasm32"))]
            last_accessed: Instant::now(),
        }
    }

    /// Records one access (count + timestamp) and hands back a clone of the
    /// stored value.
    fn access(&mut self) -> T {
        self.access_count += 1;
        #[cfg(not(target_arch = "wasm32"))]
        {
            self.last_accessed = Instant::now();
        }
        self.value.clone()
    }
}

/// Minimal LRU cache backed by a `HashMap` plus a recency-ordered `Vec`.
struct LruCache<K, V> {
    /// Keyed storage for the cached values.
    entries: HashMap<K, CacheEntry<V>>,
    /// Maximum number of entries held before eviction kicks in.
    capacity: usize,
    /// Keys ordered least- to most-recently used; the front element is the
    /// next eviction victim. Each live key appears exactly once.
    access_order: Vec<K>,
}

impl<K: Clone + Eq + Hash, V: Clone> LruCache<K, V> {
    /// Creates an empty cache that evicts once `capacity` entries are held.
    fn new(capacity: usize) -> Self {
        Self {
            entries: HashMap::with_capacity(capacity),
            capacity,
            access_order: Vec::with_capacity(capacity),
        }
    }

    /// Marks `key` as most recently used by moving it to the back of
    /// `access_order`. O(n) scan — acceptable for the small capacities this
    /// cache is built with.
    fn promote(&mut self, key: &K) {
        if let Some(idx) = self.access_order.iter().position(|candidate| candidate == key) {
            self.access_order.remove(idx);
        }
        self.access_order.push(key.clone());
    }

    /// Returns a clone of the value for `key` (if present), refreshing its
    /// recency and usage stats.
    fn get(&mut self, key: &K) -> Option<V> {
        let value = self.entries.get_mut(key)?.access();
        self.promote(key);
        Some(value)
    }

    /// Inserts (or replaces) `key` with `value`, evicting the least recently
    /// used entry first when a brand-new key would exceed capacity.
    fn put(&mut self, key: K, value: V) {
        let is_new = !self.entries.contains_key(&key);
        if is_new && self.entries.len() >= self.capacity {
            self.evict_lru();
        }
        self.promote(&key);
        self.entries.insert(key, CacheEntry::new(value));
    }

    /// Drops the least recently used entry, if any.
    fn evict_lru(&mut self) {
        if !self.access_order.is_empty() {
            let victim = self.access_order.remove(0);
            self.entries.remove(&victim);
        }
    }

    /// Removes every entry, keeping allocated capacity.
    fn clear(&mut self) {
        self.access_order.clear();
        self.entries.clear();
    }

    /// Current number of cached entries.
    fn len(&self) -> usize {
        self.entries.len()
    }

    /// Removes `key` and returns its value, if it was cached.
    fn remove(&mut self, key: &K) -> Option<V> {
        // Keys are unique in `access_order`, so `retain` drops at most one.
        self.access_order.retain(|candidate| candidate != key);
        self.entries.remove(key).map(|entry| entry.value)
    }
}
182
/// Thread-safe two-level cache for query plans: one LRU cache for freshly
/// parsed plans and one for optimized plans, plus atomic hit/miss counters.
pub struct QueryCache {
    /// Cache of parsed (pre-optimization) logical plans.
    parsed_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Cache of optimized logical plans.
    optimized_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Lookups in `parsed_cache` that found an entry.
    parsed_hits: AtomicU64,
    /// Lookups in `parsed_cache` that found nothing.
    parsed_misses: AtomicU64,
    /// Lookups in `optimized_cache` that found an entry.
    optimized_hits: AtomicU64,
    /// Lookups in `optimized_cache` that found nothing.
    optimized_misses: AtomicU64,
    /// Incremented by `clear` when it actually discarded entries.
    invalidations: AtomicU64,
    /// When false, all gets return `None` and all puts are no-ops, and the
    /// hit/miss counters are left untouched.
    enabled: bool,
}
202
203impl QueryCache {
204 #[must_use]
209 pub fn new(capacity: usize) -> Self {
210 let half_capacity = capacity / 2;
211 Self {
212 parsed_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
213 optimized_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
214 parsed_hits: AtomicU64::new(0),
215 parsed_misses: AtomicU64::new(0),
216 optimized_hits: AtomicU64::new(0),
217 optimized_misses: AtomicU64::new(0),
218 invalidations: AtomicU64::new(0),
219 enabled: true,
220 }
221 }
222
223 #[must_use]
225 pub fn disabled() -> Self {
226 Self {
227 parsed_cache: Mutex::new(LruCache::new(0)),
228 optimized_cache: Mutex::new(LruCache::new(0)),
229 parsed_hits: AtomicU64::new(0),
230 parsed_misses: AtomicU64::new(0),
231 optimized_hits: AtomicU64::new(0),
232 optimized_misses: AtomicU64::new(0),
233 invalidations: AtomicU64::new(0),
234 enabled: false,
235 }
236 }
237
238 #[must_use]
240 pub fn is_enabled(&self) -> bool {
241 self.enabled
242 }
243
244 pub fn get_parsed(&self, key: &CacheKey) -> Option<LogicalPlan> {
246 if !self.enabled {
247 return None;
248 }
249
250 let result = self.parsed_cache.lock().get(key);
251 if result.is_some() {
252 self.parsed_hits.fetch_add(1, Ordering::Relaxed);
253 } else {
254 self.parsed_misses.fetch_add(1, Ordering::Relaxed);
255 }
256 result
257 }
258
259 pub fn put_parsed(&self, key: CacheKey, plan: LogicalPlan) {
261 if !self.enabled {
262 return;
263 }
264 self.parsed_cache.lock().put(key, plan);
265 }
266
267 pub fn get_optimized(&self, key: &CacheKey) -> Option<LogicalPlan> {
269 if !self.enabled {
270 return None;
271 }
272
273 let result = self.optimized_cache.lock().get(key);
274 if result.is_some() {
275 self.optimized_hits.fetch_add(1, Ordering::Relaxed);
276 } else {
277 self.optimized_misses.fetch_add(1, Ordering::Relaxed);
278 }
279 result
280 }
281
282 pub fn put_optimized(&self, key: CacheKey, plan: LogicalPlan) {
284 if !self.enabled {
285 return;
286 }
287 self.optimized_cache.lock().put(key, plan);
288 }
289
290 pub fn invalidate(&self, key: &CacheKey) {
292 self.parsed_cache.lock().remove(key);
293 self.optimized_cache.lock().remove(key);
294 }
295
296 pub fn clear(&self) {
299 let had_entries =
300 self.parsed_cache.lock().len() > 0 || self.optimized_cache.lock().len() > 0;
301 self.parsed_cache.lock().clear();
302 self.optimized_cache.lock().clear();
303 if had_entries {
304 self.invalidations.fetch_add(1, Ordering::Relaxed);
305 }
306 }
307
308 #[must_use]
310 pub fn stats(&self) -> CacheStats {
311 CacheStats {
312 parsed_size: self.parsed_cache.lock().len(),
313 optimized_size: self.optimized_cache.lock().len(),
314 parsed_hits: self.parsed_hits.load(Ordering::Relaxed),
315 parsed_misses: self.parsed_misses.load(Ordering::Relaxed),
316 optimized_hits: self.optimized_hits.load(Ordering::Relaxed),
317 optimized_misses: self.optimized_misses.load(Ordering::Relaxed),
318 invalidations: self.invalidations.load(Ordering::Relaxed),
319 }
320 }
321
322 pub fn reset_stats(&self) {
324 self.parsed_hits.store(0, Ordering::Relaxed);
325 self.parsed_misses.store(0, Ordering::Relaxed);
326 self.optimized_hits.store(0, Ordering::Relaxed);
327 self.optimized_misses.store(0, Ordering::Relaxed);
328 self.invalidations.store(0, Ordering::Relaxed);
329 }
330}
331
impl Default for QueryCache {
    /// Default cache: enabled, with a total capacity of 1000 entries
    /// (500 parsed + 500 optimized after the even split in `new`).
    fn default() -> Self {
        Self::new(1000)
    }
}
338
/// Point-in-time snapshot of the cache's sizes and counters, as produced by
/// `QueryCache::stats`.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Entries currently held in the parsed-plan cache.
    pub parsed_size: usize,
    /// Entries currently held in the optimized-plan cache.
    pub optimized_size: usize,
    /// Successful lookups in the parsed-plan cache.
    pub parsed_hits: u64,
    /// Failed lookups in the parsed-plan cache.
    pub parsed_misses: u64,
    /// Successful lookups in the optimized-plan cache.
    pub optimized_hits: u64,
    /// Failed lookups in the optimized-plan cache.
    pub optimized_misses: u64,
    /// Invalidation events recorded by the cache.
    pub invalidations: u64,
}

impl CacheStats {
    /// Shared ratio computation: hits / (hits + misses), or 0.0 when there
    /// were no lookups at all (avoids 0/0).
    fn hit_ratio(hits: u64, misses: u64) -> f64 {
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }

    /// Hit rate of the parsed-plan cache, in [0.0, 1.0].
    #[must_use]
    pub fn parsed_hit_rate(&self) -> f64 {
        Self::hit_ratio(self.parsed_hits, self.parsed_misses)
    }

    /// Hit rate of the optimized-plan cache, in [0.0, 1.0].
    #[must_use]
    pub fn optimized_hit_rate(&self) -> f64 {
        Self::hit_ratio(self.optimized_hits, self.optimized_misses)
    }

    /// Total entries held across both caches.
    #[must_use]
    pub fn total_size(&self) -> usize {
        self.parsed_size + self.optimized_size
    }

    /// Combined hit rate across both caches, in [0.0, 1.0].
    #[must_use]
    pub fn total_hit_rate(&self) -> f64 {
        Self::hit_ratio(
            self.parsed_hits + self.optimized_hits,
            self.parsed_misses + self.optimized_misses,
        )
    }
}
400
/// Decorator that pairs a query processor with a `QueryCache`.
///
/// Generic over the wrapped processor type `P`; this struct only stores the
/// pair and exposes accessors — cache consultation is up to the callers of
/// `processor()` / `cache()`.
pub struct CachingQueryProcessor<P> {
    /// The wrapped query processor.
    processor: P,
    /// Plan cache consulted alongside the processor.
    cache: QueryCache,
}
411
412impl<P> CachingQueryProcessor<P> {
413 pub fn new(processor: P, cache: QueryCache) -> Self {
415 Self { processor, cache }
416 }
417
418 pub fn with_default_cache(processor: P) -> Self {
420 Self::new(processor, QueryCache::default())
421 }
422
423 #[must_use]
425 pub fn cache(&self) -> &QueryCache {
426 &self.cache
427 }
428
429 #[must_use]
431 pub fn processor(&self) -> &P {
432 &self.processor
433 }
434
435 #[must_use]
437 pub fn stats(&self) -> CacheStats {
438 self.cache.stats()
439 }
440
441 pub fn clear_cache(&self) {
443 self.cache.clear();
444 }
445}
446
#[cfg(test)]
mod tests {
    use super::*;
    use crate::query::plan::{LogicalOperator, LogicalPlan};

    /// Picks whichever query language the enabled feature set provides.
    #[cfg(feature = "gql")]
    fn test_language() -> QueryLanguage {
        QueryLanguage::Gql
    }

    #[cfg(not(feature = "gql"))]
    fn test_language() -> QueryLanguage {
        #[cfg(feature = "cypher")]
        return QueryLanguage::Cypher;
        #[cfg(feature = "sparql")]
        return QueryLanguage::Sparql;
    }

    /// Smallest possible plan, used as a stand-in value for cache entries.
    fn empty_plan() -> LogicalPlan {
        LogicalPlan::new(LogicalOperator::Empty)
    }

    #[test]
    fn test_cache_key_normalization() {
        // NOTE(review): both inputs are identical; presumably one was meant
        // to carry extra whitespace to exercise normalization — confirm.
        let first = CacheKey::new("MATCH (n) RETURN n", test_language());
        let second = CacheKey::new("MATCH (n) RETURN n", test_language());

        assert_eq!(first.query(), second.query());
    }

    #[test]
    fn test_cache_basic_operations() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // First lookup misses; after a put, the same key hits.
        assert!(cache.get_parsed(&key).is_none());
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_some());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 1);
        assert_eq!(stats.parsed_hits, 1);
        assert_eq!(stats.parsed_misses, 1);
    }

    #[test]
    fn test_cache_lru_eviction() {
        // Total capacity 4 → 2 entries per sub-cache, so inserting three
        // distinct queries evicts the least-recently-used one (QUERY 0).
        let cache = QueryCache::new(4);

        for i in 0..3 {
            let key = CacheKey::new(format!("QUERY {}", i), test_language());
            cache.put_parsed(key, empty_plan());
        }

        let evicted = CacheKey::new("QUERY 0", test_language());
        assert!(cache.get_parsed(&evicted).is_none());

        let kept_one = CacheKey::new("QUERY 1", test_language());
        let kept_two = CacheKey::new("QUERY 2", test_language());
        assert!(cache.get_parsed(&kept_one).is_some());
        assert!(cache.get_parsed(&kept_two).is_some());
    }

    #[test]
    fn test_cache_invalidation() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        cache.put_parsed(key.clone(), empty_plan());
        cache.put_optimized(key.clone(), empty_plan());

        assert!(cache.get_parsed(&key).is_some());
        assert!(cache.get_optimized(&key).is_some());

        cache.invalidate(&key);

        // Reset counters so the miss assertions below start from zero.
        cache.reset_stats();

        assert!(cache.get_parsed(&key).is_none());
        assert!(cache.get_optimized(&key).is_none());
    }

    #[test]
    fn test_cache_disabled() {
        let cache = QueryCache::disabled();
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // Puts are silently dropped and lookups always miss.
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_none());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 0);
    }

    #[test]
    fn test_cache_stats() {
        let cache = QueryCache::new(10);
        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());

        cache.get_optimized(&key1); // miss
        cache.put_optimized(key1.clone(), empty_plan());
        cache.get_optimized(&key1); // hit
        cache.get_optimized(&key1); // hit
        cache.get_optimized(&key2); // miss

        let stats = cache.stats();
        assert_eq!(stats.optimized_hits, 2);
        assert_eq!(stats.optimized_misses, 2);
        assert!((stats.optimized_hit_rate() - 0.5).abs() < 0.01);
    }
}
587}