1use parking_lot::Mutex;
32use std::collections::HashMap;
33use std::hash::Hash;
34use std::sync::atomic::{AtomicU64, Ordering};
35use std::time::Instant;
36
37use crate::query::plan::LogicalPlan;
38use crate::query::processor::QueryLanguage;
39
/// Cache key identifying a query by its normalized text and its language.
///
/// Two keys compare equal (and hash equal) when their whitespace-normalized
/// query strings and languages match, so trivially reformatted queries share
/// a cache slot.
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct CacheKey {
    // Whitespace-normalized query text (see `normalize_query`).
    query: String,
    // Language the query was written in; part of the key so identical text
    // in different languages is cached separately.
    language: QueryLanguage,
}
48
49impl CacheKey {
50 #[must_use]
52 pub fn new(query: impl Into<String>, language: QueryLanguage) -> Self {
53 Self {
54 query: normalize_query(&query.into()),
55 language,
56 }
57 }
58
59 #[must_use]
61 pub fn query(&self) -> &str {
62 &self.query
63 }
64
65 #[must_use]
67 pub fn language(&self) -> QueryLanguage {
68 self.language
69 }
70}
71
/// Collapses every run of whitespace in `query` to a single space and trims
/// leading/trailing whitespace, so trivially reformatted queries produce the
/// same cache key. Case is intentionally preserved.
fn normalize_query(query: &str) -> String {
    let mut normalized = String::with_capacity(query.len());
    for token in query.split_whitespace() {
        if !normalized.is_empty() {
            normalized.push(' ');
        }
        normalized.push_str(token);
    }
    normalized
}
79
/// A cached value together with usage bookkeeping.
struct CacheEntry<T> {
    // The cached payload; a clone is handed out on every access.
    value: T,
    // Number of times this entry has been read via `access`.
    access_count: u64,
    // Timestamp of the most recent access. Omitted on wasm32, where
    // `Instant::now` is not reliably available.
    #[cfg(not(target_arch = "wasm32"))]
    last_accessed: Instant,
}

impl<T: Clone> CacheEntry<T> {
    /// Wraps `value` in a fresh entry with zeroed usage statistics.
    fn new(value: T) -> Self {
        Self {
            value,
            access_count: 0,
            #[cfg(not(target_arch = "wasm32"))]
            last_accessed: Instant::now(),
        }
    }

    /// Records one access (timestamp off-wasm, plus the counter) and returns
    /// a clone of the stored value.
    fn access(&mut self) -> T {
        #[cfg(not(target_arch = "wasm32"))]
        {
            self.last_accessed = Instant::now();
        }
        self.access_count += 1;
        self.value.clone()
    }
}
110
111struct LruCache<K, V> {
113 entries: HashMap<K, CacheEntry<V>>,
115 capacity: usize,
117 access_order: Vec<K>,
119}
120
121impl<K: Clone + Eq + Hash, V: Clone> LruCache<K, V> {
122 fn new(capacity: usize) -> Self {
123 Self {
124 entries: HashMap::with_capacity(capacity),
125 capacity,
126 access_order: Vec::with_capacity(capacity),
127 }
128 }
129
130 fn get(&mut self, key: &K) -> Option<V> {
131 if let Some(entry) = self.entries.get_mut(key) {
132 if let Some(pos) = self.access_order.iter().position(|k| k == key) {
134 self.access_order.remove(pos);
135 }
136 self.access_order.push(key.clone());
137 Some(entry.access())
138 } else {
139 None
140 }
141 }
142
143 fn put(&mut self, key: K, value: V) {
144 if self.entries.len() >= self.capacity && !self.entries.contains_key(&key) {
146 self.evict_lru();
147 }
148
149 if let Some(pos) = self.access_order.iter().position(|k| k == &key) {
151 self.access_order.remove(pos);
152 }
153
154 self.access_order.push(key.clone());
156 self.entries.insert(key, CacheEntry::new(value));
157 }
158
159 fn evict_lru(&mut self) {
160 if let Some(key) = self.access_order.first().cloned() {
161 self.access_order.remove(0);
162 self.entries.remove(&key);
163 }
164 }
165
166 fn clear(&mut self) {
167 self.entries.clear();
168 self.access_order.clear();
169 }
170
171 fn len(&self) -> usize {
172 self.entries.len()
173 }
174
175 fn remove(&mut self, key: &K) -> Option<V> {
176 if let Some(pos) = self.access_order.iter().position(|k| k == key) {
177 self.access_order.remove(pos);
178 }
179 self.entries.remove(key).map(|e| e.value)
180 }
181}
182
/// Thread-safe, two-tier cache for query plans: one LRU cache for parsed
/// (pre-optimization) plans and one for optimized plans, with hit/miss
/// statistics for each tier.
pub struct QueryCache {
    // LRU cache of parsed plans, keyed by normalized query text + language.
    parsed_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    // LRU cache of optimized plans, same keying.
    optimized_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    // Hit/miss counters; updated with relaxed ordering (statistics only).
    parsed_hits: AtomicU64,
    parsed_misses: AtomicU64,
    optimized_hits: AtomicU64,
    optimized_misses: AtomicU64,
    // When false (see `disabled()`), all reads miss and all writes are ignored.
    enabled: bool,
}
200
201impl QueryCache {
202 #[must_use]
207 pub fn new(capacity: usize) -> Self {
208 let half_capacity = capacity / 2;
209 Self {
210 parsed_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
211 optimized_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
212 parsed_hits: AtomicU64::new(0),
213 parsed_misses: AtomicU64::new(0),
214 optimized_hits: AtomicU64::new(0),
215 optimized_misses: AtomicU64::new(0),
216 enabled: true,
217 }
218 }
219
220 #[must_use]
222 pub fn disabled() -> Self {
223 Self {
224 parsed_cache: Mutex::new(LruCache::new(0)),
225 optimized_cache: Mutex::new(LruCache::new(0)),
226 parsed_hits: AtomicU64::new(0),
227 parsed_misses: AtomicU64::new(0),
228 optimized_hits: AtomicU64::new(0),
229 optimized_misses: AtomicU64::new(0),
230 enabled: false,
231 }
232 }
233
234 #[must_use]
236 pub fn is_enabled(&self) -> bool {
237 self.enabled
238 }
239
240 pub fn get_parsed(&self, key: &CacheKey) -> Option<LogicalPlan> {
242 if !self.enabled {
243 return None;
244 }
245
246 let result = self.parsed_cache.lock().get(key);
247 if result.is_some() {
248 self.parsed_hits.fetch_add(1, Ordering::Relaxed);
249 } else {
250 self.parsed_misses.fetch_add(1, Ordering::Relaxed);
251 }
252 result
253 }
254
255 pub fn put_parsed(&self, key: CacheKey, plan: LogicalPlan) {
257 if !self.enabled {
258 return;
259 }
260 self.parsed_cache.lock().put(key, plan);
261 }
262
263 pub fn get_optimized(&self, key: &CacheKey) -> Option<LogicalPlan> {
265 if !self.enabled {
266 return None;
267 }
268
269 let result = self.optimized_cache.lock().get(key);
270 if result.is_some() {
271 self.optimized_hits.fetch_add(1, Ordering::Relaxed);
272 } else {
273 self.optimized_misses.fetch_add(1, Ordering::Relaxed);
274 }
275 result
276 }
277
278 pub fn put_optimized(&self, key: CacheKey, plan: LogicalPlan) {
280 if !self.enabled {
281 return;
282 }
283 self.optimized_cache.lock().put(key, plan);
284 }
285
286 pub fn invalidate(&self, key: &CacheKey) {
288 self.parsed_cache.lock().remove(key);
289 self.optimized_cache.lock().remove(key);
290 }
291
292 pub fn clear(&self) {
294 self.parsed_cache.lock().clear();
295 self.optimized_cache.lock().clear();
296 }
297
298 #[must_use]
300 pub fn stats(&self) -> CacheStats {
301 CacheStats {
302 parsed_size: self.parsed_cache.lock().len(),
303 optimized_size: self.optimized_cache.lock().len(),
304 parsed_hits: self.parsed_hits.load(Ordering::Relaxed),
305 parsed_misses: self.parsed_misses.load(Ordering::Relaxed),
306 optimized_hits: self.optimized_hits.load(Ordering::Relaxed),
307 optimized_misses: self.optimized_misses.load(Ordering::Relaxed),
308 }
309 }
310
311 pub fn reset_stats(&self) {
313 self.parsed_hits.store(0, Ordering::Relaxed);
314 self.parsed_misses.store(0, Ordering::Relaxed);
315 self.optimized_hits.store(0, Ordering::Relaxed);
316 self.optimized_misses.store(0, Ordering::Relaxed);
317 }
318}
319
320impl Default for QueryCache {
321 fn default() -> Self {
322 Self::new(1000)
324 }
325}
326
/// Point-in-time snapshot of cache sizes and hit/miss counters.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Entries currently in the parsed-plan cache.
    pub parsed_size: usize,
    /// Entries currently in the optimized-plan cache.
    pub optimized_size: usize,
    /// Lifetime hit count for parsed-plan lookups.
    pub parsed_hits: u64,
    /// Lifetime miss count for parsed-plan lookups.
    pub parsed_misses: u64,
    /// Lifetime hit count for optimized-plan lookups.
    pub optimized_hits: u64,
    /// Lifetime miss count for optimized-plan lookups.
    pub optimized_misses: u64,
}

impl CacheStats {
    /// Fraction of lookups that hit, or 0.0 when there were no lookups.
    fn ratio(hits: u64, misses: u64) -> f64 {
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }

    /// Hit rate of the parsed-plan cache, in `[0.0, 1.0]`.
    #[must_use]
    pub fn parsed_hit_rate(&self) -> f64 {
        Self::ratio(self.parsed_hits, self.parsed_misses)
    }

    /// Hit rate of the optimized-plan cache, in `[0.0, 1.0]`.
    #[must_use]
    pub fn optimized_hit_rate(&self) -> f64 {
        Self::ratio(self.optimized_hits, self.optimized_misses)
    }

    /// Total number of cached plans across both caches.
    #[must_use]
    pub fn total_size(&self) -> usize {
        self.parsed_size + self.optimized_size
    }

    /// Combined hit rate across both caches, in `[0.0, 1.0]`.
    #[must_use]
    pub fn total_hit_rate(&self) -> f64 {
        Self::ratio(
            self.parsed_hits + self.optimized_hits,
            self.parsed_misses + self.optimized_misses,
        )
    }
}
386
/// Wrapper pairing a query processor with a [`QueryCache`].
///
/// Holds the processor and cache together and exposes the cache's
/// statistics and maintenance operations alongside the processor.
pub struct CachingQueryProcessor<P> {
    // The wrapped processor; accessible via `processor()`.
    processor: P,
    // Plan cache consulted/maintained on behalf of the processor.
    cache: QueryCache,
}
397
398impl<P> CachingQueryProcessor<P> {
399 pub fn new(processor: P, cache: QueryCache) -> Self {
401 Self { processor, cache }
402 }
403
404 pub fn with_default_cache(processor: P) -> Self {
406 Self::new(processor, QueryCache::default())
407 }
408
409 #[must_use]
411 pub fn cache(&self) -> &QueryCache {
412 &self.cache
413 }
414
415 #[must_use]
417 pub fn processor(&self) -> &P {
418 &self.processor
419 }
420
421 #[must_use]
423 pub fn stats(&self) -> CacheStats {
424 self.cache.stats()
425 }
426
427 pub fn clear_cache(&self) {
429 self.cache.clear();
430 }
431}
432
#[cfg(test)]
mod tests {
    use super::*;
    use crate::query::plan::{LogicalOperator, LogicalPlan};

    /// A trivial plan used as a stand-in cache value throughout the tests.
    fn empty_plan() -> LogicalPlan {
        LogicalPlan::new(LogicalOperator::Empty)
    }

    #[cfg(feature = "gql")]
    fn test_language() -> QueryLanguage {
        QueryLanguage::Gql
    }

    #[cfg(not(feature = "gql"))]
    fn test_language() -> QueryLanguage {
        #[cfg(feature = "cypher")]
        return QueryLanguage::Cypher;
        #[cfg(feature = "sparql")]
        return QueryLanguage::Sparql;
    }

    #[test]
    fn test_cache_key_normalization() {
        let first = CacheKey::new("MATCH (n) RETURN n", test_language());
        let second = CacheKey::new("MATCH (n) RETURN n", test_language());

        assert_eq!(first.query(), second.query());
    }

    #[test]
    fn test_cache_basic_operations() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // Cold lookup misses; after insertion the same key hits.
        assert!(cache.get_parsed(&key).is_none());
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_some());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 1);
        assert_eq!(stats.parsed_hits, 1);
        assert_eq!(stats.parsed_misses, 1);
    }

    #[test]
    fn test_cache_lru_eviction() {
        // Total capacity 4 => two slots per sub-cache.
        let cache = QueryCache::new(4);

        for i in 0..3 {
            let key = CacheKey::new(format!("QUERY {}", i), test_language());
            cache.put_parsed(key, empty_plan());
        }

        // The least recently used entry was evicted; the newer two remain.
        let evicted = CacheKey::new("QUERY 0", test_language());
        assert!(cache.get_parsed(&evicted).is_none());

        let kept_one = CacheKey::new("QUERY 1", test_language());
        let kept_two = CacheKey::new("QUERY 2", test_language());
        assert!(cache.get_parsed(&kept_one).is_some());
        assert!(cache.get_parsed(&kept_two).is_some());
    }

    #[test]
    fn test_cache_invalidation() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        cache.put_parsed(key.clone(), empty_plan());
        cache.put_optimized(key.clone(), empty_plan());

        assert!(cache.get_parsed(&key).is_some());
        assert!(cache.get_optimized(&key).is_some());

        cache.invalidate(&key);
        cache.reset_stats();

        assert!(cache.get_parsed(&key).is_none());
        assert!(cache.get_optimized(&key).is_none());
    }

    #[test]
    fn test_cache_disabled() {
        let cache = QueryCache::disabled();
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // Writes are dropped and reads always miss on a disabled cache.
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_none());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 0);
    }

    #[test]
    fn test_cache_stats() {
        let cache = QueryCache::new(10);
        let hit_key = CacheKey::new("QUERY 1", test_language());
        let miss_key = CacheKey::new("QUERY 2", test_language());

        cache.get_optimized(&hit_key); // miss
        cache.put_optimized(hit_key.clone(), empty_plan());
        cache.get_optimized(&hit_key); // hit
        cache.get_optimized(&hit_key); // hit
        cache.get_optimized(&miss_key); // miss

        let stats = cache.stats();
        assert_eq!(stats.optimized_hits, 2);
        assert_eq!(stats.optimized_misses, 2);
        assert!((stats.optimized_hit_rate() - 0.5).abs() < 0.01);
    }
}