1use parking_lot::Mutex;
32use std::collections::HashMap;
33use std::hash::Hash;
34use std::sync::atomic::{AtomicU64, Ordering};
35use std::time::Instant;
36
37use crate::query::plan::LogicalPlan;
38use crate::query::processor::QueryLanguage;
39
40#[derive(Clone, Eq, PartialEq, Hash)]
42pub struct CacheKey {
43 query: String,
45 language: QueryLanguage,
47}
48
49impl CacheKey {
50 #[must_use]
52 pub fn new(query: impl Into<String>, language: QueryLanguage) -> Self {
53 Self {
54 query: normalize_query(&query.into()),
55 language,
56 }
57 }
58
59 #[must_use]
61 pub fn query(&self) -> &str {
62 &self.query
63 }
64
65 #[must_use]
67 pub fn language(&self) -> QueryLanguage {
68 self.language
69 }
70}
71
/// Collapses all runs of whitespace in `query` to single spaces and trims
/// leading/trailing whitespace, so textually-equivalent queries compare equal.
fn normalize_query(query: &str) -> String {
    let mut normalized = String::with_capacity(query.len());
    for token in query.split_whitespace() {
        if !normalized.is_empty() {
            normalized.push(' ');
        }
        normalized.push_str(token);
    }
    normalized
}
79
/// A cached value plus access bookkeeping (hit count and recency).
struct CacheEntry<T> {
    /// The cached value; cloned out on every read.
    value: T,
    /// Number of times this entry has been read via `access`.
    access_count: u64,
    /// Time of the most recent read (creation time if never read).
    last_accessed: Instant,
}

// `Clone` is only needed to read the value back out, so the bound lives on
// `access` alone; `new` works for any `T`.
impl<T> CacheEntry<T> {
    /// Wraps `value` in a fresh entry with zeroed access statistics.
    fn new(value: T) -> Self {
        Self {
            value,
            access_count: 0,
            last_accessed: Instant::now(),
        }
    }
}

impl<T: Clone> CacheEntry<T> {
    /// Records a hit (count + recency) and returns a clone of the value.
    fn access(&mut self) -> T {
        self.access_count += 1;
        self.last_accessed = Instant::now();
        self.value.clone()
    }
}

/// A simple least-recently-used cache.
///
/// Recency is tracked in a `Vec` ordered least- to most-recently used
/// (front = next eviction victim). Repositioning a key on access is O(n);
/// acceptable at the small capacities used here, but a linked hash map
/// would be needed at scale.
struct LruCache<K, V> {
    /// Key → entry storage.
    entries: HashMap<K, CacheEntry<V>>,
    /// Maximum number of entries; `put` evicts before exceeding this.
    capacity: usize,
    /// Keys from least- to most-recently used.
    access_order: Vec<K>,
}

impl<K: Clone + Eq + Hash, V: Clone> LruCache<K, V> {
    /// Creates an empty cache that will hold at most `capacity` entries.
    fn new(capacity: usize) -> Self {
        Self {
            entries: HashMap::with_capacity(capacity),
            capacity,
            access_order: Vec::with_capacity(capacity),
        }
    }

    /// Returns a clone of the value for `key` (marking it most recently
    /// used), or `None` if absent.
    fn get(&mut self, key: &K) -> Option<V> {
        let entry = self.entries.get_mut(key)?;
        // Move the key to the back of the recency list (most recently used).
        if let Some(pos) = self.access_order.iter().position(|k| k == key) {
            self.access_order.remove(pos);
        }
        self.access_order.push(key.clone());
        Some(entry.access())
    }

    /// Inserts (or replaces) `key` → `value`, evicting the least recently
    /// used entry first when the cache is full.
    fn put(&mut self, key: K, value: V) {
        // Fix: a zero-capacity cache previously still retained one entry,
        // because eviction ran against an empty map before the insert.
        // A zero-capacity cache must never store anything.
        if self.capacity == 0 {
            return;
        }
        // Evict only when inserting a brand-new key into a full cache;
        // replacing an existing key does not change the entry count.
        if self.entries.len() >= self.capacity && !self.entries.contains_key(&key) {
            self.evict_lru();
        }

        if let Some(pos) = self.access_order.iter().position(|k| k == &key) {
            self.access_order.remove(pos);
        }
        self.access_order.push(key.clone());
        self.entries.insert(key, CacheEntry::new(value));
    }

    /// Drops the least recently used entry (front of the recency list).
    fn evict_lru(&mut self) {
        // `remove(0)` is O(n); fine for the small capacities in use.
        if let Some(key) = self.access_order.first().cloned() {
            self.access_order.remove(0);
            self.entries.remove(&key);
        }
    }

    /// Removes every entry, keeping allocated capacity.
    fn clear(&mut self) {
        self.entries.clear();
        self.access_order.clear();
    }

    /// Current number of stored entries.
    fn len(&self) -> usize {
        self.entries.len()
    }

    /// Removes `key`, returning its value if it was present.
    fn remove(&mut self, key: &K) -> Option<V> {
        if let Some(pos) = self.access_order.iter().position(|k| k == key) {
            self.access_order.remove(pos);
        }
        self.entries.remove(key).map(|e| e.value)
    }
}
177
/// Two-tier plan cache: one LRU for parsed plans, one for optimized plans,
/// each behind its own mutex, with hit/miss counters per tier.
pub struct QueryCache {
    /// LRU of parsed (pre-optimization) plans, keyed by normalized query.
    parsed_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// LRU of optimized plans, keyed by normalized query.
    optimized_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Successful lookups in the parsed cache.
    parsed_hits: AtomicU64,
    /// Failed lookups in the parsed cache.
    parsed_misses: AtomicU64,
    /// Successful lookups in the optimized cache.
    optimized_hits: AtomicU64,
    /// Failed lookups in the optimized cache.
    optimized_misses: AtomicU64,
    // When false, gets always miss (without counting) and puts are dropped.
    enabled: bool,
}
195
196impl QueryCache {
197 #[must_use]
202 pub fn new(capacity: usize) -> Self {
203 let half_capacity = capacity / 2;
204 Self {
205 parsed_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
206 optimized_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
207 parsed_hits: AtomicU64::new(0),
208 parsed_misses: AtomicU64::new(0),
209 optimized_hits: AtomicU64::new(0),
210 optimized_misses: AtomicU64::new(0),
211 enabled: true,
212 }
213 }
214
215 #[must_use]
217 pub fn disabled() -> Self {
218 Self {
219 parsed_cache: Mutex::new(LruCache::new(0)),
220 optimized_cache: Mutex::new(LruCache::new(0)),
221 parsed_hits: AtomicU64::new(0),
222 parsed_misses: AtomicU64::new(0),
223 optimized_hits: AtomicU64::new(0),
224 optimized_misses: AtomicU64::new(0),
225 enabled: false,
226 }
227 }
228
229 #[must_use]
231 pub fn is_enabled(&self) -> bool {
232 self.enabled
233 }
234
235 pub fn get_parsed(&self, key: &CacheKey) -> Option<LogicalPlan> {
237 if !self.enabled {
238 return None;
239 }
240
241 let result = self.parsed_cache.lock().get(key);
242 if result.is_some() {
243 self.parsed_hits.fetch_add(1, Ordering::Relaxed);
244 } else {
245 self.parsed_misses.fetch_add(1, Ordering::Relaxed);
246 }
247 result
248 }
249
250 pub fn put_parsed(&self, key: CacheKey, plan: LogicalPlan) {
252 if !self.enabled {
253 return;
254 }
255 self.parsed_cache.lock().put(key, plan);
256 }
257
258 pub fn get_optimized(&self, key: &CacheKey) -> Option<LogicalPlan> {
260 if !self.enabled {
261 return None;
262 }
263
264 let result = self.optimized_cache.lock().get(key);
265 if result.is_some() {
266 self.optimized_hits.fetch_add(1, Ordering::Relaxed);
267 } else {
268 self.optimized_misses.fetch_add(1, Ordering::Relaxed);
269 }
270 result
271 }
272
273 pub fn put_optimized(&self, key: CacheKey, plan: LogicalPlan) {
275 if !self.enabled {
276 return;
277 }
278 self.optimized_cache.lock().put(key, plan);
279 }
280
281 pub fn invalidate(&self, key: &CacheKey) {
283 self.parsed_cache.lock().remove(key);
284 self.optimized_cache.lock().remove(key);
285 }
286
287 pub fn clear(&self) {
289 self.parsed_cache.lock().clear();
290 self.optimized_cache.lock().clear();
291 }
292
293 #[must_use]
295 pub fn stats(&self) -> CacheStats {
296 CacheStats {
297 parsed_size: self.parsed_cache.lock().len(),
298 optimized_size: self.optimized_cache.lock().len(),
299 parsed_hits: self.parsed_hits.load(Ordering::Relaxed),
300 parsed_misses: self.parsed_misses.load(Ordering::Relaxed),
301 optimized_hits: self.optimized_hits.load(Ordering::Relaxed),
302 optimized_misses: self.optimized_misses.load(Ordering::Relaxed),
303 }
304 }
305
306 pub fn reset_stats(&self) {
308 self.parsed_hits.store(0, Ordering::Relaxed);
309 self.parsed_misses.store(0, Ordering::Relaxed);
310 self.optimized_hits.store(0, Ordering::Relaxed);
311 self.optimized_misses.store(0, Ordering::Relaxed);
312 }
313}
314
315impl Default for QueryCache {
316 fn default() -> Self {
317 Self::new(1000)
319 }
320}
321
/// Point-in-time snapshot of cache sizes and hit/miss counters.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Entries currently stored in the parsed-plan cache.
    pub parsed_size: usize,
    /// Entries currently stored in the optimized-plan cache.
    pub optimized_size: usize,
    /// Successful parsed-plan lookups since the last reset.
    pub parsed_hits: u64,
    /// Failed parsed-plan lookups since the last reset.
    pub parsed_misses: u64,
    /// Successful optimized-plan lookups since the last reset.
    pub optimized_hits: u64,
    /// Failed optimized-plan lookups since the last reset.
    pub optimized_misses: u64,
}

impl CacheStats {
    /// Shared hit-rate computation: `hits / (hits + misses)`, defined as
    /// 0.0 when no lookups have happened (avoids division by zero).
    fn hit_rate(hits: u64, misses: u64) -> f64 {
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }

    /// Fraction of parsed-plan lookups that hit, in `[0.0, 1.0]`.
    #[must_use]
    pub fn parsed_hit_rate(&self) -> f64 {
        Self::hit_rate(self.parsed_hits, self.parsed_misses)
    }

    /// Fraction of optimized-plan lookups that hit, in `[0.0, 1.0]`.
    #[must_use]
    pub fn optimized_hit_rate(&self) -> f64 {
        Self::hit_rate(self.optimized_hits, self.optimized_misses)
    }

    /// Total number of entries across both caches.
    #[must_use]
    pub fn total_size(&self) -> usize {
        self.parsed_size + self.optimized_size
    }

    /// Combined hit rate across both caches, in `[0.0, 1.0]`.
    #[must_use]
    pub fn total_hit_rate(&self) -> f64 {
        Self::hit_rate(
            self.parsed_hits + self.optimized_hits,
            self.parsed_misses + self.optimized_misses,
        )
    }
}
381
382pub struct CachingQueryProcessor<P> {
387 processor: P,
389 cache: QueryCache,
391}
392
393impl<P> CachingQueryProcessor<P> {
394 pub fn new(processor: P, cache: QueryCache) -> Self {
396 Self { processor, cache }
397 }
398
399 pub fn with_default_cache(processor: P) -> Self {
401 Self::new(processor, QueryCache::default())
402 }
403
404 #[must_use]
406 pub fn cache(&self) -> &QueryCache {
407 &self.cache
408 }
409
410 #[must_use]
412 pub fn processor(&self) -> &P {
413 &self.processor
414 }
415
416 #[must_use]
418 pub fn stats(&self) -> CacheStats {
419 self.cache.stats()
420 }
421
422 pub fn clear_cache(&self) {
424 self.cache.clear();
425 }
426}
427
#[cfg(test)]
mod tests {
    use super::*;
    use crate::query::plan::{LogicalOperator, LogicalPlan};

    /// Picks a compiled-in query language for tests, preferring GQL.
    #[cfg(feature = "gql")]
    fn test_language() -> QueryLanguage {
        QueryLanguage::Gql
    }

    #[cfg(not(feature = "gql"))]
    fn test_language() -> QueryLanguage {
        #[cfg(feature = "cypher")]
        return QueryLanguage::Cypher;
        #[cfg(feature = "sparql")]
        return QueryLanguage::Sparql;
    }

    /// Smallest possible plan: a bare `Empty` operator.
    fn empty_plan() -> LogicalPlan {
        LogicalPlan::new(LogicalOperator::Empty)
    }

    #[test]
    fn test_cache_key_normalization() {
        let key1 = CacheKey::new("MATCH (n) RETURN n", test_language());
        let key2 = CacheKey::new("MATCH (n) RETURN n", test_language());
        assert_eq!(key1.query(), key2.query());
    }

    #[test]
    fn test_cache_basic_operations() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // First lookup misses; after a put, the second lookup hits.
        assert!(cache.get_parsed(&key).is_none());
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_some());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 1);
        assert_eq!(stats.parsed_hits, 1);
        assert_eq!(stats.parsed_misses, 1);
    }

    #[test]
    fn test_cache_lru_eviction() {
        // Total capacity 4 → each tier holds 2 entries, so inserting three
        // distinct queries evicts the least recently used one (QUERY 0).
        let cache = QueryCache::new(4);
        for i in 0..3 {
            let key = CacheKey::new(format!("QUERY {}", i), test_language());
            cache.put_parsed(key, empty_plan());
        }

        let key0 = CacheKey::new("QUERY 0", test_language());
        assert!(cache.get_parsed(&key0).is_none());

        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());
        assert!(cache.get_parsed(&key1).is_some());
        assert!(cache.get_parsed(&key2).is_some());
    }

    #[test]
    fn test_cache_invalidation() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        cache.put_parsed(key.clone(), empty_plan());
        cache.put_optimized(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_some());
        assert!(cache.get_optimized(&key).is_some());

        // Invalidation removes the entry from both tiers.
        cache.invalidate(&key);
        cache.reset_stats();
        assert!(cache.get_parsed(&key).is_none());
        assert!(cache.get_optimized(&key).is_none());
    }

    #[test]
    fn test_cache_disabled() {
        let cache = QueryCache::disabled();
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // Puts are dropped and gets always miss on a disabled cache.
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_none());
        assert_eq!(cache.stats().parsed_size, 0);
    }

    #[test]
    fn test_cache_stats() {
        let cache = QueryCache::new(10);
        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());

        cache.get_optimized(&key1); // miss
        cache.put_optimized(key1.clone(), empty_plan());
        cache.get_optimized(&key1); // hit
        cache.get_optimized(&key1); // hit
        cache.get_optimized(&key2); // miss

        let stats = cache.stats();
        assert_eq!(stats.optimized_hits, 2);
        assert_eq!(stats.optimized_misses, 2);
        assert!((stats.optimized_hit_rate() - 0.5).abs() < 0.01);
    }
}