1use parking_lot::Mutex;
28use std::collections::HashMap;
29use std::hash::Hash;
30use std::sync::atomic::{AtomicU64, Ordering};
31use std::time::Instant;
32
33use crate::query::plan::LogicalPlan;
34use crate::query::processor::QueryLanguage;
35
/// Key identifying a cached query plan: the whitespace-normalized query
/// text plus the language it was written in. Equality and hashing derive
/// from both fields, so the same text in two languages gets two entries.
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct CacheKey {
    /// Query text after `normalize_query` (whitespace collapsed).
    query: String,
    /// Language the query was written in.
    language: QueryLanguage,
}
44
45impl CacheKey {
46 #[must_use]
48 pub fn new(query: impl Into<String>, language: QueryLanguage) -> Self {
49 Self {
50 query: normalize_query(&query.into()),
51 language,
52 }
53 }
54
55 #[must_use]
57 pub fn query(&self) -> &str {
58 &self.query
59 }
60
61 #[must_use]
63 pub fn language(&self) -> QueryLanguage {
64 self.language
65 }
66}
67
/// Collapses runs of whitespace outside string literals into single spaces
/// and drops leading/trailing whitespace, so formatting-only differences
/// between queries map to the same cache key.
///
/// Text inside single- or double-quoted literals (honoring `\` escapes) is
/// preserved verbatim: collapsing whitespace there would make semantically
/// different queries (differing only inside a literal) collide on one cache
/// key and return the wrong cached plan.
fn normalize_query(query: &str) -> String {
    let mut out = String::with_capacity(query.len());
    // Some(q) while inside a literal delimited by quote character `q`.
    let mut in_quote: Option<char> = None;
    // True when the previous in-literal character was a backslash.
    let mut escaped = false;
    // A whitespace run was seen; emit one ' ' before the next visible char.
    let mut space_pending = false;
    for c in query.chars() {
        if let Some(q) = in_quote {
            out.push(c);
            if escaped {
                escaped = false;
            } else if c == '\\' {
                escaped = true;
            } else if c == q {
                in_quote = None;
            }
        } else if c.is_whitespace() {
            // Defer the separator so leading/trailing whitespace is dropped.
            space_pending = !out.is_empty();
        } else {
            if space_pending {
                out.push(' ');
                space_pending = false;
            }
            out.push(c);
            if c == '\'' || c == '"' {
                in_quote = Some(c);
            }
        }
    }
    out
}
75
/// A cached value together with bookkeeping about how it has been used.
struct CacheEntry<T> {
    /// The cached value; cloned out on each access.
    value: T,
    /// Number of times this entry has been read.
    access_count: u64,
    /// When this entry was created or last read. Kept for diagnostics;
    /// eviction order is tracked separately by `LruCache::access_order`.
    last_accessed: Instant,
}

impl<T> CacheEntry<T> {
    /// Wraps `value` with zeroed access statistics.
    /// (No `Clone` bound needed here — only `access` clones.)
    fn new(value: T) -> Self {
        Self {
            value,
            access_count: 0,
            last_accessed: Instant::now(),
        }
    }
}

impl<T: Clone> CacheEntry<T> {
    /// Records a read and returns a clone of the stored value.
    fn access(&mut self) -> T {
        self.access_count += 1;
        self.last_accessed = Instant::now();
        self.value.clone()
    }
}

/// Simple LRU cache. Recency is tracked in `access_order` (least recently
/// used at the front), so each operation is O(capacity) in the worst case;
/// acceptable for the small, fixed capacities used by `QueryCache`.
struct LruCache<K, V> {
    /// Stored entries keyed by `K`.
    entries: HashMap<K, CacheEntry<V>>,
    /// Maximum number of entries; 0 means "store nothing".
    capacity: usize,
    /// Keys ordered from least to most recently used.
    access_order: Vec<K>,
}

impl<K: Clone + Eq + Hash, V: Clone> LruCache<K, V> {
    /// Creates an empty cache holding at most `capacity` entries.
    fn new(capacity: usize) -> Self {
        Self {
            entries: HashMap::with_capacity(capacity),
            capacity,
            access_order: Vec::with_capacity(capacity),
        }
    }

    /// Returns a clone of the value for `key`, marking it most recently
    /// used, or `None` if absent.
    fn get(&mut self, key: &K) -> Option<V> {
        if let Some(entry) = self.entries.get_mut(key) {
            // Move the key to the back (most-recently-used) position.
            if let Some(pos) = self.access_order.iter().position(|k| k == key) {
                self.access_order.remove(pos);
            }
            self.access_order.push(key.clone());
            Some(entry.access())
        } else {
            None
        }
    }

    /// Inserts `key -> value`, evicting the least recently used entry if
    /// the cache is full. Re-inserting an existing key replaces its entry
    /// (resetting its access statistics) without evicting anything.
    fn put(&mut self, key: K, value: V) {
        // A zero-capacity cache stores nothing. Without this guard the
        // insert below would still go through, because evicting from an
        // empty cache is a no-op — the cache would grow past capacity 0.
        if self.capacity == 0 {
            return;
        }

        if self.entries.len() >= self.capacity && !self.entries.contains_key(&key) {
            self.evict_lru();
        }

        if let Some(pos) = self.access_order.iter().position(|k| k == &key) {
            self.access_order.remove(pos);
        }

        self.access_order.push(key.clone());
        self.entries.insert(key, CacheEntry::new(value));
    }

    /// Removes the least recently used entry (front of `access_order`).
    fn evict_lru(&mut self) {
        if let Some(key) = self.access_order.first().cloned() {
            self.access_order.remove(0);
            self.entries.remove(&key);
        }
    }

    /// Drops all entries and recency information.
    fn clear(&mut self) {
        self.entries.clear();
        self.access_order.clear();
    }

    /// Number of entries currently stored.
    fn len(&self) -> usize {
        self.entries.len()
    }

    /// Removes `key`, returning its value if it was present.
    fn remove(&mut self, key: &K) -> Option<V> {
        if let Some(pos) = self.access_order.iter().position(|k| k == key) {
            self.access_order.remove(pos);
        }
        self.entries.remove(key).map(|e| e.value)
    }
}
173
/// Two-tier cache for query plans: one LRU cache for freshly parsed plans
/// and one for optimized plans, each with its own hit/miss counters.
pub struct QueryCache {
    /// Parsed (pre-optimization) plans keyed by normalized query + language.
    parsed_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Optimized plans keyed the same way.
    optimized_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Lookups that found a parsed plan.
    parsed_hits: AtomicU64,
    /// Lookups that found no parsed plan.
    parsed_misses: AtomicU64,
    /// Lookups that found an optimized plan.
    optimized_hits: AtomicU64,
    /// Lookups that found no optimized plan.
    optimized_misses: AtomicU64,
    /// When false, gets always miss (without counting) and puts are dropped.
    enabled: bool,
}
191
192impl QueryCache {
193 #[must_use]
198 pub fn new(capacity: usize) -> Self {
199 let half_capacity = capacity / 2;
200 Self {
201 parsed_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
202 optimized_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
203 parsed_hits: AtomicU64::new(0),
204 parsed_misses: AtomicU64::new(0),
205 optimized_hits: AtomicU64::new(0),
206 optimized_misses: AtomicU64::new(0),
207 enabled: true,
208 }
209 }
210
211 #[must_use]
213 pub fn disabled() -> Self {
214 Self {
215 parsed_cache: Mutex::new(LruCache::new(0)),
216 optimized_cache: Mutex::new(LruCache::new(0)),
217 parsed_hits: AtomicU64::new(0),
218 parsed_misses: AtomicU64::new(0),
219 optimized_hits: AtomicU64::new(0),
220 optimized_misses: AtomicU64::new(0),
221 enabled: false,
222 }
223 }
224
225 #[must_use]
227 pub fn is_enabled(&self) -> bool {
228 self.enabled
229 }
230
231 pub fn get_parsed(&self, key: &CacheKey) -> Option<LogicalPlan> {
233 if !self.enabled {
234 return None;
235 }
236
237 let result = self.parsed_cache.lock().get(key);
238 if result.is_some() {
239 self.parsed_hits.fetch_add(1, Ordering::Relaxed);
240 } else {
241 self.parsed_misses.fetch_add(1, Ordering::Relaxed);
242 }
243 result
244 }
245
246 pub fn put_parsed(&self, key: CacheKey, plan: LogicalPlan) {
248 if !self.enabled {
249 return;
250 }
251 self.parsed_cache.lock().put(key, plan);
252 }
253
254 pub fn get_optimized(&self, key: &CacheKey) -> Option<LogicalPlan> {
256 if !self.enabled {
257 return None;
258 }
259
260 let result = self.optimized_cache.lock().get(key);
261 if result.is_some() {
262 self.optimized_hits.fetch_add(1, Ordering::Relaxed);
263 } else {
264 self.optimized_misses.fetch_add(1, Ordering::Relaxed);
265 }
266 result
267 }
268
269 pub fn put_optimized(&self, key: CacheKey, plan: LogicalPlan) {
271 if !self.enabled {
272 return;
273 }
274 self.optimized_cache.lock().put(key, plan);
275 }
276
277 pub fn invalidate(&self, key: &CacheKey) {
279 self.parsed_cache.lock().remove(key);
280 self.optimized_cache.lock().remove(key);
281 }
282
283 pub fn clear(&self) {
285 self.parsed_cache.lock().clear();
286 self.optimized_cache.lock().clear();
287 }
288
289 #[must_use]
291 pub fn stats(&self) -> CacheStats {
292 CacheStats {
293 parsed_size: self.parsed_cache.lock().len(),
294 optimized_size: self.optimized_cache.lock().len(),
295 parsed_hits: self.parsed_hits.load(Ordering::Relaxed),
296 parsed_misses: self.parsed_misses.load(Ordering::Relaxed),
297 optimized_hits: self.optimized_hits.load(Ordering::Relaxed),
298 optimized_misses: self.optimized_misses.load(Ordering::Relaxed),
299 }
300 }
301
302 pub fn reset_stats(&self) {
304 self.parsed_hits.store(0, Ordering::Relaxed);
305 self.parsed_misses.store(0, Ordering::Relaxed);
306 self.optimized_hits.store(0, Ordering::Relaxed);
307 self.optimized_misses.store(0, Ordering::Relaxed);
308 }
309}
310
311impl Default for QueryCache {
312 fn default() -> Self {
313 Self::new(1000)
315 }
316}
317
/// Point-in-time snapshot of `QueryCache` sizes and hit/miss counters.
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Entries currently in the parsed-plan cache.
    pub parsed_size: usize,
    /// Entries currently in the optimized-plan cache.
    pub optimized_size: usize,
    /// Lookups that found a parsed plan.
    pub parsed_hits: u64,
    /// Lookups that found no parsed plan.
    pub parsed_misses: u64,
    /// Lookups that found an optimized plan.
    pub optimized_hits: u64,
    /// Lookups that found no optimized plan.
    pub optimized_misses: u64,
}

impl CacheStats {
    /// Hits divided by total lookups; 0.0 when there were no lookups.
    /// Shared by the three public hit-rate accessors so they cannot drift.
    fn ratio(hits: u64, misses: u64) -> f64 {
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            hits as f64 / total as f64
        }
    }

    /// Hit rate of the parsed-plan cache, in [0.0, 1.0].
    #[must_use]
    pub fn parsed_hit_rate(&self) -> f64 {
        Self::ratio(self.parsed_hits, self.parsed_misses)
    }

    /// Hit rate of the optimized-plan cache, in [0.0, 1.0].
    #[must_use]
    pub fn optimized_hit_rate(&self) -> f64 {
        Self::ratio(self.optimized_hits, self.optimized_misses)
    }

    /// Total entries across both caches.
    #[must_use]
    pub fn total_size(&self) -> usize {
        self.parsed_size + self.optimized_size
    }

    /// Combined hit rate over both caches, in [0.0, 1.0].
    #[must_use]
    pub fn total_hit_rate(&self) -> f64 {
        Self::ratio(
            self.parsed_hits + self.optimized_hits,
            self.parsed_misses + self.optimized_misses,
        )
    }
}
377
378pub struct CachingQueryProcessor<P> {
383 processor: P,
385 cache: QueryCache,
387}
388
389impl<P> CachingQueryProcessor<P> {
390 pub fn new(processor: P, cache: QueryCache) -> Self {
392 Self { processor, cache }
393 }
394
395 pub fn with_default_cache(processor: P) -> Self {
397 Self::new(processor, QueryCache::default())
398 }
399
400 #[must_use]
402 pub fn cache(&self) -> &QueryCache {
403 &self.cache
404 }
405
406 #[must_use]
408 pub fn processor(&self) -> &P {
409 &self.processor
410 }
411
412 #[must_use]
414 pub fn stats(&self) -> CacheStats {
415 self.cache.stats()
416 }
417
418 pub fn clear_cache(&self) {
420 self.cache.clear();
421 }
422}
423
#[cfg(test)]
mod tests {
    use super::*;
    use crate::query::plan::{LogicalOperator, LogicalPlan};

    /// Picks whichever query language the enabled feature set provides.
    #[cfg(feature = "gql")]
    fn test_language() -> QueryLanguage {
        QueryLanguage::Gql
    }

    #[cfg(not(feature = "gql"))]
    fn test_language() -> QueryLanguage {
        #[cfg(feature = "cypher")]
        return QueryLanguage::Cypher;
        #[cfg(feature = "sparql")]
        return QueryLanguage::Sparql;
    }

    /// A trivial plan used as the cached payload in these tests.
    fn empty_plan() -> LogicalPlan {
        LogicalPlan::new(LogicalOperator::Empty)
    }

    #[test]
    fn test_cache_key_normalization() {
        let first = CacheKey::new("MATCH (n) RETURN n", test_language());
        let second = CacheKey::new("MATCH (n) RETURN n", test_language());
        assert_eq!(first.query(), second.query());
    }

    #[test]
    fn test_cache_basic_operations() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // First lookup misses; after storing, the plan is found.
        assert!(cache.get_parsed(&key).is_none());
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_some());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 1);
        assert_eq!(stats.parsed_hits, 1);
        assert_eq!(stats.parsed_misses, 1);
    }

    #[test]
    fn test_cache_lru_eviction() {
        // Total capacity 4 -> 2 entries per internal cache.
        let cache = QueryCache::new(4);

        for i in 0..3 {
            let key = CacheKey::new(format!("QUERY {}", i), test_language());
            cache.put_parsed(key, empty_plan());
        }

        // The oldest entry was evicted; the two newest survive.
        let key0 = CacheKey::new("QUERY 0", test_language());
        assert!(cache.get_parsed(&key0).is_none());

        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());
        assert!(cache.get_parsed(&key1).is_some());
        assert!(cache.get_parsed(&key2).is_some());
    }

    #[test]
    fn test_cache_invalidation() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        cache.put_parsed(key.clone(), empty_plan());
        cache.put_optimized(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_some());
        assert!(cache.get_optimized(&key).is_some());

        cache.invalidate(&key);
        cache.reset_stats();

        assert!(cache.get_parsed(&key).is_none());
        assert!(cache.get_optimized(&key).is_none());
    }

    #[test]
    fn test_cache_disabled() {
        let cache = QueryCache::disabled();
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        // Puts are dropped and gets always miss on a disabled cache.
        cache.put_parsed(key.clone(), empty_plan());
        assert!(cache.get_parsed(&key).is_none());
        assert_eq!(cache.stats().parsed_size, 0);
    }

    #[test]
    fn test_cache_stats() {
        let cache = QueryCache::new(10);
        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());

        // One miss before insertion, two hits after, one miss on key2.
        cache.get_optimized(&key1);
        cache.put_optimized(key1.clone(), empty_plan());
        cache.get_optimized(&key1);
        cache.get_optimized(&key1);
        cache.get_optimized(&key2);

        let stats = cache.stats();
        assert_eq!(stats.optimized_hits, 2);
        assert_eq!(stats.optimized_misses, 2);
        assert!((stats.optimized_hit_rate() - 0.5).abs() < 0.01);
    }
}