use parking_lot::Mutex;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;

use crate::query::plan::LogicalPlan;
use crate::query::processor::QueryLanguage;

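/// Key used to look up cached plans: the whitespace-normalized query text
/// paired with the language it was written in.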
#[derive(Clone, Eq, PartialEq, Hash)]
pub struct CacheKey {
    /// Normalized query text (whitespace collapsed to single spaces).
    query: String,
    /// Language the query is written in.
    language: QueryLanguage,
}

impl CacheKey {
    /// Creates a cache key from a query string and its language.
    ///
    /// The query text is whitespace-normalized so that formatting
    /// differences do not produce distinct cache entries.
    #[must_use]
    pub fn new(query: impl Into<String>, language: QueryLanguage) -> Self {
        Self {
            query: normalize_query(&query.into()),
            language,
        }
    }

    /// Returns the normalized query text.
    #[must_use]
    pub fn query(&self) -> &str {
        &self.query
    }

    /// Returns the query language.
    #[must_use]
    pub fn language(&self) -> QueryLanguage {
        self.language
    }
}

/// Collapses all runs of whitespace in `query` into single spaces so that
/// formatting differences map to the same cache key.
fn normalize_query(query: &str) -> String {
    query.split_whitespace().collect::<Vec<_>>().join(" ")
}

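/// A cached value together with its access metadata.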
struct CacheEntry<T> {
    /// The cached value.
    value: T,
    /// When the entry was created.
    #[allow(dead_code)]
    created_at: Instant,
    /// How many times the entry has been read.
    access_count: u64,
    /// When the entry was last read.
    last_accessed: Instant,
}

impl<T: Clone> CacheEntry<T> {
    fn new(value: T) -> Self {
        let now = Instant::now();
        Self {
            value,
            created_at: now,
            access_count: 0,
            last_accessed: now,
        }
    }

    fn access(&mut self) -> T {
        self.access_count += 1;
        self.last_accessed = Instant::now();
        self.value.clone()
    }
}

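/// Minimal LRU cache used for each plan tier.
///
/// Recency is tracked in a `Vec<K>` ordered from least to most recently
/// used, so `get` and `put` reorder keys in O(n); adequate for the small
/// capacities used here.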
struct LruCache<K, V> {
    /// Cached entries keyed by cache key.
    entries: HashMap<K, CacheEntry<V>>,
    /// Maximum number of entries the cache may hold.
    capacity: usize,
    /// Keys ordered from least to most recently used.
    access_order: Vec<K>,
}

impl<K: Clone + Eq + Hash, V: Clone> LruCache<K, V> {
    fn new(capacity: usize) -> Self {
        Self {
            entries: HashMap::with_capacity(capacity),
            capacity,
            access_order: Vec::with_capacity(capacity),
        }
    }

    fn get(&mut self, key: &K) -> Option<V> {
        if let Some(entry) = self.entries.get_mut(key) {
            // Move the key to the back of the access order (most recently used).
            if let Some(pos) = self.access_order.iter().position(|k| k == key) {
                self.access_order.remove(pos);
            }
            self.access_order.push(key.clone());
            Some(entry.access())
        } else {
            None
        }
    }

    fn put(&mut self, key: K, value: V) {
        // Evict the least recently used entry when inserting a new key into a full cache.
        if self.entries.len() >= self.capacity && !self.entries.contains_key(&key) {
            self.evict_lru();
        }

        // Refresh the key's position in the access order.
        if let Some(pos) = self.access_order.iter().position(|k| k == &key) {
            self.access_order.remove(pos);
        }

        self.access_order.push(key.clone());
        self.entries.insert(key, CacheEntry::new(value));
    }

    fn evict_lru(&mut self) {
        if let Some(key) = self.access_order.first().cloned() {
            self.access_order.remove(0);
            self.entries.remove(&key);
        }
    }

    fn clear(&mut self) {
        self.entries.clear();
        self.access_order.clear();
    }

    fn len(&self) -> usize {
        self.entries.len()
    }

    fn remove(&mut self, key: &K) -> Option<V> {
        if let Some(pos) = self.access_order.iter().position(|k| k == key) {
            self.access_order.remove(pos);
        }
        self.entries.remove(key).map(|e| e.value)
    }
}

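/// Two-tier LRU cache for query plans.
///
/// Parsed (pre-optimization) and optimized [`LogicalPlan`]s are cached
/// separately, each tier holding half of the configured capacity, and
/// hit/miss counters are tracked per tier.
///
/// # Example
///
/// Illustrative sketch only: it assumes the `cypher` feature is enabled and
/// that `LogicalPlan` / `LogicalOperator` from `crate::query::plan` are in
/// scope, as in the tests below.
///
/// ```ignore
/// let cache = QueryCache::new(100);
/// let key = CacheKey::new("MATCH (n) RETURN n", QueryLanguage::Cypher);
///
/// assert!(cache.get_parsed(&key).is_none());
/// cache.put_parsed(key.clone(), LogicalPlan::new(LogicalOperator::Empty));
/// assert!(cache.get_parsed(&key).is_some());
/// ```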
pub struct QueryCache {
    /// Cache of parsed (unoptimized) logical plans.
    parsed_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Cache of optimized logical plans.
    optimized_cache: Mutex<LruCache<CacheKey, LogicalPlan>>,
    /// Number of parsed-plan cache hits.
    parsed_hits: AtomicU64,
    /// Number of parsed-plan cache misses.
    parsed_misses: AtomicU64,
    /// Number of optimized-plan cache hits.
    optimized_hits: AtomicU64,
    /// Number of optimized-plan cache misses.
    optimized_misses: AtomicU64,
    /// Whether caching is enabled.
    enabled: bool,
}

impl QueryCache {
    /// Creates a new cache with the given total capacity.
    ///
    /// The capacity is split evenly between the parsed-plan and
    /// optimized-plan tiers, with a minimum of one entry each.
    #[must_use]
    pub fn new(capacity: usize) -> Self {
        let half_capacity = capacity / 2;
        Self {
            parsed_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
            optimized_cache: Mutex::new(LruCache::new(half_capacity.max(1))),
            parsed_hits: AtomicU64::new(0),
            parsed_misses: AtomicU64::new(0),
            optimized_hits: AtomicU64::new(0),
            optimized_misses: AtomicU64::new(0),
            enabled: true,
        }
    }

    /// Creates a disabled cache that never stores or returns entries.
    #[must_use]
    pub fn disabled() -> Self {
        Self {
            parsed_cache: Mutex::new(LruCache::new(0)),
            optimized_cache: Mutex::new(LruCache::new(0)),
            parsed_hits: AtomicU64::new(0),
            parsed_misses: AtomicU64::new(0),
            optimized_hits: AtomicU64::new(0),
            optimized_misses: AtomicU64::new(0),
            enabled: false,
        }
    }

    /// Returns whether caching is enabled.
    #[must_use]
    pub fn is_enabled(&self) -> bool {
        self.enabled
    }

    /// Looks up a parsed plan, recording a hit or miss.
    pub fn get_parsed(&self, key: &CacheKey) -> Option<LogicalPlan> {
        if !self.enabled {
            return None;
        }

        let result = self.parsed_cache.lock().get(key);
        if result.is_some() {
            self.parsed_hits.fetch_add(1, Ordering::Relaxed);
        } else {
            self.parsed_misses.fetch_add(1, Ordering::Relaxed);
        }
        result
    }

    /// Stores a parsed plan. Does nothing if the cache is disabled.
    pub fn put_parsed(&self, key: CacheKey, plan: LogicalPlan) {
        if !self.enabled {
            return;
        }
        self.parsed_cache.lock().put(key, plan);
    }

    /// Looks up an optimized plan, recording a hit or miss.
    pub fn get_optimized(&self, key: &CacheKey) -> Option<LogicalPlan> {
        if !self.enabled {
            return None;
        }

        let result = self.optimized_cache.lock().get(key);
        if result.is_some() {
            self.optimized_hits.fetch_add(1, Ordering::Relaxed);
        } else {
            self.optimized_misses.fetch_add(1, Ordering::Relaxed);
        }
        result
    }

    /// Stores an optimized plan. Does nothing if the cache is disabled.
    pub fn put_optimized(&self, key: CacheKey, plan: LogicalPlan) {
        if !self.enabled {
            return;
        }
        self.optimized_cache.lock().put(key, plan);
    }

    /// Removes any cached plans for the given key from both tiers.
    pub fn invalidate(&self, key: &CacheKey) {
        self.parsed_cache.lock().remove(key);
        self.optimized_cache.lock().remove(key);
    }

    /// Removes all cached plans from both tiers.
    pub fn clear(&self) {
        self.parsed_cache.lock().clear();
        self.optimized_cache.lock().clear();
    }

    /// Returns a snapshot of the current cache sizes and hit/miss counters.
    #[must_use]
    pub fn stats(&self) -> CacheStats {
        CacheStats {
            parsed_size: self.parsed_cache.lock().len(),
            optimized_size: self.optimized_cache.lock().len(),
            parsed_hits: self.parsed_hits.load(Ordering::Relaxed),
            parsed_misses: self.parsed_misses.load(Ordering::Relaxed),
            optimized_hits: self.optimized_hits.load(Ordering::Relaxed),
            optimized_misses: self.optimized_misses.load(Ordering::Relaxed),
        }
    }

    /// Resets all hit/miss counters to zero.
    pub fn reset_stats(&self) {
        self.parsed_hits.store(0, Ordering::Relaxed);
        self.parsed_misses.store(0, Ordering::Relaxed);
        self.optimized_hits.store(0, Ordering::Relaxed);
        self.optimized_misses.store(0, Ordering::Relaxed);
    }
}

impl Default for QueryCache {
    fn default() -> Self {
        Self::new(1000)
    }
}

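/// Point-in-time snapshot of cache sizes and hit/miss counters, as returned
/// by [`QueryCache::stats`].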
#[derive(Debug, Clone)]
pub struct CacheStats {
    /// Number of entries currently in the parsed-plan cache.
    pub parsed_size: usize,
    /// Number of entries currently in the optimized-plan cache.
    pub optimized_size: usize,
    /// Parsed-plan cache hits.
    pub parsed_hits: u64,
    /// Parsed-plan cache misses.
    pub parsed_misses: u64,
    /// Optimized-plan cache hits.
    pub optimized_hits: u64,
    /// Optimized-plan cache misses.
    pub optimized_misses: u64,
}

impl CacheStats {
    /// Hit rate of the parsed-plan cache, or 0.0 if it has not been queried.
    #[must_use]
    pub fn parsed_hit_rate(&self) -> f64 {
        let total = self.parsed_hits + self.parsed_misses;
        if total == 0 {
            0.0
        } else {
            self.parsed_hits as f64 / total as f64
        }
    }

    /// Hit rate of the optimized-plan cache, or 0.0 if it has not been queried.
    #[must_use]
    pub fn optimized_hit_rate(&self) -> f64 {
        let total = self.optimized_hits + self.optimized_misses;
        if total == 0 {
            0.0
        } else {
            self.optimized_hits as f64 / total as f64
        }
    }

    /// Total number of cached entries across both tiers.
    #[must_use]
    pub fn total_size(&self) -> usize {
        self.parsed_size + self.optimized_size
    }

    /// Combined hit rate across both tiers, or 0.0 if neither has been queried.
    #[must_use]
    pub fn total_hit_rate(&self) -> f64 {
        let total_hits = self.parsed_hits + self.optimized_hits;
        let total_misses = self.parsed_misses + self.optimized_misses;
        let total = total_hits + total_misses;
        if total == 0 {
            0.0
        } else {
            total_hits as f64 / total as f64
        }
    }
}

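/// Pairs a query processor with a [`QueryCache`].
///
/// As written here, the wrapper owns the cache and exposes it alongside the
/// inner processor; callers consult the cache around their own parse and
/// optimize steps.
///
/// # Example
///
/// Illustrative sketch only: `processor`, `query_text`, `language`, and the
/// `parse` closure are hypothetical stand-ins for whatever the wrapped
/// processor provides.
///
/// ```ignore
/// let caching = CachingQueryProcessor::with_default_cache(processor);
/// let key = CacheKey::new(query_text, language);
///
/// let plan = match caching.cache().get_parsed(&key) {
///     Some(plan) => plan,
///     None => {
///         let plan = parse(key.query());
///         caching.cache().put_parsed(key.clone(), plan.clone());
///         plan
///     }
/// };
/// ```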
pub struct CachingQueryProcessor<P> {
    /// The wrapped query processor.
    processor: P,
    /// Cache for parsed and optimized plans.
    cache: QueryCache,
}

impl<P> CachingQueryProcessor<P> {
    /// Wraps `processor` with the given cache.
    pub fn new(processor: P, cache: QueryCache) -> Self {
        Self { processor, cache }
    }

    /// Wraps `processor` with a default-sized cache.
    pub fn with_default_cache(processor: P) -> Self {
        Self::new(processor, QueryCache::default())
    }

    /// Returns a reference to the underlying cache.
    #[must_use]
    pub fn cache(&self) -> &QueryCache {
        &self.cache
    }

    /// Returns a reference to the wrapped processor.
    #[must_use]
    pub fn processor(&self) -> &P {
        &self.processor
    }

    /// Returns the current cache statistics.
    #[must_use]
    pub fn stats(&self) -> CacheStats {
        self.cache.stats()
    }

    /// Clears both cache tiers.
    pub fn clear_cache(&self) {
        self.cache.clear();
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[cfg(feature = "gql")]
    fn test_language() -> QueryLanguage {
        QueryLanguage::Gql
    }

    #[cfg(not(feature = "gql"))]
    fn test_language() -> QueryLanguage {
        #[cfg(feature = "cypher")]
        return QueryLanguage::Cypher;
        #[cfg(feature = "sparql")]
        return QueryLanguage::Sparql;
    }

    #[test]
    fn test_cache_key_normalization() {
        // Two spellings of the same query that differ only in whitespace
        // should normalize to the same key.
        let key1 = CacheKey::new("MATCH (n) RETURN n", test_language());
        let key2 = CacheKey::new("MATCH   (n)\n\tRETURN n", test_language());

        assert_eq!(key1.query(), key2.query());
    }

    #[test]
    fn test_cache_basic_operations() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        use crate::query::plan::{LogicalOperator, LogicalPlan};
        let plan = LogicalPlan::new(LogicalOperator::Empty);

        assert!(cache.get_parsed(&key).is_none());

        cache.put_parsed(key.clone(), plan.clone());
        assert!(cache.get_parsed(&key).is_some());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 1);
        assert_eq!(stats.parsed_hits, 1);
        assert_eq!(stats.parsed_misses, 1);
    }

    #[test]
    fn test_cache_lru_eviction() {
        use crate::query::plan::{LogicalOperator, LogicalPlan};

        // Total capacity 4 is split in half, so the parsed cache holds two
        // entries; inserting a third evicts the least recently used key.
        let cache = QueryCache::new(4);
        for i in 0..3 {
            let key = CacheKey::new(format!("QUERY {}", i), test_language());
            cache.put_parsed(key, LogicalPlan::new(LogicalOperator::Empty));
        }

        let key0 = CacheKey::new("QUERY 0", test_language());
        assert!(cache.get_parsed(&key0).is_none());

        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());
        assert!(cache.get_parsed(&key1).is_some());
        assert!(cache.get_parsed(&key2).is_some());
    }

    #[test]
    fn test_cache_invalidation() {
        let cache = QueryCache::new(10);
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        use crate::query::plan::{LogicalOperator, LogicalPlan};
        let plan = LogicalPlan::new(LogicalOperator::Empty);

        cache.put_parsed(key.clone(), plan.clone());
        cache.put_optimized(key.clone(), plan);

        assert!(cache.get_parsed(&key).is_some());
        assert!(cache.get_optimized(&key).is_some());

        cache.invalidate(&key);

        cache.reset_stats();

        assert!(cache.get_parsed(&key).is_none());
        assert!(cache.get_optimized(&key).is_none());
    }

    #[test]
    fn test_cache_disabled() {
        let cache = QueryCache::disabled();
        let key = CacheKey::new("MATCH (n) RETURN n", test_language());

        use crate::query::plan::{LogicalOperator, LogicalPlan};
        let plan = LogicalPlan::new(LogicalOperator::Empty);

        // Puts and gets are no-ops while the cache is disabled.
        cache.put_parsed(key.clone(), plan);
        assert!(cache.get_parsed(&key).is_none());

        let stats = cache.stats();
        assert_eq!(stats.parsed_size, 0);
    }

    #[test]
    fn test_cache_stats() {
        let cache = QueryCache::new(10);

        use crate::query::plan::{LogicalOperator, LogicalPlan};

        let key1 = CacheKey::new("QUERY 1", test_language());
        let key2 = CacheKey::new("QUERY 2", test_language());
        let plan = LogicalPlan::new(LogicalOperator::Empty);

        // Miss: nothing cached yet.
        cache.get_optimized(&key1);

        // Two hits after inserting key1.
        cache.put_optimized(key1.clone(), plan);
        cache.get_optimized(&key1);
        cache.get_optimized(&key1);

        // Miss: key2 was never inserted.
        cache.get_optimized(&key2);

        let stats = cache.stats();
        assert_eq!(stats.optimized_hits, 2);
        assert_eq!(stats.optimized_misses, 2);
        assert!((stats.optimized_hit_rate() - 0.5).abs() < 0.01);
    }
}