use super::types::CacheStats;
use crate::cache::CacheConfig;
use crate::cache::policy::{
CacheAdmission, CachePolicy, CachePolicyConfig, CachePolicyKind, build_cache_policy,
};
use crate::query::ParsedQuery;
use log::{debug, warn};
use lru::LruCache;
use parking_lot::RwLock;
use std::num::NonZeroUsize;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
/// Smallest capacity the cache will operate with; zero capacities are clamped up to this.
const MIN_CAPACITY: u64 = 1;
/// Fixed weight, in bytes, charged to the admission policy per cached AST entry.
/// NOTE(review): this is a flat estimate — the actual `ParsedQuery` size is not measured.
const AST_ENTRY_WEIGHT_BYTES: u64 = 2048;
/// Thread-safe cache of parsed query ASTs: an LRU store fronted by a pluggable
/// admission/eviction policy (e.g. TinyLFU), with hit/miss/eviction counters.
pub struct AstParseCache {
    // LRU store keyed by the raw query string. Kept behind a write lock even
    // for lookups, because `LruCache::get` mutates the recency order.
    cache: RwLock<LruCache<String, Arc<ParsedQuery>>>,
    // Normalized (non-zero) capacity, mirroring the LRU's bound.
    capacity: usize,
    // Shared admission/eviction policy behind a trait object.
    policy: Arc<dyn CachePolicy<String>>,
    // Lock-free counters; relaxed ordering is fine for statistics.
    hits: AtomicU64,
    misses: AtomicU64,
    evictions: AtomicU64,
}
impl AstParseCache {
    /// Creates a cache holding at most `capacity` parsed queries, with the
    /// admission policy configured from the environment.
    ///
    /// A zero `capacity` is clamped to `MIN_CAPACITY` (with a warning) so the
    /// cache always admits at least one entry.
    #[must_use]
    pub fn new(capacity: usize) -> Self {
        let cap = if capacity == 0 {
            warn!("AstParseCache::new received zero capacity; defaulting to {MIN_CAPACITY}");
            #[allow(clippy::cast_possible_truncation)]
            {
                MIN_CAPACITY as usize
            }
        } else {
            capacity
        };
        let (kind, window_ratio) = Self::policy_params_from_env();
        Self::with_policy(cap, kind, window_ratio)
    }
    /// Builds the cache with an explicit policy kind and TinyLFU window ratio.
    fn with_policy(capacity: usize, kind: CachePolicyKind, window_ratio: f32) -> Self {
        #[allow(clippy::cast_possible_truncation)]
        let normalized_capacity = capacity.max(MIN_CAPACITY as usize);
        // `normalized_capacity >= 1`, so `new` cannot fail; the fallback is
        // purely defensive.
        let cap = NonZeroUsize::new(normalized_capacity).unwrap_or(NonZeroUsize::MIN);
        let policy_config = CachePolicyConfig::new(kind, normalized_capacity as u64, window_ratio);
        Self {
            cache: RwLock::new(LruCache::new(cap)),
            capacity: normalized_capacity,
            policy: build_cache_policy(&policy_config),
            hits: AtomicU64::new(0),
            misses: AtomicU64::new(0),
            evictions: AtomicU64::new(0),
        }
    }
    /// Reads the policy kind and window ratio from environment-backed config.
    fn policy_params_from_env() -> (CachePolicyKind, f32) {
        let cfg = CacheConfig::from_env();
        (cfg.policy_kind(), cfg.policy_window_ratio())
    }
    /// Applies any evictions the policy has queued since the last call,
    /// removing those keys from the LRU store and counting each removal.
    fn handle_policy_evictions(&self) {
        let evicted = self.policy.drain_evictions();
        if evicted.is_empty() {
            return;
        }
        let mut cache = self.cache.write();
        for eviction in evicted {
            // Only count entries that were actually resident.
            if cache.pop(&eviction.key).is_some() {
                self.evictions.fetch_add(1, Ordering::Relaxed);
            }
        }
    }
    /// Looks up a cached parse for `query_str`, updating hit/miss counters
    /// and the policy's access history on a hit.
    pub fn get(&self, query_str: &str) -> Option<Arc<ParsedQuery>> {
        self.handle_policy_evictions();
        // A write lock is needed even for lookups: `LruCache::get` mutates
        // the recency order.
        let mut cache = self.cache.write();
        if let Some(parsed_arc) = cache.get(query_str) {
            self.hits.fetch_add(1, Ordering::Relaxed);
            // The policy is keyed by `String`, so a hit costs one allocation.
            let key = query_str.to_owned();
            let _ = self.policy.record_hit(&key);
            Some(parsed_arc.clone())
        } else {
            self.misses.fetch_add(1, Ordering::Relaxed);
            None
        }
    }
    /// Convenience wrapper around [`Self::insert_arc`] that wraps `parsed`
    /// in a fresh `Arc`.
    pub fn insert(&self, query_str: String, parsed: ParsedQuery) {
        self.insert_arc(query_str, Arc::new(parsed));
    }
    /// Inserts an already-shared parse result under `query_str`.
    ///
    /// The entry is first offered to the admission policy; rejected entries
    /// are dropped so cold keys cannot flush a warm cache. When the store is
    /// full and the key is new, the LRU victim is evicted and the policy is
    /// told to forget it.
    pub fn insert_arc(&self, query_str: String, parsed_arc: Arc<ParsedQuery>) {
        self.handle_policy_evictions();
        // Borrow the key for admission; `put` only consumes it afterwards, so
        // no clone is needed.
        if matches!(
            self.policy.admit(&query_str, AST_ENTRY_WEIGHT_BYTES),
            CacheAdmission::Rejected
        ) {
            debug!(
                "AST parse cache policy {:?} rejected entry",
                self.policy.kind()
            );
            return;
        }
        let mut cache = self.cache.write();
        // Only make room for genuinely new keys: re-inserting an existing key
        // replaces it in place, and popping the LRU here would spuriously
        // evict an unrelated entry (or even the key being refreshed),
        // over-counting evictions and invalidating a live key in the policy.
        if cache.len() >= self.capacity
            && !cache.contains(&query_str)
            && let Some((evicted_key, _)) = cache.pop_lru()
        {
            self.policy.invalidate(&evicted_key);
            self.evictions.fetch_add(1, Ordering::Relaxed);
        }
        cache.put(query_str, parsed_arc);
    }
    /// Returns a point-in-time snapshot of the hit/miss/eviction counters.
    pub fn stats(&self) -> CacheStats {
        CacheStats {
            hits: self.hits.load(Ordering::Relaxed),
            misses: self.misses.load(Ordering::Relaxed),
            evictions: self.evictions.load(Ordering::Relaxed),
        }
    }
    /// Empties the cache and resets all counters and policy state.
    pub fn clear(&self) {
        let mut cache = self.cache.write();
        cache.clear();
        self.hits.store(0, Ordering::Relaxed);
        self.misses.store(0, Ordering::Relaxed);
        self.evictions.store(0, Ordering::Relaxed);
        self.policy.reset();
    }
    /// Number of entries currently resident in the cache.
    pub fn len(&self) -> usize {
        self.cache.read().len()
    }
    /// True when the cache holds no entries.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Test helper: builds a cache with an explicit policy kind and the
    /// default TinyLFU window ratio.
    #[cfg(test)]
    fn with_policy_kind(capacity: usize, kind: CachePolicyKind) -> Self {
        Self::with_policy(capacity, kind, CacheConfig::DEFAULT_POLICY_WINDOW_RATIO)
    }
    /// Test helper: exposes the policy's internal metrics.
    #[cfg(test)]
    fn policy_metrics(&self) -> crate::cache::policy::CachePolicyMetrics {
        self.policy.stats()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::cache::policy::CachePolicyKind;
    use crate::query::types::{Condition, Expr, Field, Operator, Query as QueryAST, Span, Value};

    /// Builds a minimal parsed `kind:function` query for use as a cache value.
    fn make_test_parsed_query(_query_str: &str) -> ParsedQuery {
        let condition = Condition {
            field: Field::new("kind"),
            operator: Operator::Equal,
            value: Value::String("function".to_string()),
            span: Span::new(0, 13),
        };
        let ast = QueryAST {
            root: Expr::Condition(condition),
            span: Span::new(0, 13),
        };
        ParsedQuery::from_ast(Arc::new(ast)).unwrap()
    }

    #[test]
    fn ast_parse_cache_hit() {
        let cache = AstParseCache::new(100);
        let key = "kind:function";
        let expected = make_test_parsed_query(key);
        cache.insert(key.to_string(), expected.clone());

        let found = cache.get(key).unwrap();
        assert_eq!(found.normalized.as_ref(), expected.normalized.as_ref());

        let stats = cache.stats();
        assert_eq!(stats.hits, 1);
        assert_eq!(stats.misses, 0);
    }

    #[test]
    fn ast_parse_cache_miss() {
        let cache = AstParseCache::new(100);
        assert!(cache.get("kind:function").is_none());

        let stats = cache.stats();
        assert_eq!(stats.hits, 0);
        assert_eq!(stats.misses, 1);
    }

    #[test]
    fn ast_parse_cache_eviction() {
        let cache = AstParseCache::new(2);
        for key in ["q1", "q2", "q3"] {
            cache.insert(key.into(), make_test_parsed_query(key));
        }

        let count = cache.len();
        assert!(
            count <= 2,
            "Cache should have at most 2 entries after eviction, got {count}"
        );
        let stats = cache.stats();
        assert!(
            stats.evictions >= 1,
            "Eviction counter should be incremented (got {})",
            stats.evictions
        );
    }

    #[test]
    fn ast_parse_cache_clear() {
        let cache = AstParseCache::new(100);
        cache.insert("q1".into(), make_test_parsed_query("q1"));
        cache.clear();

        assert_eq!(cache.len(), 0);
        assert!(cache.get("q1").is_none());
    }

    #[test]
    fn ast_parse_cache_zero_capacity_defaults_to_one() {
        let cache = AstParseCache::new(0);
        for key in ["q1", "q2"] {
            cache.insert(key.into(), make_test_parsed_query(key));
        }

        let count = cache.len();
        assert!(
            count <= 1,
            "Cache with capacity 1 should have at most 1 entry, got {count}"
        );
    }

    #[test]
    fn ast_parse_cache_arc_sharing() {
        let cache = AstParseCache::new(100);
        let key = "kind:function";
        cache.insert(key.to_string(), make_test_parsed_query(key));

        let first = cache.get(key).unwrap();
        let second = cache.get(key).unwrap();
        // Both lookups must hand back the same shared allocation.
        assert!(Arc::ptr_eq(&first, &second));
    }

    #[test]
    fn tiny_lfu_preserves_hot_queries() {
        let cache = AstParseCache::with_policy_kind(3, CachePolicyKind::TinyLfu);
        cache.insert("hot".into(), make_test_parsed_query("hot"));
        for i in 0..20 {
            let cold = format!("cold{i}");
            cache.insert(cold.clone(), make_test_parsed_query(&cold));
        }

        let metrics = cache.policy_metrics();
        assert!(
            metrics.lfu_rejects > 0,
            "expected TinyLFU to reject cold entries"
        );
    }
}