use hdrhistogram::Histogram;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
use std::hash::{Hash, Hasher};
use std::sync::OnceLock;
use std::time::{Duration, Instant, SystemTime};
use super::types::{DependencyGraphData, TracePathData};
/// Default time-to-live for cached query results, in seconds.
pub const CACHE_TTL_SECS: u64 = 300;
/// Fallback TTL used when the caches were initialized without an explicit TTL.
const CACHE_TTL: Duration = Duration::from_secs(CACHE_TTL_SECS);
/// Default maximum number of entries in the trace-path LRU cache.
pub const TRACE_PATH_CACHE_CAPACITY: usize = 256;
/// Default maximum number of entries in the subgraph LRU cache.
pub const SUBGRAPH_CACHE_CAPACITY: usize = 128;
// Upper recordable bound (ms) and significant figures for latency histograms.
const HISTOGRAM_MAX_MS: u64 = 600_000; const HISTOGRAM_SIGFIGS: u8 = 3;
/// Primary bucket upper bounds (ms) used when summarizing latency histograms.
const HISTOGRAM_BUCKET_BOUNDS: &[u64] = &[
    10, 25, 50, 100, 200, 400, 800, 1600, 3200, 6400, 12_800, 25_600, 60_000,
];
/// Coarser bucket bounds (ms) for latencies beyond the primary buckets.
const OVERFLOW_BUCKET_BOUNDS: &[u64] = &[120_000, 300_000, 600_000];
/// Cache key for trace-path queries.
///
/// Carries the graph-identity fields (workspace, snapshot hash, build time,
/// schema/format versions) alongside the query parameters so results from
/// different graph snapshots can never collide in the cache.
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub struct TracePathCacheKey {
    pub workspace_root: std::path::PathBuf,
    pub snapshot_sha256: String,
    pub built_at_secs: i64,
    pub schema_version: u32,
    pub snapshot_format_version: u32,
    pub from_symbol: String,
    pub to_symbol: String,
    pub max_hops: usize,
    pub max_paths: usize,
    pub cross_language: bool,
    // Minimum confidence quantized to thousandths so the key stays Eq + Hash
    // (f64 implements neither).
    pub min_confidence_millis: u32,
}
impl TracePathCacheKey {
    /// Builds a key from query parameters.
    ///
    /// Graph-identity fields start zeroed and are expected to be filled in
    /// via [`Self::with_graph_identity`] before the key is used for lookups.
    /// `min_confidence` is clamped to `[0.0, 1.0]` and stored as an integer
    /// count of thousandths so the key remains hashable.
    pub fn new(
        from_symbol: String,
        to_symbol: String,
        max_hops: usize,
        max_paths: usize,
        cross_language: bool,
        min_confidence: f64,
    ) -> Self {
        // Quantize: clamp to the unit interval, scale to millis, round,
        // then cap at the u32 range before the narrowing cast.
        let quantized = (min_confidence.clamp(0.0, 1.0) * 1000.0)
            .round()
            .clamp(0.0, f64::from(u32::MAX));
        #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
        let min_confidence_millis = quantized as u32;
        Self {
            from_symbol,
            to_symbol,
            max_hops,
            max_paths,
            cross_language,
            min_confidence_millis,
            workspace_root: std::path::PathBuf::new(),
            snapshot_sha256: String::new(),
            built_at_secs: 0,
            schema_version: 0,
            snapshot_format_version: 0,
        }
    }
    /// Stamps the graph identity onto this key so that entries built from
    /// different workspaces or snapshots never collide.
    #[must_use]
    pub fn with_graph_identity(mut self, identity: &crate::engine::GraphIdentity) -> Self {
        self.workspace_root.clone_from(&identity.workspace_root);
        self.snapshot_sha256.clone_from(&identity.snapshot_sha256);
        self.built_at_secs = identity.built_at.timestamp();
        self.schema_version = identity.schema_version;
        self.snapshot_format_version = identity.snapshot_format_version;
        self
    }
}
/// Cache key for dependency-subgraph queries.
///
/// `symbols` is kept sorted (see `SubgraphCacheKey::new`) so logically
/// identical queries produce identical keys regardless of input order.
/// Graph-identity fields distinguish entries from different snapshots.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[allow(clippy::similar_names)]
#[allow(clippy::struct_excessive_bools)]
pub struct SubgraphCacheKey {
    pub workspace_root: std::path::PathBuf,
    pub snapshot_sha256: String,
    pub built_at_secs: i64,
    pub schema_version: u32,
    pub snapshot_format_version: u32,
    pub symbols: Vec<String>,
    pub max_depth: usize,
    pub max_nodes: usize,
    pub include_callers: bool,
    pub include_callees: bool,
    pub include_imports: bool,
    pub cross_language: bool,
}
impl SubgraphCacheKey {
    /// Builds a key from query parameters.
    ///
    /// Graph-identity fields start zeroed and are expected to be filled in
    /// via [`Self::with_graph_identity`]. The symbol list is sorted so that
    /// the same logical query always yields the same key.
    #[allow(clippy::similar_names)]
    #[allow(clippy::fn_params_excessive_bools)]
    pub fn new(
        mut symbols: Vec<String>,
        max_depth: usize,
        max_nodes: usize,
        include_callers: bool,
        include_callees: bool,
        include_imports: bool,
        cross_language: bool,
    ) -> Self {
        // sort_unstable: equal strings are indistinguishable, so stability
        // buys nothing here and the unstable sort is faster and allocation-free.
        symbols.sort_unstable();
        Self {
            workspace_root: std::path::PathBuf::new(),
            snapshot_sha256: String::new(),
            built_at_secs: 0,
            schema_version: 0,
            snapshot_format_version: 0,
            symbols,
            max_depth,
            max_nodes,
            include_callers,
            include_callees,
            include_imports,
            cross_language,
        }
    }
    /// Stamps the graph identity onto this key so that entries built from
    /// different workspaces or snapshots never collide.
    #[must_use]
    pub fn with_graph_identity(mut self, identity: &crate::engine::GraphIdentity) -> Self {
        self.workspace_root.clone_from(&identity.workspace_root);
        self.snapshot_sha256.clone_from(&identity.snapshot_sha256);
        self.built_at_secs = identity.built_at.timestamp();
        self.schema_version = identity.schema_version;
        self.snapshot_format_version = identity.snapshot_format_version;
        self
    }
}
impl PartialEq for SubgraphCacheKey {
    /// Field-wise equality across every field — exactly what a derived
    /// implementation would produce (written manually because the struct
    /// derives only Serialize/Deserialize/Clone/Debug).
    fn eq(&self, other: &Self) -> bool {
        let lhs = (
            &self.workspace_root,
            &self.snapshot_sha256,
            self.built_at_secs,
            self.schema_version,
            self.snapshot_format_version,
            &self.symbols,
            self.max_depth,
            self.max_nodes,
            self.include_callers,
            self.include_callees,
            self.include_imports,
            self.cross_language,
        );
        let rhs = (
            &other.workspace_root,
            &other.snapshot_sha256,
            other.built_at_secs,
            other.schema_version,
            other.snapshot_format_version,
            &other.symbols,
            other.max_depth,
            other.max_nodes,
            other.include_callers,
            other.include_callees,
            other.include_imports,
            other.cross_language,
        );
        lhs == rhs
    }
}
impl Eq for SubgraphCacheKey {}
impl Hash for SubgraphCacheKey {
    /// Hashes every field in declaration order, consistent with the manual
    /// `PartialEq` above. Hashing the tuple of field references feeds the
    /// hasher the same byte stream as hashing each field in sequence.
    fn hash<H: Hasher>(&self, state: &mut H) {
        (
            &self.workspace_root,
            &self.snapshot_sha256,
            self.built_at_secs,
            self.schema_version,
            self.snapshot_format_version,
            &self.symbols,
            self.max_depth,
            self.max_nodes,
            self.include_callers,
            self.include_callees,
            self.include_imports,
            self.cross_language,
        )
            .hash(state);
    }
}
/// A cached value together with the bookkeeping needed for TTL expiry.
struct CacheEntry<T> {
    data: T,
    created_at: Instant,
    ttl: Duration,
}
impl<T> CacheEntry<T> {
    /// Wraps `data`, stamping the current instant as its creation time.
    fn new(data: T, ttl: Duration) -> Self {
        let created_at = Instant::now();
        Self { data, created_at, ttl }
    }
    /// True once the entry has outlived its time-to-live.
    fn is_expired(&self) -> bool {
        self.created_at.elapsed() > self.ttl
    }
}
/// Result of a cache-backed computation plus how it was served.
#[derive(Debug, Clone)]
pub struct CacheOutcome<T> {
    pub data: T,
    pub state: CacheState,
    // Observed end-to-end latency of the lookup (includes build time on a miss).
    pub latency_ms: u64,
}
/// Whether a lookup was served from cache (`Warm`) or freshly computed (`Cold`).
#[derive(Debug, Clone, Copy, Serialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum CacheState {
    Cold,
    Warm,
}
/// Record of the most recent cache lookup, kept for diagnostics.
#[derive(Debug, Clone, Copy, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CacheEvent {
    pub state: CacheState,
    pub latency_ms: u64,
    // Wall-clock milliseconds since the Unix epoch when the event occurred.
    pub timestamp_ms: u64,
}
/// Monotonic counters describing cache effectiveness.
#[derive(Debug, Clone, Default, Serialize)]
pub struct CacheStats {
    pub hits: u64,
    pub misses: u64,
    // Entries pushed out by the LRU policy (or replaced on re-insert).
    pub evictions: u64,
    // Entries found but discarded because their TTL had elapsed.
    pub expired: u64,
}
impl CacheStats {
    /// Fraction of lookups that were hits, in `[0.0, 1.0]`.
    ///
    /// Returns `0.0` when no lookups have been recorded.
    #[cfg_attr(not(test), allow(dead_code))]
    pub fn hit_rate(&self) -> f64 {
        let total = self.hits + self.misses;
        if total == 0 {
            return 0.0;
        }
        // Direct u64 -> f64 casts keep the ratio accurate for counters beyond
        // u32::MAX; the previous u32::try_from(..).unwrap_or(u32::MAX) route
        // saturated both operands at ~4.29e9 and skewed the rate for very
        // large counts. Precision loss above 2^53 is negligible for a ratio.
        #[allow(clippy::cast_precision_loss)]
        {
            self.hits as f64 / total as f64
        }
    }
}
/// Serializable summary of a latency histogram.
///
/// All optional fields are `None` (and the bucket list empty) when
/// `count == 0`; they are omitted from the serialized output.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct LatencyStatsSnapshot {
    pub count: u64,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub min_ms: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_ms: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub average_ms: Option<f64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub p50_ms: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub p90_ms: Option<u64>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub p99_ms: Option<u64>,
    // Non-empty buckets only; see histogram_buckets for the bounds used.
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub histogram: Vec<LatencyBucketSnapshot>,
}
impl LatencyStatsSnapshot {
    /// Summarizes `hist` into min/max/mean, p50/p90/p99 and bucket counts.
    ///
    /// An empty histogram yields `count == 0` with every optional field
    /// `None` and no buckets.
    fn from_histogram(hist: &Histogram<u64>) -> Self {
        let count = hist.len();
        if count == 0 {
            return Self {
                count: 0,
                min_ms: None,
                max_ms: None,
                average_ms: None,
                p50_ms: None,
                p90_ms: None,
                p99_ms: None,
                histogram: Vec::new(),
            };
        }
        Self {
            count,
            min_ms: Some(hist.min()),
            max_ms: Some(hist.max()),
            average_ms: Some(hist.mean()),
            p50_ms: Some(hist.value_at_quantile(0.5)),
            p90_ms: Some(hist.value_at_quantile(0.9)),
            p99_ms: Some(hist.value_at_quantile(0.99)),
            histogram: histogram_buckets(hist),
        }
    }
}
/// One histogram bucket: how many samples fell at or below `upper_ms`
/// (and above the previous bucket's bound).
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct LatencyBucketSnapshot {
    pub upper_ms: u64,
    pub count: u64,
}
/// Full telemetry snapshot for one cache: counters, per-state latency
/// summaries, and the most recent lookup event.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct CacheSnapshot {
    pub stats: CacheStats,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub warm_latency: Option<LatencyStatsSnapshot>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cold_latency: Option<LatencyStatsSnapshot>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub last_event: Option<CacheEvent>,
}
// Histograms are cleared once they hold this many samples, bounding memory.
const MAX_HISTOGRAM_SAMPLES: u64 = 10_000;
/// Per-cache counters, warm/cold latency histograms and last-event marker.
#[derive(Debug)]
struct CacheTelemetry {
    stats: CacheStats,
    warm_hist: Histogram<u64>,
    cold_hist: Histogram<u64>,
    last_event: Option<CacheEvent>,
}
impl CacheTelemetry {
    /// Fresh telemetry: zeroed counters, empty histograms, no last event.
    fn new() -> Self {
        Self {
            stats: CacheStats::default(),
            warm_hist: Self::create_histogram("warm"),
            cold_hist: Self::create_histogram("cold"),
            last_event: None,
        }
    }
    /// Creates a bounded latency histogram, degrading gracefully:
    /// bounded -> unbounded at configured sigfigs -> 2-sigfig default.
    fn create_histogram(name: &str) -> Histogram<u64> {
        Histogram::new_with_bounds(1, HISTOGRAM_MAX_MS, HISTOGRAM_SIGFIGS).unwrap_or_else(|e| {
            tracing::warn!(
                "Failed to initialize {} histogram with bounds: {}. Using unbounded fallback.",
                name,
                e
            );
            Histogram::new(HISTOGRAM_SIGFIGS).unwrap_or_else(|e2| {
                tracing::error!(
                    "Critical: Failed to create fallback histogram: {}. Using default.",
                    e2
                );
                Histogram::new(2).expect("default histogram creation must succeed")
            })
        })
    }
    /// Resets counters, histograms and the last-event marker to a cold state.
    fn reset(&mut self) {
        self.stats = CacheStats::default();
        self.warm_hist = Self::create_histogram("warm");
        self.cold_hist = Self::create_histogram("cold");
        self.last_event = None;
    }
    /// Counts a cache hit and records its latency as a warm event.
    fn record_hit(&mut self, latency_ms: u64) {
        self.stats.hits += 1;
        self.record_event(CacheState::Warm, latency_ms);
    }
    /// Counts a cache miss and records its latency as a cold event.
    fn record_miss(&mut self, latency_ms: u64) {
        self.stats.misses += 1;
        self.record_event(CacheState::Cold, latency_ms);
    }
    /// Counts an entry discarded because its TTL had elapsed.
    fn record_expired(&mut self) {
        self.stats.expired += 1;
    }
    /// Counts an entry pushed out by the LRU policy.
    fn record_eviction(&mut self) {
        self.stats.evictions += 1;
    }
    /// Records one lookup latency into the histogram for `state` and updates
    /// `last_event`. Latencies are floored at 1ms; values above
    /// `HISTOGRAM_MAX_MS` are saturated for histogram purposes (with a warning)
    /// while `last_event` keeps the floored value.
    fn record_event(&mut self, state: CacheState, latency_ms: u64) {
        let latency = latency_ms.max(1);
        let histogram_value = latency.min(HISTOGRAM_MAX_MS);
        if latency_ms > HISTOGRAM_MAX_MS {
            tracing::warn!(
                "Cache {:?} latency {}ms exceeds histogram maximum {}ms - saturating to max for histogram",
                state,
                latency_ms,
                HISTOGRAM_MAX_MS
            );
        }
        // Select the target histogram once; the warm and cold paths previously
        // duplicated the rotation + record logic verbatim.
        let (hist, label) = match state {
            CacheState::Warm => (&mut self.warm_hist, "warm"),
            CacheState::Cold => (&mut self.cold_hist, "cold"),
        };
        if hist.len() >= MAX_HISTOGRAM_SAMPLES {
            tracing::debug!(
                "{} histogram reached {} samples, rotating to prevent memory growth",
                label,
                MAX_HISTOGRAM_SAMPLES
            );
            hist.clear();
        }
        if let Err(e) = hist.record(histogram_value) {
            tracing::warn!(
                "Failed to record {} cache latency {}ms: {}",
                label,
                histogram_value,
                e
            );
        }
        self.last_event = Some(CacheEvent {
            state,
            latency_ms: latency,
            timestamp_ms: epoch_ms_now(),
        });
    }
    /// Point-in-time copy of stats, latency summaries and the last event.
    /// Empty histograms are reported as `None` rather than zeroed summaries.
    fn snapshot(&self) -> CacheSnapshot {
        let summarize = |hist: &Histogram<u64>| {
            if hist.is_empty() {
                None
            } else {
                Some(LatencyStatsSnapshot::from_histogram(hist))
            }
        };
        CacheSnapshot {
            stats: self.stats.clone(),
            warm_latency: summarize(&self.warm_hist),
            cold_latency: summarize(&self.cold_hist),
            last_event: self.last_event,
        }
    }
}
// Process-wide caches and telemetry, created lazily by the init_* functions.
static TRACE_PATH_CACHE: OnceLock<
    Mutex<lru::LruCache<TracePathCacheKey, CacheEntry<TracePathData>>>,
> = OnceLock::new();
static SUBGRAPH_CACHE: OnceLock<
    Mutex<lru::LruCache<SubgraphCacheKey, CacheEntry<DependencyGraphData>>>,
> = OnceLock::new();
// TTL shared by BOTH caches; whichever init_* call runs first wins and
// later TTL arguments are silently ignored (OnceLock semantics).
static QUERY_CACHE_TTL: OnceLock<Duration> = OnceLock::new();
static TRACE_PATH_TELEMETRY: OnceLock<Mutex<CacheTelemetry>> = OnceLock::new();
static SUBGRAPH_TELEMETRY: OnceLock<Mutex<CacheTelemetry>> = OnceLock::new();
/// Initializes the trace-path cache, its telemetry, and the shared TTL.
///
/// Idempotent: every cell is a `OnceLock`, so only the first caller's
/// `capacity`/`ttl` take effect and repeat calls are no-ops.
pub fn init_trace_path_cache(capacity: std::num::NonZeroUsize, ttl: Duration) {
    TRACE_PATH_TELEMETRY.get_or_init(|| Mutex::new(CacheTelemetry::new()));
    QUERY_CACHE_TTL.get_or_init(|| ttl);
    TRACE_PATH_CACHE.get_or_init(|| {
        tracing::info!(
            capacity = capacity.get(),
            ttl_secs = ttl.as_secs(),
            "Initializing trace path cache"
        );
        Mutex::new(lru::LruCache::new(capacity))
    });
}
/// Initializes the subgraph cache, its telemetry, and the shared TTL.
///
/// Idempotent: every cell is a `OnceLock`, so only the first caller's
/// `capacity`/`ttl` take effect and repeat calls are no-ops.
pub fn init_subgraph_cache(capacity: std::num::NonZeroUsize, ttl: Duration) {
    SUBGRAPH_TELEMETRY.get_or_init(|| Mutex::new(CacheTelemetry::new()));
    QUERY_CACHE_TTL.get_or_init(|| ttl);
    SUBGRAPH_CACHE.get_or_init(|| {
        tracing::info!(
            capacity = capacity.get(),
            ttl_secs = ttl.as_secs(),
            "Initializing subgraph cache"
        );
        Mutex::new(lru::LruCache::new(capacity))
    });
}
/// TTL configured at init time, or the compile-time default when neither
/// cache has been initialized yet.
fn get_cache_ttl() -> Duration {
    match QUERY_CACHE_TTL.get() {
        Some(ttl) => *ttl,
        None => CACHE_TTL,
    }
}
/// Returns the cached trace-path result for `key`, or computes it with
/// `builder` and caches it under the configured TTL.
///
/// The outcome reports whether the result was served warm (hit) or cold
/// (miss) along with the observed latency; cold latency includes the
/// builder's run time. Expired entries are evicted, counted, and rebuilt
/// as a miss. The builder runs outside the cache lock, so two concurrent
/// misses on the same key may both compute (last writer wins).
///
/// # Panics
/// Panics if [`init_trace_path_cache`] has not been called.
pub fn get_or_compute_trace_path<F>(
    key: TracePathCacheKey,
    builder: F,
) -> CacheOutcome<TracePathData>
where
    F: FnOnce() -> TracePathData,
{
    let start = Instant::now();
    tracing::debug!(
        "Cache lookup: trace_path from={} to={}",
        key.from_symbol,
        key.to_symbol
    );
    let cache = TRACE_PATH_CACHE
        .get()
        .expect("Trace path cache not initialized - call init_trace_path_cache() first");
    // Telemetry is initialized together with the cache, so resolve it once
    // up front instead of repeating the OnceLock lookup at every record site.
    let telemetry = TRACE_PATH_TELEMETRY
        .get()
        .expect("Trace path telemetry not initialized");
    let (cache_result, was_expired) = {
        let mut lock = cache.lock();
        if let Some(entry) = lock.get(&key) {
            if entry.is_expired() {
                // Drop the stale entry so it is rebuilt below.
                lock.pop(&key);
                (None, true)
            } else {
                (Some(entry.data.clone()), false)
            }
        } else {
            (None, false)
        }
    };
    if let Some(data) = cache_result {
        let latency_ms = elapsed_ms(start.elapsed());
        telemetry.lock().record_hit(latency_ms);
        tracing::debug!("Cache HIT: trace_path latency={}ms", latency_ms);
        return CacheOutcome {
            data,
            state: CacheState::Warm,
            latency_ms,
        };
    }
    if was_expired {
        telemetry.lock().record_expired();
    }
    // Build outside the cache lock so slow builders don't block other lookups.
    let result = builder();
    let ttl = get_cache_ttl();
    {
        let mut lock = cache.lock();
        // push returns the displaced entry when the cache was full (or the
        // key raced back in); either way count it as an eviction.
        if lock.push(key, CacheEntry::new(result.clone(), ttl)).is_some() {
            telemetry.lock().record_eviction();
        }
    }
    let latency_ms = elapsed_ms(start.elapsed());
    telemetry.lock().record_miss(latency_ms);
    tracing::debug!("Cache MISS: trace_path latency={}ms", latency_ms);
    CacheOutcome {
        data: result,
        state: CacheState::Cold,
        latency_ms,
    }
}
/// Returns the cached subgraph result for `key`, or computes it with
/// `builder` and caches it under the configured TTL.
///
/// The outcome reports whether the result was served warm (hit) or cold
/// (miss) along with the observed latency; cold latency includes the
/// builder's run time. Expired entries are evicted, counted, and rebuilt
/// as a miss. The builder runs outside the cache lock, so two concurrent
/// misses on the same key may both compute (last writer wins).
///
/// # Panics
/// Panics if [`init_subgraph_cache`] has not been called.
pub fn get_or_compute_subgraph<F>(
    key: SubgraphCacheKey,
    builder: F,
) -> CacheOutcome<DependencyGraphData>
where
    F: FnOnce() -> DependencyGraphData,
{
    let start = Instant::now();
    tracing::debug!(
        "Cache lookup: subgraph symbols={:?}",
        key.symbols.iter().take(3).collect::<Vec<_>>()
    );
    let cache = SUBGRAPH_CACHE
        .get()
        .expect("Subgraph cache not initialized - call init_subgraph_cache() first");
    // Telemetry is initialized together with the cache, so resolve it once
    // up front instead of repeating the OnceLock lookup at every record site.
    let telemetry = SUBGRAPH_TELEMETRY
        .get()
        .expect("Subgraph telemetry not initialized");
    let (cache_result, was_expired) = {
        let mut lock = cache.lock();
        if let Some(entry) = lock.get(&key) {
            if entry.is_expired() {
                // Drop the stale entry so it is rebuilt below.
                lock.pop(&key);
                (None, true)
            } else {
                (Some(entry.data.clone()), false)
            }
        } else {
            (None, false)
        }
    };
    if let Some(data) = cache_result {
        let latency_ms = elapsed_ms(start.elapsed());
        telemetry.lock().record_hit(latency_ms);
        tracing::debug!("Cache HIT: subgraph latency={}ms", latency_ms);
        return CacheOutcome {
            data,
            state: CacheState::Warm,
            latency_ms,
        };
    }
    if was_expired {
        telemetry.lock().record_expired();
    }
    // Build outside the cache lock so slow builders don't block other lookups.
    let result = builder();
    let ttl = get_cache_ttl();
    {
        let mut lock = cache.lock();
        // push returns the displaced entry when the cache was full (or the
        // key raced back in); either way count it as an eviction.
        if lock.push(key, CacheEntry::new(result.clone(), ttl)).is_some() {
            telemetry.lock().record_eviction();
        }
    }
    let latency_ms = elapsed_ms(start.elapsed());
    telemetry.lock().record_miss(latency_ms);
    tracing::debug!("Cache MISS: subgraph latency={}ms", latency_ms);
    CacheOutcome {
        data: result,
        state: CacheState::Cold,
        latency_ms,
    }
}
/// Clones the trace-path cache's aggregate counters.
///
/// # Panics
/// Panics if the trace-path cache was never initialized.
#[cfg_attr(not(test), allow(dead_code))]
pub fn trace_path_cache_stats() -> CacheStats {
    let telemetry = TRACE_PATH_TELEMETRY
        .get()
        .expect("Trace path telemetry not initialized");
    telemetry.lock().stats.clone()
}
/// Clones the subgraph cache's aggregate counters.
///
/// # Panics
/// Panics if the subgraph cache was never initialized.
#[cfg_attr(not(test), allow(dead_code))]
pub fn subgraph_cache_stats() -> CacheStats {
    let telemetry = SUBGRAPH_TELEMETRY
        .get()
        .expect("Subgraph telemetry not initialized");
    telemetry.lock().stats.clone()
}
/// Full telemetry snapshot (stats, latency summaries, last event) for the
/// trace-path cache.
///
/// # Panics
/// Panics if the trace-path cache was never initialized.
pub fn trace_path_cache_snapshot() -> CacheSnapshot {
    let telemetry = TRACE_PATH_TELEMETRY
        .get()
        .expect("Trace path telemetry not initialized");
    let guard = telemetry.lock();
    guard.snapshot()
}
/// Full telemetry snapshot (stats, latency summaries, last event) for the
/// subgraph cache.
///
/// # Panics
/// Panics if the subgraph cache was never initialized.
pub fn subgraph_cache_snapshot() -> CacheSnapshot {
    let telemetry = SUBGRAPH_TELEMETRY
        .get()
        .expect("Subgraph telemetry not initialized");
    let guard = telemetry.lock();
    guard.snapshot()
}
/// Empties both caches and resets both telemetry recorders to a cold state.
/// Cells that were never initialized are simply skipped.
#[cfg_attr(not(test), allow(dead_code))]
pub fn clear_all_caches() {
    if let Some(trace_cache) = TRACE_PATH_CACHE.get() {
        trace_cache.lock().clear();
    }
    if let Some(subgraph_cache) = SUBGRAPH_CACHE.get() {
        subgraph_cache.lock().clear();
    }
    if let Some(trace_telemetry) = TRACE_PATH_TELEMETRY.get() {
        trace_telemetry.lock().reset();
    }
    if let Some(subgraph_telemetry) = SUBGRAPH_TELEMETRY.get() {
        subgraph_telemetry.lock().reset();
    }
}
/// Current entry counts as `(trace_path, subgraph)`; an uninitialized
/// cache reports 0.
#[cfg_attr(not(test), allow(dead_code))]
pub fn cache_sizes() -> (usize, usize) {
    (
        TRACE_PATH_CACHE.get().map_or(0, |c| c.lock().len()),
        SUBGRAPH_CACHE.get().map_or(0, |c| c.lock().len()),
    )
}
/// Groups `histogram` samples into the primary bucket bounds, then into the
/// coarser overflow bounds, finally sweeping anything beyond the last bound
/// into a `u64::MAX` bucket. Empty buckets are omitted; sustained slowness
/// in the overflow range is logged.
fn histogram_buckets(histogram: &Histogram<u64>) -> Vec<LatencyBucketSnapshot> {
    let mut buckets = Vec::new();
    // Inclusive lower edge of the bucket currently being counted.
    let mut window_start = 1;
    for &upper_ms in HISTOGRAM_BUCKET_BOUNDS {
        let count = histogram.count_between(window_start, upper_ms);
        if count > 0 {
            buckets.push(LatencyBucketSnapshot { upper_ms, count });
        }
        window_start = upper_ms + 1;
    }
    // Only overflow bounds strictly above the primary range add information.
    let primary_max = *HISTOGRAM_BUCKET_BOUNDS.last().unwrap_or(&HISTOGRAM_MAX_MS);
    for &bound in OVERFLOW_BUCKET_BOUNDS.iter().filter(|&&b| b > primary_max) {
        let count = histogram.count_between(window_start, bound);
        if count > 0 {
            buckets.push(LatencyBucketSnapshot {
                upper_ms: bound,
                count,
            });
            if count > 10 {
                tracing::warn!(
                    "High number of cache operations ({}) exceeded {}ms threshold",
                    count,
                    bound
                );
            }
        }
        window_start = bound + 1;
    }
    // Everything past the largest overflow bound is an extreme outlier.
    let final_overflow = histogram.count_between(window_start, u64::MAX);
    if final_overflow > 0 {
        tracing::warn!(
            "Extreme outliers detected: {} cache operations exceeded {}ms (10min)",
            final_overflow,
            OVERFLOW_BUCKET_BOUNDS.last().unwrap_or(&HISTOGRAM_MAX_MS)
        );
        buckets.push(LatencyBucketSnapshot {
            upper_ms: u64::MAX,
            count: final_overflow,
        });
    }
    buckets
}
/// Converts a measured duration to whole milliseconds, rounded to nearest,
/// with a floor of 1ms so sub-millisecond operations still register.
fn elapsed_ms(duration: Duration) -> u64 {
    let fractional_ms = duration.as_secs_f64() * 1000.0;
    if fractional_ms < 1.0 {
        return 1;
    }
    #[allow(
        clippy::cast_sign_loss,
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation
    )]
    {
        fractional_ms.round().clamp(1.0, u64::MAX as f64) as u64
    }
}
/// Wall-clock milliseconds since the Unix epoch.
///
/// Returns 0 (with an error log) if the system clock reads before the
/// epoch, and saturates at `u64::MAX` if the millisecond count overflows.
fn epoch_ms_now() -> u64 {
    match SystemTime::now().duration_since(SystemTime::UNIX_EPOCH) {
        Ok(since_epoch) => u64::try_from(since_epoch.as_millis()).unwrap_or_else(|_| {
            tracing::error!(
                "System time milliseconds exceed u64::MAX, using saturated value"
            );
            u64::MAX
        }),
        Err(e) => {
            tracing::error!("Failed to get system time: {e}, using 0");
            0
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use serial_test::serial;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Keys built from identical parameters must compare equal.
    #[test]
    #[serial]
    fn test_trace_path_cache_key_equality() {
        let key1 = TracePathCacheKey::new("foo".to_string(), "bar".to_string(), 5, 10, true, 0.5);
        let key2 = TracePathCacheKey::new("foo".to_string(), "bar".to_string(), 5, 10, true, 0.5);
        assert_eq!(key1, key2);
    }

    // Symbol order must not matter: `SubgraphCacheKey::new` sorts the list.
    #[test]
    #[serial]
    fn test_subgraph_cache_key_deterministic() {
        let key1 = SubgraphCacheKey::new(
            vec!["z".to_string(), "a".to_string(), "m".to_string()],
            2,
            50,
            true,
            true,
            false,
            true,
        );
        let key2 = SubgraphCacheKey::new(
            vec!["a".to_string(), "m".to_string(), "z".to_string()],
            2,
            50,
            true,
            true,
            false,
            true,
        );
        assert_eq!(key1, key2);
    }

    // A second lookup with the same key must be served warm without running
    // the builder again, and hit/miss counters must each advance by one.
    #[test]
    #[serial]
    fn test_trace_path_cache_hit() {
        init_trace_path_cache(
            std::num::NonZeroUsize::new(TRACE_PATH_CACHE_CAPACITY).unwrap(),
            Duration::from_secs(CACHE_TTL_SECS),
        );
        clear_all_caches();
        // Counts builder invocations across both lookups.
        #[allow(clippy::items_after_statements)]
        static CALL_COUNT: AtomicUsize = AtomicUsize::new(0);
        let key = TracePathCacheKey::new(
            "test_from".to_string(),
            "test_to".to_string(),
            5,
            10,
            true,
            0.5,
        );
        let builder = || {
            CALL_COUNT.fetch_add(1, Ordering::SeqCst);
            TracePathData {
                paths: vec![],
                from_symbol: "test_from".to_string(),
                to_symbol: "test_to".to_string(),
            }
        };
        let stats_before = trace_path_cache_stats();
        let outcome1 = get_or_compute_trace_path(key.clone(), builder);
        assert_eq!(outcome1.state, CacheState::Cold);
        let result1 = outcome1.data;
        assert_eq!(CALL_COUNT.load(Ordering::SeqCst), 1);
        assert_eq!(result1.from_symbol, "test_from");
        let outcome2 = get_or_compute_trace_path(key.clone(), builder);
        assert_eq!(outcome2.state, CacheState::Warm);
        let result2 = outcome2.data;
        assert_eq!(
            CALL_COUNT.load(Ordering::SeqCst),
            1,
            "Builder should not be called on cache hit"
        );
        assert_eq!(result2.from_symbol, "test_from");
        // Deltas (not absolutes) so leftover state from other serial tests
        // cannot cause false failures.
        let stats_after = trace_path_cache_stats();
        assert_eq!(stats_after.hits.saturating_sub(stats_before.hits), 1);
        assert_eq!(stats_after.misses.saturating_sub(stats_before.misses), 1);
    }

    // Filling past capacity must keep the cache bounded and count evictions.
    #[test]
    #[serial]
    fn test_cache_eviction() {
        init_trace_path_cache(
            std::num::NonZeroUsize::new(TRACE_PATH_CACHE_CAPACITY).unwrap(),
            Duration::from_secs(CACHE_TTL_SECS),
        );
        clear_all_caches();
        for i in 0..TRACE_PATH_CACHE_CAPACITY + 10 {
            let key =
                TracePathCacheKey::new(format!("from_{i}"), format!("to_{i}"), 5, 10, true, 0.5);
            get_or_compute_trace_path(key, || TracePathData {
                paths: vec![],
                from_symbol: format!("from_{i}"),
                to_symbol: format!("to_{i}"),
            });
        }
        let (trace_size, _) = cache_sizes();
        assert!(
            trace_size <= TRACE_PATH_CACHE_CAPACITY,
            "Cache should not exceed max capacity"
        );
        let stats = trace_path_cache_stats();
        assert!(stats.evictions > 0, "Evictions should have occurred");
        let snapshot = trace_path_cache_snapshot();
        assert!(snapshot.cold_latency.is_some());
    }

    // A miss followed by a hit must surface in both the snapshot and the
    // aggregate stats for the subgraph cache.
    #[test]
    #[serial]
    fn test_subgraph_cache_snapshot_reports_metrics() {
        init_subgraph_cache(
            std::num::NonZeroUsize::new(SUBGRAPH_CACHE_CAPACITY).unwrap(),
            Duration::from_secs(CACHE_TTL_SECS),
        );
        clear_all_caches();
        let key = SubgraphCacheKey::new(vec!["root".into()], 2, 10, true, true, false, true);
        let builder = || DependencyGraphData {
            nodes: vec![],
            edges: vec![],
            rendered: None,
        };
        let _ = get_or_compute_subgraph(key.clone(), builder);
        let _ = get_or_compute_subgraph(key, builder);
        let snapshot = subgraph_cache_snapshot();
        assert!(snapshot.stats.hits >= 1);
        assert!(snapshot.stats.misses >= 1);
        assert!(snapshot.last_event.is_some());
        let aggregated = subgraph_cache_stats();
        assert!(aggregated.hits >= 1);
    }
}