use super::*;
use crate::backend::{BackendError, DispatchConfig, VyreBackend};
use std::sync::Arc;
use vyre_foundation::ir::Program;
mod on_disk {
use std::fmt::Write as _;
use std::fs;
use std::io;
use std::path::{Path, PathBuf};
use super::{PipelineFeatureFlags, CURRENT_PIPELINE_CACHE_KEY_VERSION};
use blake3::Hasher;
/// File extension used for finalized on-disk pipeline-cache artifacts.
pub(super) const CACHE_EXTENSION: &str = "bin";
/// Derives the 32-byte cache key for a compiled pipeline artifact.
///
/// The key commits to the cache-format version, the backend identity, the
/// driver version, the device generation, the feature-flag bits, and the
/// program wire bytes. Every variable-length field is length-prefixed before
/// being fed to the hasher, so different splits of the same concatenated
/// bytes can never collide (see the length-extension test below).
#[must_use]
pub(super) fn compute_cache_key(
    program_wire: &[u8],
    backend_id: &str,
    driver_version: &str,
    device_gen: &str,
    feature_flags: PipelineFeatureFlags,
) -> [u8; 32] {
    let mut hasher = Hasher::new();
    // Version first, so a format bump invalidates every existing entry.
    hasher.update(&CURRENT_PIPELINE_CACHE_KEY_VERSION.to_le_bytes());
    // The three identity strings share the same length-prefixed encoding.
    for field in &[backend_id, driver_version, device_gen] {
        hasher.update(&(field.len() as u32).to_le_bytes());
        hasher.update(field.as_bytes());
    }
    hasher.update(&feature_flags.0.to_le_bytes());
    // Program bytes get a u64 prefix: wire payloads may exceed u32::MAX.
    hasher.update(&(program_wire.len() as u64).to_le_bytes());
    hasher.update(program_wire);
    *hasher.finalize().as_bytes()
}
/// Maps a cache key to its on-disk path inside `cache_dir`.
///
/// The file name is the 64-character lowercase hex encoding of the key
/// followed by `.` and [`CACHE_EXTENSION`]; hex keeps names portable across
/// filesystems and case-insensitive volumes.
#[must_use]
pub(super) fn cache_path(cache_dir: &Path, key: &[u8; 32]) -> PathBuf {
    let mut name = key.iter().fold(
        // 64 hex chars + '.' + extension, reserved up front.
        String::with_capacity(64 + 1 + CACHE_EXTENSION.len()),
        |mut acc, byte| {
            let _ = write!(&mut acc, "{byte:02x}");
            acc
        },
    );
    name.push('.');
    name.push_str(CACHE_EXTENSION);
    cache_dir.join(name)
}
/// Reads the cached artifact for `key`, if one exists.
///
/// A missing file is a normal cache miss and yields `Ok(None)`; every other
/// I/O failure is surfaced as [`CacheError::Io`] with the offending path.
pub(super) fn load(cache_dir: &Path, key: &[u8; 32]) -> Result<Option<Vec<u8>>, CacheError> {
    let path = cache_path(cache_dir, key);
    let read_result = fs::read(&path);
    match read_result {
        Ok(bytes) => Ok(Some(bytes)),
        Err(source) => {
            if source.kind() == io::ErrorKind::NotFound {
                Ok(None)
            } else {
                Err(CacheError::Io { path, source })
            }
        }
    }
}
pub(super) fn store(cache_dir: &Path, key: &[u8; 32], bytes: &[u8]) -> Result<(), CacheError> {
fs::create_dir_all(cache_dir).map_err(|e| CacheError::Io {
path: cache_dir.to_path_buf(),
source: e,
})?;
let final_path = cache_path(cache_dir, key);
let tmp_path = final_path.with_extension("bin.tmp");
fs::write(&tmp_path, bytes).map_err(|e| CacheError::Io {
path: tmp_path.clone(),
source: e,
})?;
fs::rename(&tmp_path, &final_path).map_err(|e| CacheError::Io {
path: final_path,
source: e,
})
}
/// Errors produced by the on-disk pipeline cache.
#[derive(Debug, thiserror::Error)]
pub(super) enum CacheError {
    /// A filesystem operation (read, write, create-dir, or rename) failed;
    /// carries the path that was being touched when it happened.
    #[error(
        "Fix: pipeline-cache I/O failed at {path:?}. \
Ensure the cache directory is writable: {source}"
    )]
    Io {
        // Path involved in the failed operation (file or directory).
        path: PathBuf,
        // Underlying OS error, preserved as the error source chain.
        #[source]
        source: io::Error,
    },
}
// Unit tests for the on-disk cache: key derivation properties and the
// store/load round trip against real (temporary) directories.
#[cfg(test)]
mod tests {
    use super::*;

    // Two fixed, distinct 32-byte keys for store/load scenarios.
    fn key1() -> [u8; 32] {
        [1_u8; 32]
    }
    fn key2() -> [u8; 32] {
        [2_u8; 32]
    }

    // Identical inputs must hash to the identical key.
    #[test]
    fn compute_cache_key_is_deterministic() {
        let a = compute_cache_key(
            b"bytes",
            "backend-a",
            "v24",
            "ada",
            PipelineFeatureFlags::SUBGROUP_OPS,
        );
        let b = compute_cache_key(
            b"bytes",
            "backend-a",
            "v24",
            "ada",
            PipelineFeatureFlags::SUBGROUP_OPS,
        );
        assert_eq!(a, b);
    }

    // Each of the following four tests varies exactly one input and expects
    // the key to change, proving every field participates in the hash.
    #[test]
    fn compute_cache_key_changes_with_driver_version() {
        let a = compute_cache_key(
            b"x",
            "backend-a",
            "v24",
            "gen-a",
            PipelineFeatureFlags::empty(),
        );
        let b = compute_cache_key(
            b"x",
            "backend-a",
            "v25",
            "gen-a",
            PipelineFeatureFlags::empty(),
        );
        assert_ne!(a, b);
    }
    #[test]
    fn compute_cache_key_changes_with_device_gen() {
        let a = compute_cache_key(
            b"x",
            "backend-a",
            "v24",
            "gen-a",
            PipelineFeatureFlags::empty(),
        );
        let b = compute_cache_key(
            b"x",
            "backend-a",
            "v24",
            "gen-b",
            PipelineFeatureFlags::empty(),
        );
        assert_ne!(a, b);
    }
    #[test]
    fn compute_cache_key_changes_with_feature_flags() {
        let a = compute_cache_key(
            b"x",
            "backend-a",
            "v24",
            "gen-a",
            PipelineFeatureFlags::empty(),
        );
        let b = compute_cache_key(
            b"x",
            "backend-a",
            "v24",
            "gen-a",
            PipelineFeatureFlags::SUBGROUP_OPS,
        );
        assert_ne!(a, b);
    }
    #[test]
    fn compute_cache_key_changes_with_program_bytes() {
        let a = compute_cache_key(
            b"prog-a",
            "backend-a",
            "v24",
            "gen-a",
            PipelineFeatureFlags::empty(),
        );
        let b = compute_cache_key(
            b"prog-b",
            "backend-a",
            "v24",
            "gen-a",
            PipelineFeatureFlags::empty(),
        );
        assert_ne!(a, b);
    }

    // Moving a byte across adjacent string fields must change the key; the
    // length prefixes in compute_cache_key guarantee this.
    #[test]
    fn compute_cache_key_not_vulnerable_to_length_extension() {
        let a = compute_cache_key(b"", "ab", "cd", "gen-a", PipelineFeatureFlags::empty());
        let b = compute_cache_key(b"", "abc", "d", "gen-a", PipelineFeatureFlags::empty());
        assert_ne!(a, b);
    }

    // File name shape: 64 hex chars + ".bin".
    #[test]
    fn cache_path_is_hex_and_bin_extension() {
        let d = Path::new("/tmp");
        let p = cache_path(d, &[0xAB_u8; 32]);
        let fname = p.file_name().unwrap().to_string_lossy().to_string();
        assert!(fname.ends_with(".bin"));
        assert!(fname.contains("abababab"));
        assert_eq!(fname.len(), 64 + 4);
    }

    // Absent entry => Ok(None), not an error.
    #[test]
    fn load_miss_returns_none() {
        let dir = tempfile::tempdir().unwrap();
        let r = load(dir.path(), &key1()).unwrap();
        assert!(r.is_none());
    }

    // Basic persistence: what was stored is what loads back.
    #[test]
    fn store_then_load_roundtrips() {
        let dir = tempfile::tempdir().unwrap();
        let payload = b"compiled-target-bytes".to_vec();
        store(dir.path(), &key1(), &payload).unwrap();
        let loaded = load(dir.path(), &key1()).unwrap();
        assert_eq!(loaded.as_deref(), Some(payload.as_slice()));
    }

    // store() must create the full nested directory chain on demand.
    #[test]
    fn store_creates_missing_cache_dir() {
        let parent = tempfile::tempdir().unwrap();
        let nested = parent.path().join("a").join("b").join("c");
        assert!(!nested.exists());
        store(&nested, &key1(), b"blob").unwrap();
        let loaded = load(&nested, &key1()).unwrap();
        assert_eq!(loaded.as_deref(), Some(b"blob".as_slice()));
    }

    // Distinct keys map to distinct files that don't clobber each other.
    #[test]
    fn different_keys_do_not_overlap() {
        let dir = tempfile::tempdir().unwrap();
        store(dir.path(), &key1(), b"one").unwrap();
        store(dir.path(), &key2(), b"two").unwrap();
        assert_eq!(
            load(dir.path(), &key1()).unwrap().as_deref(),
            Some(b"one".as_slice())
        );
        assert_eq!(
            load(dir.path(), &key2()).unwrap().as_deref(),
            Some(b"two".as_slice())
        );
    }

    // Rewriting the same key fully replaces the previous contents.
    #[test]
    fn overwriting_same_key_preserves_atomicity() {
        let dir = tempfile::tempdir().unwrap();
        store(dir.path(), &key1(), b"first").unwrap();
        store(dir.path(), &key1(), b"second").unwrap();
        assert_eq!(
            load(dir.path(), &key1()).unwrap().as_deref(),
            Some(b"second".as_slice())
        );
    }
}
}
// Integration-style tests for the pipeline layer: compile/dispatch routing,
// cache digests and telemetry, prewarm behavior, and admission checks. The
// mock backends here record call counts/arguments so the tests can assert on
// which code path was taken.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::backend::CompiledPipeline;
    use vyre_foundation::ir::{BufferDecl, DataType, Expr, Node};

    // Backend that echoes its inputs and counts `dispatch` invocations.
    #[derive(Default)]
    struct CountingBackend {
        calls: std::sync::Mutex<usize>,
    }
    impl crate::backend::private::Sealed for CountingBackend {}
    impl VyreBackend for CountingBackend {
        fn id(&self) -> &'static str {
            "counting"
        }
        fn dispatch(
            &self,
            _program: &Program,
            inputs: &[Vec<u8>],
            _config: &DispatchConfig,
        ) -> Result<Vec<Vec<u8>>, BackendError> {
            *self.calls.lock().unwrap() += 1;
            Ok(inputs.to_vec())
        }
    }

    fn empty_program() -> Program {
        Program::default()
    }

    // Single-store program; varying `value` varies the program's semantics,
    // which the digest tests below rely on.
    fn store_program(output_name: &'static str, value: u32) -> Program {
        Program::wrapped(
            vec![BufferDecl::output(output_name, 0, DataType::U32).with_count(1)],
            [64, 1, 1],
            vec![Node::store(output_name, Expr::u32(0), Expr::u32(value))],
        )
    }

    // Digest must change when program semantics change, and be deterministic
    // for the same program.
    #[test]
    fn normalized_program_digest_tracks_program_structure() {
        let a = store_program("out", 7);
        let b = store_program("out", 8);
        assert_ne!(
            normalized_program_cache_digest(&a),
            normalized_program_cache_digest(&b),
            "Fix: backend shader caches must miss when program semantics change."
        );
        assert_eq!(
            normalized_program_cache_digest(&a),
            normalized_program_cache_digest(&a),
            "Fix: normalized program cache digests must be deterministic."
        );
    }

    // Policy-relevant DispatchConfig fields must feed the cache hash, and the
    // human-readable policy string has a pinned format.
    #[test]
    fn dispatch_policy_cache_hash_tracks_codegen_policy() {
        let base = DispatchConfig {
            ulp_budget: Some(1),
            workgroup_override: Some([64, 1, 1]),
            ..Default::default()
        };
        let mut changed = base.clone();
        changed.workgroup_override = Some([128, 1, 1]);
        let mut a = blake3::Hasher::new();
        update_dispatch_policy_cache_hash(&mut a, &base);
        let mut b = blake3::Hasher::new();
        update_dispatch_policy_cache_hash(&mut b, &changed);
        assert_ne!(a.finalize(), b.finalize());
        assert_eq!(
            dispatch_policy_cache_string(&base),
            "ulp=Some(1):wg=Some([64, 1, 1])"
        );
    }

    // The shared disk cache shards by the first two hex chars of the
    // fingerprinted key and round-trips written bytes.
    #[test]
    fn shared_disk_pipeline_cache_round_trips_and_shards() {
        let dir = tempfile::tempdir().unwrap();
        let cache = DiskPipelineCache::open(dir.path()).unwrap();
        let fp = PipelineDeviceFingerprint::from_parts(1, 2, "driver-a", "runtime-b");
        let key = [7_u8; 32];
        let path = cache.path_for(key, fp);
        let cache_key = fp.cache_key(key);
        let cache_key_hex = hex_encode(&cache_key);
        assert_eq!(
            path.parent().and_then(std::path::Path::file_name),
            Some(std::ffi::OsStr::new(&cache_key_hex[..2])),
            "Fix: cryptographic device fingerprinting must happen before shard path derivation."
        );
        assert!(cache.read(key, fp).unwrap().is_none());
        cache.write(key, fp, b"compiled bytes").unwrap();
        assert_eq!(
            cache.read(key, fp).unwrap().as_deref(),
            Some(b"compiled bytes".as_slice())
        );
    }

    // Offsets are the descending powers of two below the subgroup size.
    #[test]
    fn subgroup_reduction_offsets_derive_from_size() {
        assert_eq!(crate::subgroup::reduction_offsets(32), vec![16, 8, 4, 2, 1]);
        assert_eq!(crate::subgroup::reduction_offsets(8), vec![4, 2, 1]);
    }

    // The passthrough pipeline forwards every dispatch to the backend 1:1.
    #[test]
    fn passthrough_routes_every_dispatch_to_backend() {
        let backend = Arc::new(CountingBackend::default());
        let pipeline = compile(
            backend.clone(),
            &empty_program(),
            &DispatchConfig::default(),
        )
        .unwrap();
        let inputs = vec![vec![1u8, 2, 3]];
        for _ in 0..10 {
            let out = pipeline
                .dispatch(&inputs, &DispatchConfig::default())
                .unwrap();
            assert_eq!(out, inputs);
        }
        assert_eq!(*backend.calls.lock().unwrap(), 10);
    }

    // compile_owned takes the Program by value and still routes dispatches.
    #[test]
    fn compile_owned_routes_without_borrowed_program_clone() {
        let backend = Arc::new(CountingBackend::default());
        let pipeline =
            compile_owned(backend.clone(), empty_program(), &DispatchConfig::default()).unwrap();
        let inputs = vec![vec![4_u8, 5, 6]];
        let out = pipeline
            .dispatch(&inputs, &DispatchConfig::default())
            .unwrap();
        assert_eq!(out, inputs);
        assert_eq!(*backend.calls.lock().unwrap(), 1);
    }

    // Telemetry build returns both a pipeline and a serializable manifest
    // whose identity fields agree with the pipeline.
    #[test]
    fn compile_owned_with_telemetry_returns_pipeline() {
        let backend = Arc::new(CountingBackend::default());
        let build =
            compile_owned_with_telemetry(backend, empty_program(), &DispatchConfig::default())
                .unwrap();
        assert!(build.pipeline.id().starts_with("counting:"));
        assert_eq!(build.manifest.backend_id, "counting");
        assert_eq!(build.manifest.pipeline_id, build.pipeline.id());
        assert_eq!(build.manifest.schema, PipelineReproManifest::SCHEMA);
        let json = build
            .manifest
            .to_json()
            .expect("manifest JSON must serialize");
        assert!(json.contains("\"program_digest\""));
    }

    // 2 hits / 1 miss => 6666 bps (truncated), below the 7000 bps threshold.
    #[test]
    fn pipeline_cache_audit_tracks_hits_misses_and_unknowns() {
        let mut audit = PipelineCacheAudit::new();
        audit.observe(Some(true));
        audit.observe(Some(true));
        audit.observe(Some(false));
        audit.observe(None);
        let report = audit.snapshot(7_000);
        assert_eq!(report.hits, 2);
        assert_eq!(report.misses, 1);
        assert_eq!(report.unknowns, 1);
        assert_eq!(report.hit_rate_bps, Some(6_666));
        assert!(report.below_alarm_threshold);
    }

    // No observations at all => no rate, and the alarm must stay quiet.
    #[test]
    fn pipeline_cache_audit_no_data_has_no_alarm() {
        let audit = PipelineCacheAudit::new();
        let report = audit.snapshot(9_000);
        assert_eq!(report.hit_rate_bps, None);
        assert!(!report.below_alarm_threshold);
    }

    // A threshold of zero disables the alarm even at a 0% hit rate.
    #[test]
    fn pipeline_cache_audit_zero_threshold_disables_alarm() {
        let mut audit = PipelineCacheAudit::new();
        audit.observe(Some(false));
        let report = audit.snapshot(0);
        assert_eq!(report.hit_rate_bps, Some(0));
        assert!(!report.below_alarm_threshold);
    }

    // Prewarm must compile (same path as pipeline mode) without ever
    // dispatching the program.
    #[test]
    fn prewarm_materializes_pipeline_without_dispatching() {
        let backend = Arc::new(CountingBackend::default());
        let report = prewarm_owned(backend.clone(), empty_program(), &DispatchConfig::default())
            .expect("prewarm must compile through the same path as pipeline mode");
        assert!(report.pipeline_id.starts_with("counting:"));
        assert_eq!(report.manifest.pipeline_id, report.pipeline_id);
        assert_eq!(
            *backend.calls.lock().unwrap(),
            0,
            "Fix: prewarm must remove compile/reflection from the hot path without running the program."
        );
    }

    // Pipeline id is namespaced by the backend id.
    #[test]
    fn passthrough_id_includes_backend_id() {
        let backend = Arc::new(CountingBackend::default());
        let pipeline = compile(backend, &empty_program(), &DispatchConfig::default()).unwrap();
        assert!(pipeline.id().starts_with("counting:"));
    }

    // When a backend overrides dispatch_borrowed, the passthrough pipeline
    // must route borrowed dispatches there instead of the owned path.
    #[test]
    fn passthrough_dispatch_borrowed_uses_backend_borrowed_override() {
        #[derive(Default)]
        struct BorrowRecordingBackend {
            owned_calls: std::sync::Mutex<usize>,
            borrowed_calls: std::sync::Mutex<usize>,
        }
        impl crate::backend::private::Sealed for BorrowRecordingBackend {}
        impl VyreBackend for BorrowRecordingBackend {
            fn id(&self) -> &'static str {
                "borrow-recording"
            }
            fn dispatch(
                &self,
                _program: &Program,
                inputs: &[Vec<u8>],
                _config: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                *self.owned_calls.lock().unwrap() += 1;
                Ok(inputs.to_vec())
            }
            fn dispatch_borrowed(
                &self,
                _program: &Program,
                inputs: &[&[u8]],
                _config: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                *self.borrowed_calls.lock().unwrap() += 1;
                Ok(inputs.iter().map(|input| (*input).to_vec()).collect())
            }
        }
        let backend = Arc::new(BorrowRecordingBackend::default());
        let pipeline = compile(
            backend.clone(),
            &empty_program(),
            &DispatchConfig::default(),
        )
        .unwrap();
        let input = [7u8, 8, 9];
        let out = pipeline
            .dispatch_borrowed(&[input.as_slice()], &DispatchConfig::default())
            .unwrap();
        assert_eq!(out, vec![input.to_vec()]);
        assert_eq!(*backend.borrowed_calls.lock().unwrap(), 1);
        assert_eq!(*backend.owned_calls.lock().unwrap(), 0);
    }

    // The default batched implementation must call dispatch_borrowed once per
    // batch, in order, without falling back to the owned dispatch.
    #[test]
    fn compiled_pipeline_borrowed_batch_default_preserves_order() {
        #[derive(Default)]
        struct BatchDefaultPipeline {
            calls: std::sync::Mutex<Vec<Vec<u8>>>,
        }
        impl crate::backend::private::Sealed for BatchDefaultPipeline {}
        impl CompiledPipeline for BatchDefaultPipeline {
            fn id(&self) -> &str {
                "batch-default"
            }
            fn dispatch(
                &self,
                _: &[Vec<u8>],
                _: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                Err(BackendError::new(
                    "batch default test should use dispatch_borrowed. Fix: keep borrowed batch default zero-copy until each single dispatch.",
                ))
            }
            fn dispatch_borrowed(
                &self,
                inputs: &[&[u8]],
                _: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                let first = inputs.first().copied().unwrap_or_default().to_vec();
                self.calls.lock().unwrap().push(first.clone());
                Ok(vec![first])
            }
        }
        let pipeline = BatchDefaultPipeline::default();
        let a = [1_u8, 2];
        let b = [3_u8, 4];
        let batch_a: [&[u8]; 1] = [a.as_slice()];
        let batch_b: [&[u8]; 1] = [b.as_slice()];
        let batches: [&[&[u8]]; 2] = [&batch_a, &batch_b];
        let outputs = pipeline
            .dispatch_borrowed_batched(&batches, &DispatchConfig::default())
            .unwrap();
        assert_eq!(outputs, vec![vec![a.to_vec()], vec![b.to_vec()]]);
        assert_eq!(
            *pipeline.calls.lock().unwrap(),
            vec![a.to_vec(), b.to_vec()]
        );
    }

    // A per-call DispatchConfig must override the compile-time one, and the
    // compile-time config is used when the per-call config is default.
    #[test]
    fn per_call_config_overrides_compile_config() {
        struct ProfileEcho {
            seen: std::sync::Mutex<Vec<Option<String>>>,
        }
        impl crate::backend::private::Sealed for ProfileEcho {}
        impl VyreBackend for ProfileEcho {
            fn id(&self) -> &'static str {
                "profile-echo"
            }
            fn dispatch(
                &self,
                _program: &Program,
                _inputs: &[Vec<u8>],
                config: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                self.seen.lock().unwrap().push(config.profile.clone());
                Ok(vec![])
            }
        }
        let backend = Arc::new(ProfileEcho {
            seen: Default::default(),
        });
        let compile_cfg = DispatchConfig {
            profile: Some("compile-time".to_string()),
            ulp_budget: None,
            ..DispatchConfig::default()
        };
        let pipeline = compile(backend.clone(), &empty_program(), &compile_cfg).unwrap();
        pipeline.dispatch(&[], &DispatchConfig::default()).unwrap();
        pipeline
            .dispatch(
                &[],
                &DispatchConfig {
                    profile: Some("per-call".to_string()),
                    ulp_budget: None,
                    ..DispatchConfig::default()
                },
            )
            .unwrap();
        let seen = backend.seen.lock().unwrap();
        assert_eq!(seen[0], Some("compile-time".to_string()));
        assert_eq!(seen[1], Some("per-call".to_string()));
    }

    // If compile_native yields a pipeline, compile must use it and never fall
    // back to the backend's raw dispatch.
    #[test]
    fn native_pipeline_is_used_when_backend_provides_one() {
        struct NativePipeline;
        impl crate::backend::private::Sealed for NativePipeline {}
        impl CompiledPipeline for NativePipeline {
            fn id(&self) -> &str {
                "native-pipeline"
            }
            fn dispatch(
                &self,
                _: &[Vec<u8>],
                _: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                Ok(vec![vec![42]])
            }
        }
        struct NativeBackend;
        impl crate::backend::private::Sealed for NativeBackend {}
        impl VyreBackend for NativeBackend {
            fn id(&self) -> &'static str {
                "native"
            }
            fn dispatch(
                &self,
                _: &Program,
                _: &[Vec<u8>],
                _: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                Err(BackendError::new(
                    "native backend should be reached via compile, not dispatch. \
Fix: use vyre::pipeline::compile then call CompiledPipeline::dispatch.",
                ))
            }
            fn compile_native(
                &self,
                _: &Program,
                _: &DispatchConfig,
            ) -> Result<Option<Arc<dyn CompiledPipeline>>, BackendError> {
                Ok(Some(Arc::new(NativePipeline)))
            }
        }
        let backend = Arc::new(NativeBackend);
        let pipeline = compile(backend, &empty_program(), &DispatchConfig::default()).unwrap();
        assert_eq!(pipeline.id(), "native-pipeline");
        let outputs = pipeline.dispatch(&[], &DispatchConfig::default()).unwrap();
        assert_eq!(outputs, vec![vec![42]]);
    }

    // Prewarm surfaces the backend's own hit/miss telemetry: first compile is
    // a miss, second is a hit (the WarmBackend counts compiles itself).
    #[test]
    fn prewarm_reports_backend_cache_telemetry() {
        struct WarmPipeline;
        impl crate::backend::private::Sealed for WarmPipeline {}
        impl CompiledPipeline for WarmPipeline {
            fn id(&self) -> &str {
                "warm-native"
            }
            fn dispatch(
                &self,
                _: &[Vec<u8>],
                _: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                Ok(Vec::new())
            }
        }
        #[derive(Default)]
        struct WarmBackend {
            compiles: std::sync::Mutex<u64>,
            hits: std::sync::Mutex<u64>,
            misses: std::sync::Mutex<u64>,
        }
        impl crate::backend::private::Sealed for WarmBackend {}
        impl VyreBackend for WarmBackend {
            fn id(&self) -> &'static str {
                "warm"
            }
            fn dispatch(
                &self,
                _: &Program,
                _: &[Vec<u8>],
                _: &DispatchConfig,
            ) -> Result<Vec<Vec<u8>>, BackendError> {
                Err(BackendError::new(
                    "prewarm test backend should never dispatch. Fix: keep prewarm on the compile path.",
                ))
            }
            fn compile_native(
                &self,
                _: &Program,
                _: &DispatchConfig,
            ) -> Result<Option<Arc<dyn CompiledPipeline>>, BackendError> {
                // First call counts as a miss, later calls as hits.
                let mut compiles = self.compiles.lock().unwrap();
                if *compiles == 0 {
                    *self.misses.lock().unwrap() += 1;
                } else {
                    *self.hits.lock().unwrap() += 1;
                }
                *compiles += 1;
                Ok(Some(Arc::new(WarmPipeline)))
            }
            fn pipeline_cache_snapshot(&self) -> Option<PipelineCacheSnapshot> {
                Some(PipelineCacheSnapshot {
                    hits: *self.hits.lock().unwrap(),
                    misses: *self.misses.lock().unwrap(),
                })
            }
        }
        let backend = Arc::new(WarmBackend::default());
        let cold = prewarm(
            backend.clone(),
            &empty_program(),
            &DispatchConfig::default(),
        )
        .expect("cold prewarm should compile");
        let hot = prewarm(backend, &empty_program(), &DispatchConfig::default())
            .expect("hot prewarm should hit cache telemetry");
        assert_eq!(cold.pipeline_id, "warm-native");
        assert_eq!(cold.cache_hit, Some(false));
        assert_eq!(hot.cache_hit, Some(true));
    }

    // Admission must reject a program built with the deprecated non-region
    // constructor, and the error message must name the region invariant.
    #[test]
    #[allow(deprecated)]
    fn compile_rejects_non_region_programs() {
        let backend = Arc::new(CountingBackend::default());
        let program = Program::new(
            vec![BufferDecl::output("out", 0, DataType::U32).with_count(1)],
            [1, 1, 1],
            vec![Node::store("out", Expr::u32(0), Expr::u32(9)), Node::Return],
        );
        let error = match compile(backend, &program, &DispatchConfig::default()) {
            Ok(_) => panic!("Fix: runtime admission must reject raw top-level statements"),
            Err(error) => error,
        };
        assert!(
            error
                .to_string()
                .contains("top-level Region-wrapped Program"),
            "Fix: runtime admission rejection must mention the region invariant, got: {error}"
        );
    }
}
// Focused tests for PipelineCacheAudit hit-rate accounting and the alarm
// threshold semantics (rates in basis points; unknowns excluded from the
// denominator; threshold 0 disables alarming).
#[cfg(test)]
mod c6_pipeline_cache_audit_tests {
    use super::super::{PipelineCacheAudit, PipelineCacheAuditReport};

    // Fresh audit: all counters zero, no rate, no alarm.
    #[test]
    fn empty_audit_reports_no_data_and_no_alarm() {
        let audit = PipelineCacheAudit::new();
        let report = audit.snapshot(8000);
        assert_eq!(
            report,
            PipelineCacheAuditReport {
                hits: 0,
                misses: 0,
                unknowns: 0,
                hit_rate_bps: None,
                below_alarm_threshold: false,
            }
        );
    }

    // 3 hits / 1 miss => 7500 basis points.
    #[test]
    fn audit_computes_hit_rate_bps_correctly() {
        let mut audit = PipelineCacheAudit::new();
        audit.observe(Some(true));
        audit.observe(Some(true));
        audit.observe(Some(true));
        audit.observe(Some(false));
        let report = audit.snapshot(0);
        assert_eq!(report.hits, 3);
        assert_eq!(report.misses, 1);
        assert_eq!(report.hit_rate_bps, Some(7500));
    }

    // `None` observations are counted as unknowns but must not dilute the
    // hit rate: 1 hit / 1 miss => 5000 bps despite 2 unknowns.
    #[test]
    fn audit_excludes_unknowns_from_rate_denominator() {
        let mut audit = PipelineCacheAudit::new();
        audit.observe(Some(true));
        audit.observe(None);
        audit.observe(None);
        audit.observe(Some(false));
        let report = audit.snapshot(0);
        assert_eq!(report.hits, 1);
        assert_eq!(report.misses, 1);
        assert_eq!(report.unknowns, 2);
        assert_eq!(report.hit_rate_bps, Some(5000));
    }

    // 3000 bps < 8000 bps threshold => alarm fires.
    #[test]
    fn audit_alarms_when_hit_rate_below_threshold() {
        let mut audit = PipelineCacheAudit::new();
        for _ in 0..3 {
            audit.observe(Some(true));
        }
        for _ in 0..7 {
            audit.observe(Some(false));
        }
        let report = audit.snapshot(8000);
        assert_eq!(report.hit_rate_bps, Some(3000));
        assert!(report.below_alarm_threshold);
    }

    // The comparison is strict: exactly at the threshold does not alarm.
    #[test]
    fn audit_does_not_alarm_at_exactly_threshold() {
        let mut audit = PipelineCacheAudit::new();
        for _ in 0..8 {
            audit.observe(Some(true));
        }
        for _ in 0..2 {
            audit.observe(Some(false));
        }
        let report = audit.snapshot(8000);
        assert_eq!(report.hit_rate_bps, Some(8000));
        assert!(!report.below_alarm_threshold);
    }

    // Threshold 0 disables the alarm even at a 0% hit rate.
    #[test]
    fn audit_alarm_disabled_with_zero_threshold() {
        let mut audit = PipelineCacheAudit::new();
        for _ in 0..5 {
            audit.observe(Some(false));
        }
        let report = audit.snapshot(0);
        assert_eq!(report.hit_rate_bps, Some(0));
        assert!(!report.below_alarm_threshold);
    }

    // Unknown-only observations still mean "no data": no rate, no alarm.
    #[test]
    fn audit_no_alarm_when_no_data_even_with_threshold() {
        let mut audit = PipelineCacheAudit::new();
        audit.observe(None);
        audit.observe(None);
        let report = audit.snapshot(8000);
        assert_eq!(report.hit_rate_bps, None);
        assert!(!report.below_alarm_threshold);
    }
}