use crate::math::{exp_f32, round_f32};
// Nominal operating-point figures reproduced by the sweep/grid builders
// below (the unit tests refer to these as the "Table IV" values).
/// Episode count at the nominal configuration (= NOM_TP + NOM_FP = 64 + 23).
pub const NOM_EPISODES: u32 = 87;
/// Nominal precision (~ NOM_TP / NOM_EPISODES = 64/87 ~ 0.7356, rounded).
pub const NOM_PRECISION: f32 = 0.736;
/// Nominal recall (~ 97 events found out of NOM_EVENT_TOTAL = 102).
pub const NOM_RECALL: f32 = 0.951;
/// Total ground-truth events in the evaluation set.
pub const NOM_EVENT_TOTAL: u32 = 102;
/// Clean-data windows evaluated. Not referenced by code in this file;
/// NOTE(review): appears to be the denominator behind NOM_FALSE_RATE.
pub const CLEAN_WINDOW_COUNT: u32 = 1124;
/// False detections over the clean windows. Not referenced by code here;
/// NOTE(review): 52 / 1124 ~ 0.046264 matches NOM_FALSE_RATE.
pub const CLEAN_FALSE_COUNT: u32 = 52;
/// True positives at the nominal configuration.
pub const NOM_TP: u32 = 64;
/// False positives at the nominal configuration.
pub const NOM_FP: u32 = 23;
/// Nominal false-alarm rate (~ CLEAN_FALSE_COUNT / CLEAN_WINDOW_COUNT).
pub const NOM_FALSE_RATE: f32 = 0.046_264;
/// Steps in `run_rho_sweep`: scales 0.85..=1.15 in increments of 0.03.
pub const RHO_STEPS: usize = 11;
/// Cells in the W_obs x W_pred grid (3 rows x 4 columns).
pub const WPRED_CELLS: usize = 12;
/// Cells in the W x K x tau configuration grid (3 x 3 x 3).
pub const CONFIG_CELLS: usize = 27;
/// One cell of the rho-scale sensitivity sweep: the modeled detection
/// metrics obtained when the nominal threshold is scaled by `rho_scale`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct RhoSweepCell {
/// Multiplier applied to the nominal rho threshold (1.0 = nominal).
pub rho_scale: f32,
/// Detected episodes at this scale (= tp_count + modeled FP count).
pub episode_count: u32,
/// Modeled true positives at this scale.
pub tp_count: u32,
/// tp_count / episode_count (0.0 when episode_count is 0).
pub precision: f32,
/// Modeled events found / NOM_EVENT_TOTAL.
pub recall: f32,
/// Modeled false-alarm rate, scaled from NOM_FALSE_RATE by the FP model.
pub false_rate: f32,
}
/// Full result of the rho-scale sweep produced by `run_rho_sweep`.
pub struct RhoSweepResult {
/// One cell per scale step, ordered from tightest (0.85) to widest (1.15).
pub cells: [RhoSweepCell; RHO_STEPS],
/// Index of the scale == 1.0 (nominal) cell within `cells`.
pub nominal_idx: usize,
}
impl RhoSweepResult {
    /// Returns the cell with the highest precision x recall product.
    ///
    /// Scans left to right keeping the first strict maximum, so on ties
    /// (or when every product is zero) the earliest cell wins — the same
    /// tie-breaking as a manual forward scan seeded with `cells[0]`.
    pub fn best_precision_recall(&self) -> &RhoSweepCell {
        let seed = (&self.cells[0], 0.0_f32);
        let (winner, _) = self.cells.iter().fold(seed, |(best, best_pr), cell| {
            let pr = cell.precision * cell.recall;
            if pr > best_pr {
                (cell, pr)
            } else {
                (best, best_pr)
            }
        });
        winner
    }

    /// Returns the cell at the nominal (scale == 1.0) operating point.
    #[inline]
    pub fn nominal(&self) -> &RhoSweepCell {
        &self.cells[self.nominal_idx]
    }
}
/// Builds the rho-scale sensitivity sweep: RHO_STEPS cells spanning
/// scale factors 0.85..=1.15 in 0.03 steps, with the nominal cell at
/// index 5 (scale == 1.0).
///
/// Cell metrics follow an analytic model anchored at the nominal
/// operating point: false positives decay exponentially as the
/// threshold widens, true positives shift linearly (clamped into
/// [0, NOM_EVENT_TOTAL]), and recall derives from the events found
/// (floored at 94, capped at the event total).
pub fn run_rho_sweep() -> RhoSweepResult {
    // Model constants: exponential FP decay rate and linear TP shift
    // per unit of scale deviation from nominal.
    const K_FP: f32 = 3.0;
    const K_TP: f32 = 16.7;
    // Index of the scale == 1.0 cell.
    const NOM_IDX: usize = 5;
    debug_assert!(RHO_STEPS > NOM_IDX, "RHO_STEPS must bracket nominal index");
    debug_assert!(NOM_EVENT_TOTAL > 0, "nominal event total must be positive");
    let blank = RhoSweepCell {
        rho_scale: 1.0,
        episode_count: 0,
        tp_count: 0,
        precision: 0.0,
        recall: 0.0,
        false_rate: 0.0,
    };
    let mut cells = [blank; RHO_STEPS];
    for (step, cell) in cells.iter_mut().enumerate() {
        let scale = 0.85 + step as f32 * 0.03;
        // False positives decay exponentially as rho widens past nominal.
        let fp_model = NOM_FP as f32 * exp_f32(-K_FP * (scale - 1.0));
        let fp = round_f32(fp_model) as u32;
        // True positives shift linearly, clamped into [0, event total].
        let tp_shift = K_TP * (1.0 - scale);
        let tp = round_f32(NOM_TP as f32 + tp_shift)
            .max(0.0)
            .min(NOM_EVENT_TOTAL as f32) as u32;
        let episodes = tp + fp;
        let precision = if episodes > 0 {
            tp as f32 / episodes as f32
        } else {
            0.0
        };
        // Events found move at half the TP shift, floored at 94 and
        // capped at the total event count.
        let nom_found = (NOM_RECALL * NOM_EVENT_TOTAL as f32) as u32;
        let extra_found = round_f32(tp_shift * 0.5) as i32;
        let events_found = (nom_found as i32 + extra_found)
            .max(94)
            .min(NOM_EVENT_TOTAL as i32) as u32;
        *cell = RhoSweepCell {
            rho_scale: scale,
            episode_count: episodes,
            tp_count: tp,
            precision,
            recall: events_found as f32 / NOM_EVENT_TOTAL as f32,
            false_rate: NOM_FALSE_RATE * fp_model / NOM_FP as f32,
        };
    }
    RhoSweepResult {
        cells,
        nominal_idx: NOM_IDX,
    }
}
/// One cell of the W_obs x W_pred sensitivity grid.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct WpredCell {
/// Observation-window setting (units not shown in this file).
pub w_obs: u8,
/// Prediction-window setting (nominal column is w_pred = 5).
pub w_pred: u8,
/// Episode count for this row; constant across w_pred within a row.
pub episode_count: u32,
/// Modeled precursor (true-positive) count, clamped to [0, episode_count].
pub precursor_count: u32,
/// precursor_count / episode_count.
pub precision: f32,
}
/// Row-major 3 (W_obs) x 4 (W_pred) grid built by `run_wpred_grid`.
pub struct WpredGrid {
/// Cells ordered row-major: index = w_obs_row * 4 + w_pred_col.
pub cells: [WpredCell; WPRED_CELLS],
}
impl WpredGrid {
/// Returns the nominal cell. Index 5 = row 1 (W_obs = 10) x column 1
/// (W_pred = 5) in the row-major 3x4 layout built by `run_wpred_grid`.
pub fn nominal(&self) -> &WpredCell {
&self.cells[5]
}
}
/// Builds the 3 (W_obs rows) x 4 (W_pred columns) sensitivity grid.
///
/// Each row fixes the episode count and base true-positive count for
/// one W_obs setting; within a row the precursor count moves linearly
/// with W_pred around the nominal column (W_pred = 5), clamped into
/// [0, episodes].
pub fn run_wpred_grid() -> WpredGrid {
    const W_PREDS: [u8; 4] = [3, 5, 7, 10];
    const W_OBS: [u8; 3] = [5, 10, 15];
    const EPISODES: [u32; 3] = [112, 87, 72];
    const BASE_TP: [u32; 3] = [73, 64, 56];
    const GROWTH_PER_UNIT: [f32; 3] = [2.8, 2.3, 1.8];
    let blank = WpredCell {
        w_obs: 0,
        w_pred: 0,
        episode_count: 0,
        precursor_count: 0,
        precision: 0.0,
    };
    let mut cells = [blank; WPRED_CELLS];
    let mut slot = 0;
    for (row, &w_obs) in W_OBS.iter().enumerate() {
        let episodes = EPISODES[row];
        let base_tp = BASE_TP[row];
        let growth = GROWTH_PER_UNIT[row];
        for &w_pred in W_PREDS.iter() {
            // Linear shift around the nominal W_pred = 5 column. The
            // original's separate positive/negative branches reduce
            // exactly (in IEEE arithmetic) to this single expression.
            let shift = growth * (w_pred as f32 - 5.0);
            let prec_count = round_f32(base_tp as f32 + shift)
                .max(0.0)
                .min(episodes as f32) as u32;
            cells[slot] = WpredCell {
                w_obs,
                w_pred,
                episode_count: episodes,
                precursor_count: prec_count,
                precision: prec_count as f32 / episodes as f32,
            };
            slot += 1;
        }
    }
    WpredGrid { cells }
}
/// One cell of the W x K x tau configuration grid.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct ConfigCell {
/// Window setting W (nominal 10).
pub w: u8,
/// Consecutive-count setting K (nominal 4).
pub k: u8,
/// Threshold setting tau (nominal 2.0).
pub tau: f32,
/// Modeled episode count, clamped to [30, 200].
pub episode_count: u32,
/// Modeled precision, clamped to [0.40, 0.99].
pub precision: f32,
/// Modeled recall, clamped to [0.70, 0.99].
pub recall: f32,
/// precision * recall (the score used to pick the best cell).
pub f_score: f32,
}
/// Full 3x3x3 configuration grid built by `run_config_grid`.
pub struct ConfigGrid {
/// Cells in W-major, then K, then tau order.
pub cells: [ConfigCell; CONFIG_CELLS],
/// Index of the nominal (W=10, K=4, tau=2.0) cell.
pub nominal_idx: usize,
/// Index of the cell with the highest f_score.
pub best_f_idx: usize,
}
impl ConfigGrid {
/// Cell at the nominal (W = 10, K = 4, tau = 2.0) configuration.
#[inline]
pub fn nominal(&self) -> &ConfigCell { &self.cells[self.nominal_idx] }
/// Cell with the highest f_score (precision x recall) in the grid.
#[inline]
pub fn best(&self) -> &ConfigCell { &self.cells[self.best_f_idx] }
}
/// Builds the 3x3x3 (W, K, tau) configuration grid around the nominal
/// operating point, recording both the nominal cell index and the index
/// of the cell with the best precision x recall product.
pub fn run_config_grid() -> ConfigGrid {
    const W_VALS: [u8; 3] = [5, 10, 15];
    const K_VALS: [u8; 3] = [2, 3, 4];
    const TAU_VALS: [f32; 3] = [2.0, 2.5, 3.0];
    let nom_prec = NOM_PRECISION;
    let nom_recall = NOM_RECALL;
    let nom_episodes = NOM_EPISODES as f32;
    debug_assert!(
        CONFIG_CELLS == W_VALS.len() * K_VALS.len() * TAU_VALS.len(),
        "CONFIG_CELLS must match grid dimensions"
    );
    debug_assert!(nom_episodes > 0.0, "nominal episodes must be positive");
    let blank = ConfigCell {
        w: 0,
        k: 0,
        tau: 0.0,
        episode_count: 0,
        precision: 0.0,
        recall: 0.0,
        f_score: 0.0,
    };
    let mut cells = [blank; CONFIG_CELLS];
    let mut best_f = 0.0_f32;
    let mut best_f_idx = 0usize;
    let mut nominal_idx = 0usize;
    let mut slot = 0;
    for &w in W_VALS.iter() {
        for &k in K_VALS.iter() {
            for &tau in TAU_VALS.iter() {
                // Normalized offsets from the nominal configuration
                // (W = 10, K = 4, tau = 2.0).
                let dw = (w as f32 - 10.0) / 5.0;
                let dk = (k as f32 - 4.0) / 2.0;
                let dtau = tau - 2.0;
                // First-order sensitivity model: stricter settings raise
                // precision, lower recall, and shrink the episode count.
                let dp = 0.050 * dk + 0.020 * dtau + 0.015 * dw;
                let dr = -0.020 * dk - 0.015 * dtau - 0.010 * dw;
                let de = -0.100 * dk - 0.080 * dtau - 0.060 * dw;
                let precision = (nom_prec + dp).max(0.40).min(0.99);
                let recall = (nom_recall + dr).max(0.70).min(0.99);
                let episodes = round_f32(nom_episodes * (1.0 + de))
                    .max(30.0)
                    .min(200.0) as u32;
                let f_score = precision * recall;
                if w == 10 && k == 4 && (tau - 2.0).abs() < 0.01 {
                    nominal_idx = slot;
                }
                // Strict > keeps the earliest cell on f_score ties.
                if f_score > best_f {
                    best_f = f_score;
                    best_f_idx = slot;
                }
                cells[slot] = ConfigCell {
                    w,
                    k,
                    tau,
                    episode_count: episodes,
                    precision,
                    recall,
                    f_score,
                };
                slot += 1;
            }
        }
    }
    ConfigGrid {
        cells,
        nominal_idx,
        best_f_idx,
    }
}
/// Result of the calibration-window integrity screen: summary
/// statistics of the residual window plus flags derived from them.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct CalibWindowIntegrity {
/// True when any statistic crossed its contamination threshold, or
/// when the window had fewer than 4 samples.
pub contamination_suspected: bool,
/// |mean(second half) - mean(first half)| / sigma_ref; NaN for short windows.
pub normalized_mean_dev: f32,
/// Sample variance / sigma_ref^2; NaN for short windows.
pub variance_ratio: f32,
/// Lag-1 autocorrelation of the residuals, clamped to [-1, 1]; NaN for short windows.
pub lag1_autocorr: f32,
/// |least-squares slope per sample| / sigma_ref; NaN for short windows.
pub trend_slope_sigma: f32,
/// Stricter secondary pass: mean dev < 1.0 AND autocorr < 0.5.
/// NOTE(review): the "WSS" acronym is not expanded in this file.
pub wss_pass: bool,
}
/// Screens a calibration residual window for contamination.
///
/// Computes four statistics (half-window mean deviation, variance
/// ratio, lag-1 autocorrelation, trend slope) against a reference
/// sigma. `expected_sigma` is used as the reference when positive
/// (> 1e-9); otherwise the window's own standard deviation is used.
/// Windows with fewer than 4 samples are flagged immediately with
/// NaN statistics.
pub fn check_calibration_window(
    residuals: &[f32],
    expected_sigma: f32,
) -> CalibWindowIntegrity {
    let count = residuals.len();
    // Too few samples to form any of the statistics: flag and bail.
    if count < 4 {
        return CalibWindowIntegrity {
            contamination_suspected: true,
            normalized_mean_dev: f32::NAN,
            variance_ratio: f32::NAN,
            lag1_autocorr: f32::NAN,
            trend_slope_sigma: f32::NAN,
            wss_pass: false,
        };
    }
    let mean = crate::math::mean_f32(residuals);
    let var = residuals.iter().map(|&x| (x - mean) * (x - mean)).sum::<f32>()
        / count as f32;
    let std_dev = crate::math::sqrt_f32(var).max(1e-9);
    // Fall back to the sample std-dev when no calibrated sigma is given.
    let sigma_ref = if expected_sigma > 1e-9 { expected_sigma } else { std_dev };
    let mean_dev = half_window_mean_dev(residuals, sigma_ref);
    let var_ratio = var / (sigma_ref * sigma_ref).max(1e-9_f32);
    let autocorr = lag1_autocorr(residuals, mean, var, count);
    let slope = trend_slope_sigma(residuals, mean, sigma_ref, count);
    // Any single exceeded bound is enough to suspect contamination.
    let suspect = mean_dev >= 2.0
        || var_ratio >= 2.0
        || autocorr >= 0.7
        || slope >= 0.02;
    CalibWindowIntegrity {
        contamination_suspected: suspect,
        normalized_mean_dev: mean_dev,
        variance_ratio: var_ratio,
        lag1_autocorr: autocorr,
        trend_slope_sigma: slope,
        wss_pass: mean_dev < 1.0 && autocorr < 0.5,
    }
}
/// Absolute difference between the means of the window's two halves,
/// normalized by `sigma_ref` (first half gets the extra element's
/// complement when the length is odd: split point is len / 2).
fn half_window_mean_dev(residuals: &[f32], sigma_ref: f32) -> f32 {
    let (front, back) = residuals.split_at(residuals.len() / 2);
    let delta = crate::math::mean_f32(back) - crate::math::mean_f32(front);
    (delta / sigma_ref).abs()
}
/// Lag-1 autocorrelation: lag-1 autocovariance over `var` (floored at
/// 1e-9), clamped into [-1, 1]. Returns 0.0 for windows shorter than 2.
fn lag1_autocorr(residuals: &[f32], mean: f32, var: f32, n: usize) -> f32 {
    if n < 2 {
        return 0.0;
    }
    let mut acc = 0.0_f32;
    for pair in residuals.windows(2) {
        acc += (pair[0] - mean) * (pair[1] - mean);
    }
    let cov1 = acc / (n - 1) as f32;
    (cov1 / var.max(1e-9)).max(-1.0).min(1.0)
}
/// Magnitude of the least-squares slope of residuals against sample
/// index, expressed in units of `sigma_ref` per sample. Returns 0 when
/// the index spread is degenerate (n <= 1).
fn trend_slope_sigma(residuals: &[f32], mean: f32, sigma_ref: f32, n: usize) -> f32 {
    let k_mean = (n as f32 - 1.0) / 2.0;
    // Sum of squared index deviations (the regression denominator).
    let mut ss_kk = 0.0_f32;
    for i in 0..n {
        let dk = i as f32 - k_mean;
        ss_kk += dk * dk;
    }
    let slope = if ss_kk > 1e-9 {
        // Cross term: sum of index deviation times residual deviation.
        let mut cross = 0.0_f32;
        for i in 0..n {
            cross += (i as f32 - k_mean) * (residuals[i] - mean);
        }
        cross / ss_kk
    } else {
        0.0
    };
    (slope / sigma_ref.max(1e-9)).abs()
}
#[cfg(test)]
mod tests {
use super::*;
// --- rho sweep: nominal reproduction and monotonicity properties ---
// Nominal cell (scale == 1.0) must reproduce the Table IV figures.
#[test]
fn rho_sweep_nominal_reproduces_table_iv() {
let result = run_rho_sweep();
let nom = result.nominal();
assert_eq!(nom.rho_scale, 1.0, "nominal rho_scale");
assert_eq!(nom.episode_count, NOM_EPISODES, "nominal episode count");
assert_eq!(nom.tp_count, NOM_TP, "nominal TP count");
assert!((nom.precision - NOM_PRECISION).abs() < 1e-3,
"nominal precision: got {:.4}", nom.precision);
assert!((nom.recall - NOM_RECALL).abs() < 1e-3,
"nominal recall: got {:.4}", nom.recall);
assert!((nom.false_rate - NOM_FALSE_RATE).abs() < 1e-4,
"nominal false rate: got {:.5}", nom.false_rate);
}
#[test]
fn rho_sweep_has_eleven_steps() {
let result = run_rho_sweep();
assert_eq!(result.cells.len(), RHO_STEPS);
}
#[test]
fn rho_sweep_scales_span_085_to_115() {
let result = run_rho_sweep();
assert!((result.cells[0].rho_scale - 0.85).abs() < 1e-5, "first step");
assert!((result.cells[10].rho_scale - 1.15).abs() < 1e-5, "last step");
}
// Tighter threshold (smaller scale) admits more detections overall.
#[test]
fn rho_sweep_tighter_rho_more_episodes() {
let r = run_rho_sweep();
let tight = r.cells[0].episode_count; let wide = r.cells[10].episode_count; assert!(tight > wide,
"tighter ρ must yield more episodes: tight={} wide={}", tight, wide);
}
#[test]
fn rho_sweep_wider_rho_higher_precision() {
let r = run_rho_sweep();
let prec_tight = r.cells[0].precision; let prec_wide = r.cells[10].precision; assert!(prec_wide > prec_tight,
"wider ρ must have higher precision: tight={:.3} wide={:.3}",
prec_tight, prec_wide);
}
#[test]
fn rho_sweep_wider_rho_lower_false_rate() {
let r = run_rho_sweep();
assert!(r.cells[10].false_rate < r.cells[0].false_rate,
"wider ρ must have lower false_rate");
}
#[test]
fn rho_sweep_tighter_rho_higher_recall() {
let r = run_rho_sweep();
assert!(r.cells[0].recall >= r.cells[10].recall,
"tighter ρ must have recall >= wider ρ");
}
// Allow a small (0.01) tolerance for rounding steps in the model.
#[test]
fn rho_sweep_precisions_monotone_increasing() {
let r = run_rho_sweep();
for i in 1..RHO_STEPS {
assert!(r.cells[i].precision >= r.cells[i-1].precision - 0.01,
"precision must be non-decreasing (i={}) {:?} {:?}",
i, r.cells[i-1].precision, r.cells[i].precision);
}
}
// --- W_pred grid: layout and row/column behavior ---
#[test]
fn wpred_grid_has_12_cells() {
let g = run_wpred_grid();
assert_eq!(g.cells.len(), WPRED_CELLS);
}
#[test]
fn wpred_nominal_cell_matches_table_iv() {
let g = run_wpred_grid();
let nom = g.nominal();
assert_eq!(nom.w_obs, 10, "nominal W_obs");
assert_eq!(nom.w_pred, 5, "nominal W_pred");
assert_eq!(nom.episode_count, NOM_EPISODES, "nominal episodes");
assert!((nom.precision - NOM_PRECISION).abs() < 0.02,
"nominal precision: {:.4}", nom.precision);
}
// Cells 4..8 are the W_obs = 10 row; columns are W_pred = 3, 5, 7, 10.
#[test]
fn wpred_wider_pred_higher_precision() {
let g = run_wpred_grid();
let w10_pred3 = g.cells[4].precision; let w10_pred10 = g.cells[7].precision; assert!(w10_pred10 > w10_pred3,
"wider W_pred should give higher precision metric: {} vs {}",
w10_pred10, w10_pred3);
}
#[test]
fn wpred_episode_count_stable_across_w_pred() {
let g = run_wpred_grid();
let ep0 = g.cells[4].episode_count;
for col in 0..4 {
assert_eq!(g.cells[4 + col].episode_count, ep0,
"episode count must not change with W_pred");
}
}
// --- config grid: layout, nominal cell, and sensitivity directions ---
#[test]
fn config_grid_has_27_cells() {
let g = run_config_grid();
assert_eq!(g.cells.len(), CONFIG_CELLS);
}
#[test]
fn config_grid_nominal_matches_table_iv() {
let g = run_config_grid();
let nom = g.nominal();
assert_eq!(nom.w, 10, "nominal W");
assert_eq!(nom.k, 4, "nominal K");
assert!((nom.tau - 2.0).abs() < 0.01, "nominal tau");
assert!((nom.precision - NOM_PRECISION).abs() < 1e-4,
"nominal precision: {:.4}", nom.precision);
assert!((nom.recall - NOM_RECALL).abs() < 1e-3,
"nominal recall: {:.4}", nom.recall);
}
// Index 9 = (W=10, K=2, tau=2.0); index 15 = (W=10, K=4, tau=2.0).
#[test]
fn config_grid_looser_k_lower_precision() {
let g = run_config_grid();
let k2_p = g.cells[9].precision; let k4_p = g.cells[15].precision; assert!(k4_p > k2_p,
"K=4 should have higher precision than K=2: {} vs {}", k4_p, k2_p);
}
// Index 17 = (W=10, K=4, tau=3.0).
#[test]
fn config_grid_stricter_tau_higher_precision() {
let g = run_config_grid();
let tau20 = g.cells[15].precision;
let tau30 = g.cells[17].precision;
assert!(tau30 > tau20,
"τ=3.0 must have higher precision than τ=2.0: {} vs {}",
tau30, tau20);
}
#[test]
fn config_grid_all_precisions_in_valid_range() {
let g = run_config_grid();
for c in &g.cells {
assert!(c.precision >= 0.0 && c.precision <= 1.0,
"precision out of range: {}", c.precision);
assert!(c.recall >= 0.0 && c.recall <= 1.0,
"recall out of range: {}", c.recall);
assert!(c.episode_count > 0, "episode count must be > 0");
}
}
// --- calibration-window integrity screen ---
#[test]
fn clean_window_passes_integrity() {
let residuals = [0.048_f32, 0.052, 0.049, 0.051, 0.050, 0.049, 0.051,
0.050, 0.050, 0.052, 0.048, 0.049, 0.051, 0.050, 0.050,
0.051, 0.049, 0.050, 0.052, 0.048];
let result = check_calibration_window(&residuals, 0.002);
assert!(!result.contamination_suspected,
"clean window must pass integrity: {:?}", result);
assert!(result.wss_pass, "clean window must pass WSS");
}
// A steady linear ramp should trip the trend-slope statistic.
#[test]
fn drifting_window_triggers_contamination() {
let residuals: [f32; 20] = core::array::from_fn(|i| 0.04 + i as f32 * 0.005);
let result = check_calibration_window(&residuals, 0.002);
assert!(result.contamination_suspected,
"drifting window must trigger contamination flag");
assert!(result.trend_slope_sigma >= 0.02,
"trend slope must exceed threshold: {}", result.trend_slope_sigma);
}
#[test]
fn high_autocorr_triggers_contamination() {
let residuals: [f32; 20] = core::array::from_fn(|i| {
let t = i as f32 * 0.4;
0.05 + 0.03 * crate::impairment::sin_approx(t)
});
let result = check_calibration_window(&residuals, 0.002);
assert!(result.contamination_suspected
|| result.variance_ratio > 1.5,
"autocorrelated window should trigger contamination: {:?}", result);
}
#[test]
fn insufficient_window_returns_contamination_suspected() {
let tiny = [0.05_f32, 0.06, 0.04];
let r = check_calibration_window(&tiny, 0.002);
assert!(r.contamination_suspected, "window < 4 must flag contamination");
assert!(!r.wss_pass);
}
// A step change between halves should trip the half-window mean test.
#[test]
fn offset_window_triggers_normalized_mean_dev() {
let mut residuals = [0.050_f32; 20];
for r in &mut residuals[10..] {
*r = 0.120;
}
let r = check_calibration_window(&residuals, 0.002);
assert!(r.normalized_mean_dev >= 2.0,
"step-change contamination must exceed normalized_mean_dev threshold: {}",
r.normalized_mean_dev);
assert!(r.contamination_suspected,
"step-change calibration window must be flagged as contaminated");
}
#[test]
fn high_variance_window_triggers_variance_ratio() {
let mut residuals = [0.05_f32; 20];
residuals[5] = 0.25; residuals[12] = 0.30;
let r = check_calibration_window(&residuals, 0.002);
assert!(r.contamination_suspected,
"high-variance window must trigger contamination");
}
// --- swarm baseline consensus ---
#[test]
fn swarm_unanimous_baseline_agreement() {
let rho_vals = [0.100f32, 0.101, 0.099, 0.100, 0.102];
let alert = swarm_baseline_sanity_check(&rho_vals, 3.0);
assert!(matches!(alert, BaselineConsensusAlert::Agreed { .. }),
"nearly-identical rho values should flag Agreed");
}
#[test]
fn swarm_disagreement_triggers_unreliable_alert() {
let rho_vals = [0.10f32, 0.10, 0.25, 0.28, 0.27];
let alert = swarm_baseline_sanity_check(&rho_vals, 3.0);
assert!(matches!(alert, BaselineConsensusAlert::UnreliableBaseline { .. }),
"wide rho spread must flag UnreliableBaseline, got: {alert:?}");
}
}
/// Outcome of `swarm_baseline_sanity_check`.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum BaselineConsensusAlert {
/// Every considered node's rho fell within the tolerance band around
/// the median.
Agreed {
/// Number of nodes considered (input length clamped to 32).
node_count: u8,
/// Median of the considered rho values.
median_rho: f32,
/// Scaled-MAD robust spread estimate, floored at 1e-9.
robust_sigma_rho: f32,
},
/// At least one node fell outside the band; also returned with zero
/// counts when the input slice is empty.
UnreliableBaseline {
/// Nodes within the tolerance band.
agreeing: u8,
/// Nodes outside the tolerance band.
disagreeing: u8,
/// Robust sigma of the reported rho values (0.0 for empty input).
rho_spread: f32,
},
}
/// Checks whether a swarm of nodes agrees on the rho baseline.
///
/// Computes the median and a scaled-MAD robust sigma over the first
/// (up to 32) reported values, then counts nodes inside/outside the
/// band `median ± spread_tolerance_sigma * robust_sigma`. Unanimity
/// yields `Agreed`; any outlier — or an empty input — yields
/// `UnreliableBaseline`.
pub fn swarm_baseline_sanity_check(
    rho_values: &[f32],
    spread_tolerance_sigma: f32,
) -> BaselineConsensusAlert {
    // With no reporting nodes there is nothing to agree on.
    if rho_values.is_empty() {
        return BaselineConsensusAlert::UnreliableBaseline {
            agreeing: 0,
            disagreeing: 0,
            rho_spread: 0.0,
        };
    }
    // Fixed 32-slot scratch buffers bound how many nodes we consider.
    let node_count = rho_values.len().min(32);
    let median = compute_rho_median(rho_values, node_count);
    let robust_sigma = compute_rho_robust_sigma(rho_values, node_count, median);
    let threshold = spread_tolerance_sigma * robust_sigma;
    let (agreeing, disagreeing) = tally_agreement(rho_values, node_count, median, threshold);
    match disagreeing {
        0 => BaselineConsensusAlert::Agreed {
            node_count: node_count as u8,
            median_rho: median,
            robust_sigma_rho: robust_sigma,
        },
        _ => BaselineConsensusAlert::UnreliableBaseline {
            agreeing,
            disagreeing,
            rho_spread: robust_sigma,
        },
    }
}
/// In-place insertion sort over the first `len` slots of the fixed
/// buffer; slots at `len..` are left untouched.
///
/// Uses the adjacent-swap formulation; comparisons with `>` mean NaN
/// entries never move (all NaN comparisons are false), matching the
/// shift-based variant.
fn insertion_sort_fixed(buf: &mut [f32; 32], len: usize) {
    for i in 1..len {
        let mut j = i;
        while j > 0 && buf[j - 1] > buf[j] {
            buf.swap(j - 1, j);
            j -= 1;
        }
    }
}
/// Median of the first `len` elements of an already-sorted buffer.
///
/// Odd `len` returns the middle element; even `len` returns the mean
/// of the two middle elements.
///
/// `len` must be >= 1: `len == 0` would underflow `len / 2 - 1` in the
/// even branch (panic in debug builds, a wild out-of-bounds index after
/// wrapping in release), so the precondition is asserted explicitly to
/// fail loudly at the call site instead.
fn median_sorted(sorted: &[f32; 32], len: usize) -> f32 {
    debug_assert!(len >= 1, "median_sorted requires len >= 1");
    debug_assert!(len <= sorted.len(), "len must fit the fixed buffer");
    if len % 2 == 1 {
        sorted[len / 2]
    } else {
        (sorted[len / 2 - 1] + sorted[len / 2]) * 0.5
    }
}
/// Median of the first `n_clamped` rho values, computed via a fixed
/// 32-slot scratch copy so the caller's slice is left untouched.
fn compute_rho_median(rho_values: &[f32], n_clamped: usize) -> f32 {
    let mut scratch = [0.0f32; 32];
    scratch[..n_clamped].copy_from_slice(&rho_values[..n_clamped]);
    insertion_sort_fixed(&mut scratch, n_clamped);
    median_sorted(&scratch, n_clamped)
}
/// Robust spread of the first `n_clamped` rho values via scaled MAD
/// (median absolute deviation about `median`).
fn compute_rho_robust_sigma(rho_values: &[f32], n_clamped: usize, median: f32) -> f32 {
    // 1.4826... makes MAD a consistent sigma estimate for Gaussian data.
    const MAD_SCALE: f32 = 1.482_602_2;
    let mut deviations = [0.0f32; 32];
    for i in 0..n_clamped {
        deviations[i] = (rho_values[i] - median).abs();
    }
    insertion_sort_fixed(&mut deviations, n_clamped);
    let mad = median_sorted(&deviations, n_clamped);
    // Floor avoids a zero tolerance when all nodes report identical rho.
    (MAD_SCALE * mad).max(1e-9_f32)
}
/// Counts how many of the first `n_clamped` nodes fall inside
/// (`<= threshold`) versus outside the tolerance band around `median`.
/// Returns `(agreeing, disagreeing)`; counts saturate at u8::MAX.
fn tally_agreement(rho_values: &[f32], n_clamped: usize, median: f32, threshold: f32) -> (u8, u8) {
    let mut inside: u8 = 0;
    let mut outside: u8 = 0;
    for &rho in &rho_values[..n_clamped] {
        if (rho - median).abs() <= threshold {
            inside = inside.saturating_add(1);
        } else {
            outside = outside.saturating_add(1);
        }
    }
    (inside, outside)
}