#![no_std]
#![forbid(unsafe_code)]
#![deny(clippy::unwrap_used)]
#[cfg(feature = "std")]
extern crate std;
pub mod types;
pub mod error;
pub mod config;
pub mod residual;
pub mod sign;
pub mod envelope;
pub mod grammar;
pub mod heuristics_bank;
pub mod dsa;
pub mod policy;
pub mod episode;
pub mod baseline;
pub mod causality;
pub mod graph_inference;
pub mod episode_catalog;
#[cfg(feature = "std")]
pub mod adapters;
#[cfg(feature = "paper-lock")]
pub mod real_data;
#[cfg(feature = "std")]
pub mod calibration;
#[cfg(feature = "std")]
pub mod incumbent_baselines;
#[cfg(feature = "std")]
pub mod render;
#[cfg(feature = "std")]
pub mod fusion;
#[cfg(feature = "std")]
pub mod audit;
#[cfg(feature = "demo")]
pub mod demo;
use types::*;
use error::{DsfbError, Result};
use config::EngineConfig;
use heuristics_bank::HeuristicsBank;
/// Core DSFB debugging engine, generic over compile-time capacity limits.
///
/// Both limits are const generics so the engine can run entirely on the
/// stack with no heap allocation (this crate is `#![no_std]`).
pub struct DsfbDebugEngine<
// Capacity bound for per-signal state buffers in `run_evaluation`.
const MAX_SIGNALS: usize,
// Capacity bound for the heuristics bank's motif table.
const MAX_MOTIFS: usize,
> {
// Engine configuration; validated in `new` before the engine is built.
config: EngineConfig,
// Motif lookup table used for semantic classification of signals/episodes.
heuristics_bank: HeuristicsBank<MAX_MOTIFS>,
}
impl<const S: usize, const M: usize> DsfbDebugEngine<S, M> {
/// Builds an engine from `config` with the canonical motif bank.
///
/// # Errors
/// Propagates the error from [`EngineConfig::validate`] when the
/// configuration is rejected.
pub fn new(config: EngineConfig) -> Result<Self> {
    config.validate()?;
    let heuristics_bank = HeuristicsBank::with_canonical_motifs();
    Ok(Self { config, heuristics_bank })
}
/// Convenience constructor using the frozen paper-lock configuration.
///
/// # Errors
/// Same as [`Self::new`] (validation of the locked configuration).
pub fn paper_lock() -> Result<Self> {
    let locked = config::PAPER_LOCK_CONFIG;
    Self::new(locked)
}
/// Borrows the engine's active configuration.
pub fn config(&self) -> &EngineConfig {
    &self.config
}
/// Borrows the engine's heuristics (motif) bank.
pub fn heuristics_bank(&self) -> &HeuristicsBank<M> {
    &self.heuristics_bank
}
/// Evaluates one signal observation for one window.
///
/// `residual_norms` is the rolling residual-norm history for the signal
/// and `k` indexes the current entry; `rho` is the baseline envelope
/// value for this signal. `recent_raw_states` feeds hysteresis
/// confirmation and `persistence_count` feeds the policy decision.
/// Imputed observations short-circuit to a fully admissible, silent
/// record so gap-filled data never raises alerts.
#[allow(clippy::too_many_arguments)]
pub fn evaluate_signal(
    &self,
    residual_norms: &[f64], k: usize,
    rho: f64,
    signal_index: u16,
    window_index: u64,
    was_imputed: bool,
    recent_raw_states: &[GrammarState], persistence_count: usize,
) -> SignalEvaluation {
    if was_imputed {
        // Gap-filled samples carry no information: emit a neutral record.
        return SignalEvaluation {
            window_index,
            signal_index,
            residual_value: 0.0,
            sign_tuple: SignTuple::ZERO,
            raw_grammar_state: GrammarState::Admissible,
            confirmed_grammar_state: GrammarState::Admissible,
            reason_code: ReasonCode::Admissible,
            motif: None,
            semantic_disposition: SemanticDisposition::Unknown,
            dsa_score: 0.0,
            policy_state: PolicyState::Silent,
            was_imputed: true,
            drift_persistence: 0.0,
        };
    }

    let tuple = sign::compute_sign_tuple(residual_norms, k);
    let drift = sign::drift_persistence(residual_norms, k, self.config.drift_window);
    let (raw_state, reason) =
        grammar::evaluate_raw_grammar(&tuple, rho, &self.config, drift);

    // Hysteresis smooths flapping states, but a raw Violation always
    // passes straight through.
    let confirmed_state = if raw_state == GrammarState::Violation {
        GrammarState::Violation
    } else {
        grammar::hysteresis_confirm(recent_raw_states, self.config.hysteresis_confirm)
    };

    // Manual |slew|: `f64::abs` lives in std, which is unavailable here.
    let abs_slew = if tuple.slew > 0.0 { tuple.slew } else { -tuple.slew };
    // NOTE(review): first DSA component is hard-coded to 0.0 — confirm
    // this is intentional rather than a placeholder.
    let score = dsa::compute_dsa_score(
        0.0,
        drift,
        if abs_slew > self.config.slew_delta { 1.0 } else { 0.0 },
    );
    let gate_ok = dsa::consistency_gate(score, self.config.consistency_gate);
    let disposition = self.heuristics_bank.lookup(reason, drift, abs_slew);
    let motif = if let SemanticDisposition::Named(m) = disposition {
        Some(m)
    } else {
        None
    };
    let decided_policy = policy::apply_policy(
        confirmed_state,
        score,
        gate_ok,
        disposition,
        persistence_count,
        self.config.persistence_threshold,
    );

    SignalEvaluation {
        window_index,
        signal_index,
        residual_value: residual_norms.get(k).copied().unwrap_or(0.0),
        sign_tuple: tuple,
        raw_grammar_state: raw_state,
        confirmed_grammar_state: confirmed_state,
        reason_code: reason,
        motif,
        semantic_disposition: disposition,
        dsa_score: score,
        policy_state: decided_policy,
        was_imputed: false,
        drift_persistence: drift,
    }
}
/// Runs the full evaluation pipeline over a dense, window-major matrix.
///
/// `data` is laid out as `num_windows` rows of `num_signals` samples
/// (`data[w * num_signals + s]`). Windows before `healthy_window_end`
/// are used to fit the per-signal baseline mean and envelope.
/// Per-sample evaluations are written into `eval_out` and aggregated
/// episodes into `episodes_out` (writes beyond `eval_out`'s length are
/// silently dropped). Returns the episode count and benchmark metrics.
///
/// # Errors
/// * [`DsfbError::SignalBufferFull`] if `num_signals` exceeds capacity `S`.
/// * [`DsfbError::BufferTooSmall`] if `num_signals * num_windows`
///   overflows `usize` or exceeds the internal flat-buffer capacity.
/// * [`DsfbError::DimensionMismatch`] if `data` holds fewer than
///   `num_windows * num_signals` samples.
#[allow(clippy::too_many_arguments)]
pub fn run_evaluation(
    &self,
    data: &[f64], num_signals: usize,
    num_windows: usize,
    fault_labels: &[bool], healthy_window_end: usize,
    eval_out: &mut [SignalEvaluation],
    episodes_out: &mut [DebugEpisode],
    dataset_name: &'static str,
) -> Result<(usize, BenchmarkMetrics)> {
    if num_signals > S {
        return Err(DsfbError::SignalBufferFull);
    }
    // Capacity of the internal flat scratch buffers below.
    const FLAT_CAP: usize = 8192;
    // Compute the element count with `checked_mul` BEFORE using the product
    // anywhere: the original multiplied `num_windows * num_signals`
    // unchecked for the DimensionMismatch test first, which panics in debug
    // builds (and wraps in release) on overflow before the guard ran.
    let needed = match num_signals.checked_mul(num_windows) {
        Some(n) => n,
        None => return Err(DsfbError::BufferTooSmall { needed: usize::MAX, available: FLAT_CAP }),
    };
    if data.len() < needed {
        return Err(DsfbError::DimensionMismatch {
            expected: needed,
            got: data.len(),
        });
    }
    if needed > FLAT_CAP {
        return Err(DsfbError::BufferTooSmall { needed, available: FLAT_CAP });
    }

    // --- Phase 1: fit the per-signal baseline over the healthy prefix. ---
    let mut baseline_mean = [0.0_f64; S];
    let mut rho = [0.0_f64; S];
    // Saturating: an oversized `healthy_window_end` falls back to all data,
    // matching the original's out-of-range fallback without overflow risk.
    let healthy_data_end = healthy_window_end.saturating_mul(num_signals);
    let healthy_slice = if healthy_data_end <= data.len() {
        &data[..healthy_data_end]
    } else {
        data
    };
    baseline::compute_baseline_mean(
        healthy_slice, num_signals, healthy_window_end, &mut baseline_mean[..num_signals],
    );
    baseline::compute_baseline_envelope(
        healthy_slice, &baseline_mean[..num_signals],
        num_signals, healthy_window_end, &mut rho[..num_signals],
    );

    // --- Phase 2: evaluate every (window, signal) sample. ---
    let mut persistence_counts = [0_usize; S];
    // Ring buffer of the most recent raw grammar states per signal.
    // NOTE(review): entries are written at `raw_head % 4` but read back in
    // buffer order, so once full the slice is rotated rather than
    // chronological — confirm `hysteresis_confirm` is order-insensitive.
    let mut recent_raw = [[GrammarState::Admissible; 4]; S];
    let mut raw_head = [0_usize; S];
    let mut policy_states_flat: [PolicyState; FLAT_CAP] = [PolicyState::Silent; FLAT_CAP];
    let mut reason_codes_flat: [ReasonCode; FLAT_CAP] = [ReasonCode::Admissible; FLAT_CAP];
    let mut drift_dirs_flat: [DriftDirection; FLAT_CAP] = [DriftDirection::None; FLAT_CAP];
    let mut slew_mags_flat: [f64; FLAT_CAP] = [0.0; FLAT_CAP];
    let mut raw_anomaly_count: u64 = 0;
    // Rolling residual-norm history per signal (slides once full).
    const NORM_HIST: usize = 32;
    let mut norm_histories = [[0.0_f64; NORM_HIST]; S];
    let mut norm_heads = [0_usize; S];
    let mut w = 0_usize;
    while w < num_windows {
        let mut s = 0_usize;
        while s < num_signals {
            // One flat index shared by data, eval_out, and scratch buffers
            // (the original recomputed `w * num_signals + s` three times).
            let idx = w * num_signals + s;
            let obs = if idx < data.len() { data[idx] } else { 0.0 };
            // NaN observations are treated as imputed (gap-filled) samples.
            let is_nan = obs.is_nan();
            let residual = if is_nan { 0.0 } else { obs - baseline_mean[s] };
            let norm = residual::residual_norm(residual);
            let h = norm_heads[s];
            if h < NORM_HIST {
                norm_histories[s][h] = norm;
                norm_heads[s] = h + 1;
            } else {
                // History full: shift left one slot and append at the end.
                let mut i = 0;
                while i < NORM_HIST - 1 {
                    norm_histories[s][i] = norm_histories[s][i + 1];
                    i += 1;
                }
                norm_histories[s][NORM_HIST - 1] = norm;
            }
            let nh = norm_heads[s];
            let k = if nh > 0 { nh - 1 } else { 0 };
            let rh = raw_head[s];
            let recent_slice_len = if rh < 4 { rh } else { 4 };
            let recent = &recent_raw[s][..recent_slice_len];
            let eval = self.evaluate_signal(
                &norm_histories[s][..nh],
                k,
                rho[s],
                s as u16,
                w as u64,
                is_nan,
                recent,
                persistence_counts[s],
            );
            // Persistence counts consecutive Boundary-or-worse windows.
            if eval.confirmed_grammar_state >= GrammarState::Boundary {
                persistence_counts[s] += 1;
            } else {
                persistence_counts[s] = 0;
            }
            recent_raw[s][raw_head[s] % 4] = eval.raw_grammar_state;
            raw_head[s] += 1;
            if idx < eval_out.len() {
                eval_out[idx] = eval;
            }
            if idx < policy_states_flat.len() {
                policy_states_flat[idx] = eval.policy_state;
                reason_codes_flat[idx] = eval.reason_code;
                // Manual |slew|: `f64::abs` is unavailable without std.
                slew_mags_flat[idx] = if eval.sign_tuple.slew > 0.0 {
                    eval.sign_tuple.slew
                } else {
                    -eval.sign_tuple.slew
                };
                drift_dirs_flat[idx] = if eval.sign_tuple.drift > 0.1 {
                    DriftDirection::Positive
                } else if eval.sign_tuple.drift < -0.1 {
                    DriftDirection::Negative
                } else {
                    DriftDirection::None
                };
            }
            if eval.confirmed_grammar_state >= GrammarState::Boundary {
                raw_anomaly_count += 1;
            }
            s += 1;
        }
        w += 1;
    }

    // --- Phase 3: aggregate per-sample policy decisions into episodes. ---
    // `needed <= FLAT_CAP` was guaranteed above, so no clamping is required.
    let flat_len = needed;
    let episode_count = episode::aggregate_episodes(
        &policy_states_flat[..flat_len],
        num_signals,
        num_windows,
        &reason_codes_flat[..flat_len],
        &drift_dirs_flat[..flat_len],
        &slew_mags_flat[..flat_len],
        self.config.episode_correlation_window,
        episodes_out,
    );

    // --- Phase 4: enrich episodes with motif matching / policy upgrades. ---
    let mut ep_idx: usize = 0;
    while ep_idx < episode_count {
        let ep = episodes_out[ep_idx];
        let start_w = ep.start_window as usize;
        let end_w = ep.end_window as usize;
        let mut sum_drift: f64 = 0.0;
        let mut boundary_count: usize = 0;
        let mut total: usize = 0;
        let mut w = start_w;
        while w <= end_w && w < num_windows {
            let mut s = 0;
            while s < num_signals {
                let idx = w * num_signals + s;
                if idx < eval_out.len() {
                    let e = eval_out[idx];
                    sum_drift += e.drift_persistence;
                    if e.confirmed_grammar_state == GrammarState::Boundary {
                        boundary_count += 1;
                    }
                    total += 1;
                }
                s += 1;
            }
            w += 1;
        }
        let avg_drift = if total > 0 { sum_drift / total as f64 } else { 0.0 };
        let avg_boundary = if total > 0 { boundary_count as f64 / total as f64 } else { 0.0 };
        let disposition = self.heuristics_bank.match_episode(&ep, avg_drift, avg_boundary);
        episodes_out[ep_idx].matched_motif = disposition;
        // Upgrade Review-level episodes whose matched motif recommends
        // escalation.
        if let SemanticDisposition::Named(motif) = disposition {
            let recommended = self.heuristics_bank.recommended_action(motif);
            if episodes_out[ep_idx].policy_state == PolicyState::Review
                && recommended == PolicyState::Escalate
            {
                episodes_out[ep_idx].policy_state = PolicyState::Escalate;
            }
        }
        ep_idx += 1;
    }

    // --- Phase 5: score the run against the provided fault labels. ---
    let metrics = episode::compute_metrics(
        episodes_out,
        episode_count,
        fault_labels,
        raw_anomaly_count,
        self.config.episode_precision_window,
        dataset_name,
        num_signals as u16,
    );
    Ok((episode_count, metrics))
}
/// Like [`Self::run_evaluation`], but additionally attributes a root
/// cause to each episode using `service_graph`.
///
/// `service_graph` holds pairs of signal indices describing service
/// dependencies — presumably `(upstream, downstream)`; confirm the edge
/// direction against `causality::attribute_root_causes`.
#[allow(clippy::too_many_arguments)]
pub fn run_evaluation_with_graph(
    &self,
    data: &[f64],
    num_signals: usize,
    num_windows: usize,
    fault_labels: &[bool],
    healthy_window_end: usize,
    eval_out: &mut [SignalEvaluation],
    episodes_out: &mut [DebugEpisode],
    dataset_name: &'static str,
    service_graph: &[(u16, u16)],
) -> Result<(usize, BenchmarkMetrics)> {
    // Base pipeline first; root-cause attribution annotates its output.
    let (episode_count, metrics) = self.run_evaluation(
        data,
        num_signals,
        num_windows,
        fault_labels,
        healthy_window_end,
        eval_out,
        episodes_out,
        dataset_name,
    )?;
    causality::attribute_root_causes(
        episodes_out,
        episode_count,
        eval_out,
        num_signals,
        num_windows,
        service_graph,
        self.config.slew_delta,
    );
    Ok((episode_count, metrics))
}
/// Runs the evaluation pipeline twice on identical inputs and checks
/// that both runs agree, demonstrating deterministic replay.
///
/// Compares episode counts, `dsfb_episode_count`, `raw_anomaly_count`,
/// and every produced episode; returns `Ok(true)` only when all match.
///
/// # Errors
/// Propagates any error from [`Self::run_evaluation`].
pub fn verify_deterministic_replay(
    &self,
    data: &[f64],
    num_signals: usize,
    num_windows: usize,
    fault_labels: &[bool],
    healthy_window_end: usize,
) -> Result<bool> {
    // Neutral per-sample record used to blank-initialize both eval buffers
    // (the original spelled this literal out twice).
    let blank_eval = SignalEvaluation {
        window_index: 0, signal_index: 0, residual_value: 0.0,
        sign_tuple: SignTuple::ZERO,
        raw_grammar_state: GrammarState::Admissible,
        confirmed_grammar_state: GrammarState::Admissible,
        reason_code: ReasonCode::Admissible,
        motif: None, semantic_disposition: SemanticDisposition::Unknown,
        dsa_score: 0.0, policy_state: PolicyState::Silent, was_imputed: false,
        drift_persistence: 0.0,
    };
    let blank_ep = DebugEpisode {
        episode_id: 0, start_window: 0, end_window: 0,
        peak_grammar_state: GrammarState::Admissible,
        primary_reason_code: ReasonCode::Admissible,
        matched_motif: SemanticDisposition::Unknown,
        policy_state: PolicyState::Silent,
        contributing_signal_count: 0,
        structural_signature: StructuralSignature {
            dominant_drift_direction: DriftDirection::None,
            peak_slew_magnitude: 0.0, duration_windows: 0, signal_correlation: 0.0,
        },
        root_cause_signal_index: None,
    };
    let mut eval1 = [blank_eval; 4096];
    let mut ep1 = [blank_ep; 256];
    let (c1, m1) = self.run_evaluation(
        data, num_signals, num_windows, fault_labels,
        healthy_window_end, &mut eval1, &mut ep1, "replay_test",
    )?;
    // Second run starts from freshly blanked buffers. This replaces the
    // original's needless full 4096-element copy of the first run's output
    // followed by an element-by-element manual reset loop.
    let mut eval2 = [blank_eval; 4096];
    let mut ep2 = [blank_ep; 256];
    let (c2, m2) = self.run_evaluation(
        data, num_signals, num_windows, fault_labels,
        healthy_window_end, &mut eval2, &mut ep2, "replay_test",
    )?;
    if c1 != c2
        || m1.dsfb_episode_count != m2.dsfb_episode_count
        || m1.raw_anomaly_count != m2.raw_anomaly_count
    {
        return Ok(false);
    }
    let mut j = 0;
    while j < c1 {
        if ep1[j] != ep2[j] {
            return Ok(false);
        }
        j += 1;
    }
    Ok(true)
}
}
impl DsfbDebugEngine<256, 64> {
    /// Builds a paper-lock engine at the default capacities
    /// (256 signals, 64 motifs).
    pub fn default_size() -> Result<Self> {
        let engine = Self::paper_lock()?;
        Ok(engine)
    }
}