
tacet_core/adaptive/posterior.rs

//! Posterior distribution representation for Bayesian inference.
//!
//! The posterior is Gaussian: δ | Δ ~ N(δ_post, Λ_post) where:
//! - δ ∈ ℝ⁹ is the per-decile timing difference vector
//! - Δ is the observed quantile difference vector
//!
//! For interpretability, the 9D posterior is projected to 2D (shift, tail)
//! using GLS: β_proj = (X'Σ_n⁻¹X)⁻¹ X'Σ_n⁻¹ δ_post
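//!
//! As an illustrative sketch (assuming, as is conventional, that the shift
//! column of X is all ones and the tail column loads on the upper deciles):
//!
//! ```text
//! δ_post = (c, c, …, c)            ⇒  β_proj = (μ, τ) ≈ (c, 0)    (pure uniform shift)
//! δ_post concentrated in top deciles ⇒  τ captures most of the effect (tail effect)
//! ```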
extern crate alloc;
use alloc::vec::Vec;

use crate::math::sqrt;
use crate::result::{EffectEstimate, EffectPattern, MeasurementQuality};
use crate::types::{Matrix2, Matrix9, Vector2, Vector9};

// Note: classify_pattern from analysis::effect is no longer used here.
// We now use classify_pattern_from_draws() which uses β draws directly.

/// Posterior distribution parameters for the 9D effect vector δ.
///
/// The posterior is Gaussian: δ | Δ ~ N(δ_post, Λ_post) where each δ_k
/// represents the timing difference at decile k.
///
/// The 2D projection β_proj = (μ, τ) provides shift/tail decomposition.
///
/// Uses Student's t prior (ν=4) via Gibbs sampling for robust inference.
#[derive(Clone, Debug)]
pub struct Posterior {
    /// 9D posterior mean δ_post in nanoseconds.
    pub delta_post: Vector9,

    /// 9D posterior covariance Λ_post.
    pub lambda_post: Matrix9,

    /// 2D GLS projection β = (μ, τ) for interpretability.
    /// - β[0] = μ (uniform shift)
    /// - β[1] = τ (tail effect)
    pub beta_proj: Vector2,

    /// 2D projection covariance (empirical, from draws).
    pub beta_proj_cov: Matrix2,

    /// Retained β draws for dominance-based classification (spec §3.4.6).
    /// Each draw is β^(s) = A·δ^(s) where A is the GLS projection matrix.
    pub beta_draws: Vec<Vector2>,

    /// Leak probability: P(max_k |δ_k| > θ | Δ).
    /// Computed via Monte Carlo integration over the 9D posterior.
    pub leak_probability: f64,

    /// Projection mismatch Q statistic.
    /// Q = r'Σ_n⁻¹r where r = δ_post - X β_proj.
    /// High values indicate the 2D summary is approximate.
    pub projection_mismatch_q: f64,

    /// Number of samples used in this posterior computation.
    pub n: usize,

    // ==================== Gibbs sampler fields ====================

    /// Posterior mean of latent scale λ.
    /// `None` if using simple posterior (no Gibbs sampler).
    pub lambda_mean: Option<f64>,

    /// Whether the Gibbs sampler's lambda chain mixed well.
    /// `None` if using simple posterior.
    /// When `Some(false)`, indicates potential posterior unreliability.
    pub lambda_mixing_ok: Option<bool>,

    /// Posterior mean of likelihood precision κ.
    /// `None` if using simple posterior.
    pub kappa_mean: Option<f64>,

    /// Coefficient of variation of κ.
    /// `None` if using simple posterior.
    pub kappa_cv: Option<f64>,

    /// Effective sample size of κ chain.
    /// `None` if using simple posterior.
    pub kappa_ess: Option<f64>,

    /// Whether the Gibbs sampler's kappa chain mixed well.
    /// `None` if using simple posterior.
    pub kappa_mixing_ok: Option<bool>,
}

impl Posterior {
    /// Create a new posterior with given parameters.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        delta_post: Vector9,
        lambda_post: Matrix9,
        beta_proj: Vector2,
        beta_proj_cov: Matrix2,
        beta_draws: Vec<Vector2>,
        leak_probability: f64,
        projection_mismatch_q: f64,
        n: usize,
    ) -> Self {
        Self {
            delta_post,
            lambda_post,
            beta_proj,
            beta_proj_cov,
            beta_draws,
            leak_probability,
            projection_mismatch_q,
            n,
            lambda_mean: None,        // v5.4: no Gibbs sampler
            lambda_mixing_ok: None,   // v5.4: no Gibbs sampler
            kappa_mean: None,         // v5.6: no Gibbs sampler
            kappa_cv: None,           // v5.6: no Gibbs sampler
            kappa_ess: None,          // v5.6: no Gibbs sampler
            kappa_mixing_ok: None,    // v5.6: no Gibbs sampler
        }
    }

    /// Create a new posterior with Gibbs sampler diagnostics (v5.4, v5.6).
    #[allow(clippy::too_many_arguments)]
    pub fn new_with_gibbs(
        delta_post: Vector9,
        lambda_post: Matrix9,
        beta_proj: Vector2,
        beta_proj_cov: Matrix2,
        beta_draws: Vec<Vector2>,
        leak_probability: f64,
        projection_mismatch_q: f64,
        n: usize,
        lambda_mean: f64,
        lambda_mixing_ok: bool,
        kappa_mean: f64,
        kappa_cv: f64,
        kappa_ess: f64,
        kappa_mixing_ok: bool,
    ) -> Self {
        Self {
            delta_post,
            lambda_post,
            beta_proj,
            beta_proj_cov,
            beta_draws,
            leak_probability,
            projection_mismatch_q,
            n,
            lambda_mean: Some(lambda_mean),
            lambda_mixing_ok: Some(lambda_mixing_ok),
            kappa_mean: Some(kappa_mean),
            kappa_cv: Some(kappa_cv),
            kappa_ess: Some(kappa_ess),
            kappa_mixing_ok: Some(kappa_mixing_ok),
        }
    }

    /// Get the shift component (μ) from the 2D projection.
    #[inline]
    pub fn shift_ns(&self) -> f64 {
        self.beta_proj[0]
    }

    /// Get the tail component (τ) from the 2D projection.
    #[inline]
    pub fn tail_ns(&self) -> f64 {
        self.beta_proj[1]
    }

    /// Get the standard error of the shift component.
    #[inline]
    pub fn shift_se(&self) -> f64 {
        sqrt(self.beta_proj_cov[(0, 0)])
    }

    /// Get the standard error of the tail component.
    #[inline]
    pub fn tail_se(&self) -> f64 {
        sqrt(self.beta_proj_cov[(1, 1)])
    }

    /// Get the max absolute effect across all deciles.
    pub fn max_effect_ns(&self) -> f64 {
        self.delta_post
            .iter()
            .map(|x| x.abs())
            .fold(0.0_f64, f64::max)
    }

    /// Build an EffectEstimate from this posterior.
    ///
    /// This computes the credible interval and classifies the effect pattern
    /// using dominance-based classification from the β draws (spec §3.4.6).
    pub fn to_effect_estimate(&self) -> EffectEstimate {
        // Classify pattern using dominance-based approach from draws
        let pattern = self.classify_pattern_from_draws();

        // Compute credible interval from posterior covariance
        let shift_std = self.shift_se();
        let tail_std = self.tail_se();
        let total_effect =
            sqrt(self.shift_ns() * self.shift_ns() + self.tail_ns() * self.tail_ns());
        let total_std = sqrt((shift_std * shift_std + tail_std * tail_std) / 2.0);

        let ci_low = (total_effect - 1.96 * total_std).max(0.0);
        let ci_high = total_effect + 1.96 * total_std;

        EffectEstimate {
            shift_ns: self.shift_ns(),
            tail_ns: self.tail_ns(),
            credible_interval_ns: (ci_low, ci_high),
            pattern,
            interpretation_caveat: None,
        }
    }

    /// Classify effect pattern using dominance-based approach from β draws (spec §3.4.6).
    ///
    /// Uses posterior draws to compute dominance probabilities rather than
    /// relying on "statistical significance" (|effect| > 2×SE), which is
    /// brittle under covariance regularization.
    ///
    /// Classification rules (dominance is primary):
    /// - UniformShift: shift dominates tail (|μ| ≥ 5|τ| with ≥80% probability)
    /// - TailEffect: tail dominates shift (|τ| ≥ 5|μ| with ≥80% probability)
    /// - Mixed: both practically significant and neither dominates
    /// - Indeterminate: effect too small or uncertain to classify
    fn classify_pattern_from_draws(&self) -> EffectPattern {
        // Dominance ratio threshold: one effect must be 5x larger to dominate
        const DOMINANCE_RATIO: f64 = 5.0;
        // Probability threshold for dominance classification
        const DOMINANCE_PROB: f64 = 0.80;
        // Threshold for effect to be "practically significant" (absolute, in ns)
        // Effects smaller than this are considered noise
        const MIN_SIGNIFICANT_NS: f64 = 10.0;

        if self.beta_draws.is_empty() {
            // Fallback to point estimate if no draws available
            return self.classify_pattern_point_estimate();
        }

        let n = self.beta_draws.len() as f64;

        // Count draws where each condition holds
        let mut shift_significant_count = 0;
        let mut tail_significant_count = 0;
        let mut shift_dominates_count = 0;
        let mut tail_dominates_count = 0;

        for beta in &self.beta_draws {
            let shift_abs = beta[0].abs();
            let tail_abs = beta[1].abs();

            // Check if components are practically significant (absolute threshold)
            if shift_abs > MIN_SIGNIFICANT_NS {
                shift_significant_count += 1;
            }
            if tail_abs > MIN_SIGNIFICANT_NS {
                tail_significant_count += 1;
            }

            // Check dominance (with safety for near-zero values)
            if tail_abs < 1e-12 || shift_abs >= DOMINANCE_RATIO * tail_abs {
                shift_dominates_count += 1;
            }
            if shift_abs < 1e-12 || tail_abs >= DOMINANCE_RATIO * shift_abs {
                tail_dominates_count += 1;
            }
        }

        // Compute probabilities
        let p_shift_significant = shift_significant_count as f64 / n;
        let p_tail_significant = tail_significant_count as f64 / n;
        let p_shift_dominates = shift_dominates_count as f64 / n;
        let p_tail_dominates = tail_dominates_count as f64 / n;

        // Classification: dominance is the primary criterion
        // If one component dominates in most draws, that determines the pattern
        if p_shift_dominates >= DOMINANCE_PROB {
            EffectPattern::UniformShift
        } else if p_tail_dominates >= DOMINANCE_PROB {
            EffectPattern::TailEffect
        } else if p_shift_significant >= DOMINANCE_PROB && p_tail_significant >= DOMINANCE_PROB {
            // Neither dominates but both are significant -> Mixed
            EffectPattern::Mixed
        } else {
            EffectPattern::Indeterminate
        }
    }

    /// Fallback classification using point estimates when no draws are available.
    fn classify_pattern_point_estimate(&self) -> EffectPattern {
        const DOMINANCE_RATIO: f64 = 5.0;

        let shift_abs = self.shift_ns().abs();
        let tail_abs = self.tail_ns().abs();
        let shift_se = self.shift_se();
        let tail_se = self.tail_se();

        // Check statistical significance (|effect| > 2×SE)
        let shift_significant = shift_abs > 2.0 * shift_se;
        let tail_significant = tail_abs > 2.0 * tail_se;

        match (shift_significant, tail_significant) {
            (true, false) => EffectPattern::UniformShift,
            (false, true) => EffectPattern::TailEffect,
            (true, true) => {
                // Both significant - check dominance
                if shift_abs > tail_abs * DOMINANCE_RATIO {
                    EffectPattern::UniformShift
                } else if tail_abs > shift_abs * DOMINANCE_RATIO {
                    EffectPattern::TailEffect
                } else {
                    EffectPattern::Mixed
                }
            }
            (false, false) => EffectPattern::Indeterminate,
        }
    }

    /// Get measurement quality based on the effect standard error.
    ///
    /// Quality is determined by the minimum detectable effect (MDE),
    /// which is approximately 2x the effect standard error.
    pub fn measurement_quality(&self) -> MeasurementQuality {
        let effect_se = self.shift_se();
        MeasurementQuality::from_mde_ns(effect_se * 2.0)
    }

    /// Convert to an FFI-friendly summary containing only scalar fields.
    ///
    /// This uses the canonical effect pattern classification from draws (spec §3.4.6)
    /// rather than the simpler heuristics that were previously duplicated in bindings.
    pub fn to_summary(&self) -> crate::ffi_summary::PosteriorSummary {
        let effect = self.to_effect_estimate();

        crate::ffi_summary::PosteriorSummary {
            shift_ns: self.shift_ns(),
            tail_ns: self.tail_ns(),
            shift_se: self.shift_se(),
            tail_se: self.tail_se(),
            ci_low_ns: effect.credible_interval_ns.0,
            ci_high_ns: effect.credible_interval_ns.1,
            pattern: effect.pattern,
            leak_probability: self.leak_probability,
            projection_mismatch_q: self.projection_mismatch_q,
            n: self.n,
            lambda_mean: self.lambda_mean.unwrap_or(1.0),
            lambda_mixing_ok: self.lambda_mixing_ok.unwrap_or(true),
            kappa_mean: self.kappa_mean.unwrap_or(1.0),
            kappa_cv: self.kappa_cv.unwrap_or(0.0),
            kappa_ess: self.kappa_ess.unwrap_or(0.0),
            kappa_mixing_ok: self.kappa_mixing_ok.unwrap_or(true),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_posterior_accessors() {
        let delta_post =
            Vector9::from_row_slice(&[10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]);
        let lambda_post = Matrix9::identity();
        let beta_proj = Vector2::new(10.0, 0.0);
        let beta_proj_cov = Matrix2::new(4.0, 0.0, 0.0, 1.0);

        let posterior = Posterior::new(
            delta_post,
            lambda_post,
            beta_proj,
            beta_proj_cov,
            Vec::new(), // beta_draws
            0.75,
            0.5,
            1000,
        );

        assert_eq!(posterior.shift_ns(), 10.0);
        assert_eq!(posterior.tail_ns(), 0.0);
        assert_eq!(posterior.shift_se(), 2.0); // sqrt(4.0)
        assert_eq!(posterior.tail_se(), 1.0); // sqrt(1.0)
        assert_eq!(posterior.leak_probability, 0.75);
        assert_eq!(posterior.n, 1000);
        assert!((posterior.max_effect_ns() - 10.0).abs() < 1e-10);
    }
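
    #[test]
    fn test_point_estimate_fallback_classifies_uniform_shift() {
        // Sketch test (added for illustration): with no retained β draws,
        // classification falls back to classify_pattern_point_estimate().
        // The shift (10 ns) exceeds 2×SE (2×2 ns) while the tail (0 ns) does
        // not, so the pattern should be UniformShift. Values are arbitrary.
        let posterior = Posterior::new(
            Vector9::from_row_slice(&[10.0; 9]),
            Matrix9::identity(),
            Vector2::new(10.0, 0.0),
            Matrix2::new(4.0, 0.0, 0.0, 1.0),
            Vec::new(), // no draws -> point-estimate fallback
            0.75,
            0.5,
            1000,
        );

        let effect = posterior.to_effect_estimate();
        assert!(matches!(effect.pattern, EffectPattern::UniformShift));
    }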

    #[test]
    fn test_posterior_clone() {
        let delta_post = Vector9::from_row_slice(&[5.0; 9]);
        let lambda_post = Matrix9::identity();
        let beta_proj = Vector2::new(5.0, 3.0);
        let beta_proj_cov = Matrix2::identity();

        let posterior = Posterior::new(
            delta_post,
            lambda_post,
            beta_proj,
            beta_proj_cov,
            Vec::new(), // beta_draws
            0.5,
            1.0,
            500,
        );

        let cloned = posterior.clone();
        assert_eq!(cloned.shift_ns(), posterior.shift_ns());
        assert_eq!(cloned.tail_ns(), posterior.tail_ns());
        assert_eq!(cloned.leak_probability, posterior.leak_probability);
    }
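
    #[test]
    fn test_dominance_classification_from_draws() {
        // Sketch test (added for illustration): every retained β draw has a
        // shift component well above MIN_SIGNIFICANT_NS and at least 5× the
        // tail component, so classify_pattern_from_draws() should report
        // UniformShift. The draw values themselves are arbitrary.
        let beta_draws: Vec<Vector2> = (0..50).map(|_| Vector2::new(100.0, 2.0)).collect();

        let posterior = Posterior::new(
            Vector9::from_row_slice(&[100.0; 9]),
            Matrix9::identity(),
            Vector2::new(100.0, 2.0),
            Matrix2::identity(),
            beta_draws,
            0.9,
            0.1,
            2000,
        );

        let effect = posterior.to_effect_estimate();
        assert!(matches!(effect.pattern, EffectPattern::UniformShift));
    }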
}