cognitum_gate_kernel/
evidence.rs

//! Evidence accumulator for anytime-valid coherence gate
//!
//! Implements sequential testing with e-values for the coherence gate.
//! The accumulator maintains running e-value products that can be queried
//! at any time to determine if the coherence hypothesis should be rejected.
//!
//! ## Performance Optimizations
//!
//! - Pre-computed log threshold constants (avoid runtime log calculations)
//! - Fixed-point arithmetic for e-values (numerical stability + performance)
//! - `#[inline(always)]` on hot path functions
//! - Cache-aligned accumulator structure
//! - Branchless observation processing where possible

#![allow(missing_docs)]

use crate::delta::{Observation, TileVertexId};
use core::mem::size_of;

/// Maximum number of tracked hypotheses per tile
pub const MAX_HYPOTHESES: usize = 16;

/// Maximum observations in sliding window
pub const WINDOW_SIZE: usize = 64;

/// Fixed-point e-value representation (32-bit, log scale)
/// Stored as log2(e-value) * 65536 for numerical stability
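///
/// # Example
///
/// A worked sketch of the encoding (illustrative, not from the original docs):
///
/// ```ignore
/// use cognitum_gate_kernel::evidence::LogEValue;
///
/// // An e-value of 8.0 is stored as log2(8.0) * 65536 = 3 * 65536.
/// let e_eight: LogEValue = 3 * 65536;
/// // Multiplying e-values becomes addition in log space: 8.0 * 2.0 -> 16.0.
/// let product = e_eight + 65536;
/// assert_eq!(product, 4 * 65536);
/// ```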
pub type LogEValue = i32;

// ============================================================================
// PRE-COMPUTED THRESHOLD CONSTANTS (avoid runtime log calculations)
// ============================================================================

/// log2(20) * 65536 ≈ 283242 (strong evidence threshold: e > 20)
/// Pre-computed to avoid runtime log calculation
pub const LOG_E_STRONG: LogEValue = 283242;

/// log2(100) * 65536 ≈ 435412 (very strong evidence threshold: e > 100)
pub const LOG_E_VERY_STRONG: LogEValue = 435412;

/// log2(1.5) * 65536 ≈ 38336 (connectivity positive evidence)
pub const LOG_LR_CONNECTIVITY_POS: LogEValue = 38336;

/// log2(0.5) * 65536 = -65536 (connectivity negative evidence)
pub const LOG_LR_CONNECTIVITY_NEG: LogEValue = -65536;

/// log2(2.0) * 65536 = 65536 (witness positive evidence)
pub const LOG_LR_WITNESS_POS: LogEValue = 65536;

/// log2(0.5) * 65536 = -65536 (witness negative evidence)
pub const LOG_LR_WITNESS_NEG: LogEValue = -65536;

/// Fixed-point scale factor
pub const FIXED_SCALE: i32 = 65536;

// ============================================================================
// SIMD-OPTIMIZED E-VALUE AGGREGATION
// ============================================================================

/// Aggregate log e-values using SIMD-friendly parallel lanes
///
/// This function is optimized for vectorization by processing values
/// in parallel lanes, allowing the compiler to generate SIMD instructions.
///
/// OPTIMIZATION: Uses 4 parallel lanes sized for 128-bit SIMD (SSE/NEON);
/// see `simd_aggregate_log_e_wide` for an 8-lane variant targeting 256-bit
/// SIMD (AVX2). The compiler can auto-vectorize this pattern effectively.
///
/// # Arguments
/// * `log_e_values` - Slice of log e-values (fixed-point, 16.16 format)
///
/// # Returns
/// The sum of all log e-values (product in log space)
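///
/// # Example
///
/// A minimal usage sketch (illustrative; assumes the crate is reachable as
/// `cognitum_gate_kernel`):
///
/// ```ignore
/// use cognitum_gate_kernel::evidence::simd_aggregate_log_e;
///
/// // Three e-values of 2.0 each (log2 = 1.0, i.e. 65536 in 16.16 fixed point).
/// let values = [65536, 65536, 65536];
/// // Product in log space is the sum: 3 * 65536.
/// assert_eq!(simd_aggregate_log_e(&values), 3 * 65536);
/// ```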
#[inline]
pub fn simd_aggregate_log_e(log_e_values: &[LogEValue]) -> i64 {
    // Use 4 parallel accumulator lanes for 128-bit SIMD
    // This allows the compiler to vectorize the inner loop
    let mut lanes = [0i64; 4];

    // Process in chunks of 4 for optimal SIMD usage
    let chunks = log_e_values.chunks_exact(4);
    let remainder = chunks.remainder();

    for chunk in chunks {
        // chunks_exact(4) guarantees exactly 4 elements per chunk
        lanes[0] += chunk[0] as i64;
        lanes[1] += chunk[1] as i64;
        lanes[2] += chunk[2] as i64;
        lanes[3] += chunk[3] as i64;
    }

    // Handle remainder
    for (i, &val) in remainder.iter().enumerate() {
        lanes[i % 4] += val as i64;
    }

    // Reduce lanes to single value
    lanes[0] + lanes[1] + lanes[2] + lanes[3]
}

/// Aggregate log e-values using 8 parallel lanes for AVX2
///
/// OPTIMIZATION: Uses 8 lanes for 256-bit SIMD (AVX2/AVX-512).
/// Falls back gracefully on platforms without AVX.
#[inline]
pub fn simd_aggregate_log_e_wide(log_e_values: &[LogEValue]) -> i64 {
    // Use 8 parallel accumulator lanes for 256-bit SIMD
    let mut lanes = [0i64; 8];

    let chunks = log_e_values.chunks_exact(8);
    let remainder = chunks.remainder();

    for chunk in chunks {
        // Unrolled for better codegen
        lanes[0] += chunk[0] as i64;
        lanes[1] += chunk[1] as i64;
        lanes[2] += chunk[2] as i64;
        lanes[3] += chunk[3] as i64;
        lanes[4] += chunk[4] as i64;
        lanes[5] += chunk[5] as i64;
        lanes[6] += chunk[6] as i64;
        lanes[7] += chunk[7] as i64;
    }

    // Handle remainder
    for (i, &val) in remainder.iter().enumerate() {
        lanes[i % 8] += val as i64;
    }

    // Tree reduction for lane aggregation
    let sum_0_3 = lanes[0] + lanes[1] + lanes[2] + lanes[3];
    let sum_4_7 = lanes[4] + lanes[5] + lanes[6] + lanes[7];
    sum_0_3 + sum_4_7
}

/// Aggregate mixture e-values for a tile set
///
/// Computes the product of e-values across tiles using log-space arithmetic
/// for numerical stability. This is the key operation for coherence gate
/// aggregation.
///
/// OPTIMIZATION:
/// - Uses SIMD-friendly parallel lanes
/// - Processes 255 tile e-values efficiently
/// - Returns in fixed-point log format for further processing
///
/// # Arguments
/// * `tile_log_e_values` - Array of 255 tile log e-values
///
/// # Returns
/// Aggregated log e-value (16.16 fixed-point log2 format, widened to `i64`)
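///
/// # Example
///
/// A minimal sketch (illustrative; assumes the crate path):
///
/// ```ignore
/// use cognitum_gate_kernel::evidence::aggregate_tile_evidence;
///
/// // All 255 tiles report an e-value of 2.0 (log2 = 65536 in 16.16 fixed point).
/// let tiles = [65536i32; 255];
/// assert_eq!(aggregate_tile_evidence(&tiles), 255 * 65536);
/// ```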
#[inline]
pub fn aggregate_tile_evidence(tile_log_e_values: &[LogEValue; 255]) -> i64 {
    simd_aggregate_log_e(tile_log_e_values)
}

/// Convert a fixed-point log e-value to its base-2 logarithm as `f32`
///
/// Note: this returns log2(e-value), not the e-value itself; apply `exp2`
/// (e.g. `libm::exp2f`) to the result to recover the e-value.
///
/// OPTIMIZATION: Marked #[inline(always)] for hot path usage
#[inline(always)]
pub const fn log_e_to_f32(log_e: LogEValue) -> f32 {
    // log2(e) = log_e / 65536, so e = 2^(log_e / 65536).
    // Exponentiation is left to the caller (keeps this const-evaluable in no_std);
    // here we only rescale the fixed-point value into a plain f32 log2.
    (log_e as f32) / 65536.0
}

/// Convert f32 e-value to log representation
///
/// OPTIMIZATION: Early exit for common cases, marked #[inline(always)]
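///
/// # Example
///
/// A minimal sketch (illustrative; assumes the crate path):
///
/// ```ignore
/// use cognitum_gate_kernel::evidence::{f32_to_log_e, FIXED_SCALE};
///
/// assert_eq!(f32_to_log_e(1.0), 0);            // neutral evidence
/// assert_eq!(f32_to_log_e(2.0), FIXED_SCALE);  // fast path for LR = 2
/// assert_eq!(f32_to_log_e(0.5), -FIXED_SCALE); // fast path for LR = 0.5
/// ```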
#[inline(always)]
pub fn f32_to_log_e(e: f32) -> LogEValue {
    if e <= 0.0 {
        i32::MIN
    } else if e == 1.0 {
        0 // Fast path for neutral evidence
    } else if e == 2.0 {
        FIXED_SCALE // Fast path for common LR=2
    } else if e == 0.5 {
        -FIXED_SCALE // Fast path for common LR=0.5
    } else {
        // log2(e) * 65536
        let log2_e = libm::log2f(e);
        (log2_e * 65536.0) as i32
    }
}

/// Compute log likelihood ratio directly in fixed-point
/// Avoids f32 conversion for common cases
///
/// OPTIMIZATION: Returns pre-computed constants for known observation types
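///
/// # Example
///
/// A minimal sketch (illustrative; assumes the crate path and that the
/// `Observation` type constants from `crate::delta` are publicly visible):
///
/// ```ignore
/// use cognitum_gate_kernel::delta::Observation;
/// use cognitum_gate_kernel::evidence::{log_lr_for_obs_type, LOG_LR_WITNESS_POS};
///
/// // A positive witness observation contributes log2(2.0) of evidence.
/// let lr = log_lr_for_obs_type(Observation::TYPE_WITNESS, 1, 0);
/// assert_eq!(lr, LOG_LR_WITNESS_POS);
/// ```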
#[inline(always)]
pub const fn log_lr_for_obs_type(obs_type: u8, flags: u8, _value: u16) -> LogEValue {
    // `_value` is unused here: the fast-path types are decided by type and flags alone.
    match obs_type {
        Observation::TYPE_CONNECTIVITY => {
            if flags != 0 {
                LOG_LR_CONNECTIVITY_POS
            } else {
                LOG_LR_CONNECTIVITY_NEG
            }
        }
        Observation::TYPE_WITNESS => {
            if flags != 0 {
                LOG_LR_WITNESS_POS
            } else {
                LOG_LR_WITNESS_NEG
            }
        }
        // For other types, return 0 (neutral) - the caller should use the f32 path
        _ => 0,
    }
}

/// Hypothesis state for tracking
///
/// Size: 16 bytes, aligned for efficient cache access
#[derive(Debug, Clone, Copy)]
#[repr(C, align(16))]
pub struct HypothesisState {
    /// Current accumulated log e-value (hot field, first for cache)
    pub log_e_value: LogEValue,
    /// Number of observations processed
    pub obs_count: u32,
    /// Hypothesis ID
    pub id: u16,
    /// Target vertex (for vertex-specific hypotheses)
    pub target: TileVertexId,
    /// Threshold vertex (for cut hypotheses)
    pub threshold: TileVertexId,
    /// Hypothesis type (0 = connectivity, 1 = cut, 2 = flow)
    pub hyp_type: u8,
    /// Status flags
    pub flags: u8,
}

impl Default for HypothesisState {
    #[inline]
    fn default() -> Self {
        Self::new(0, 0)
    }
}

impl HypothesisState {
    /// Hypothesis is active
    pub const FLAG_ACTIVE: u8 = 0x01;
    /// Hypothesis is rejected (e-value crossed threshold)
    pub const FLAG_REJECTED: u8 = 0x02;
    /// Hypothesis evidence is strong (e > 20)
    pub const FLAG_STRONG: u8 = 0x04;
    /// Hypothesis evidence is very strong (e > 100)
    pub const FLAG_VERY_STRONG: u8 = 0x08;

    /// Type: connectivity hypothesis
    pub const TYPE_CONNECTIVITY: u8 = 0;
    /// Type: cut membership hypothesis
    pub const TYPE_CUT: u8 = 1;
    /// Type: flow hypothesis
    pub const TYPE_FLOW: u8 = 2;

    /// Create a new hypothesis
    #[inline(always)]
    pub const fn new(id: u16, hyp_type: u8) -> Self {
        Self {
            log_e_value: 0, // e = 1 (neutral)
            obs_count: 0,
            id,
            target: 0,
            threshold: 0,
            hyp_type,
            flags: Self::FLAG_ACTIVE,
        }
    }

    /// Create a connectivity hypothesis for a vertex
    #[inline(always)]
    pub const fn connectivity(id: u16, vertex: TileVertexId) -> Self {
        Self {
            log_e_value: 0,
            obs_count: 0,
            id,
            target: vertex,
            threshold: 0,
            hyp_type: Self::TYPE_CONNECTIVITY,
            flags: Self::FLAG_ACTIVE,
        }
    }

    /// Create a cut membership hypothesis
    #[inline(always)]
    pub const fn cut_membership(id: u16, vertex: TileVertexId, threshold: TileVertexId) -> Self {
        Self {
            log_e_value: 0,
            obs_count: 0,
            id,
            target: vertex,
            threshold,
            hyp_type: Self::TYPE_CUT,
            flags: Self::FLAG_ACTIVE,
        }
    }

    /// Check if hypothesis is active
    ///
    /// OPTIMIZATION: #[inline(always)] - called in every hypothesis loop
    #[inline(always)]
    pub const fn is_active(&self) -> bool {
        self.flags & Self::FLAG_ACTIVE != 0
    }

    /// Check if hypothesis is rejected
    #[inline(always)]
    pub const fn is_rejected(&self) -> bool {
        self.flags & Self::FLAG_REJECTED != 0
    }

    /// Check if hypothesis can be updated (active and not rejected)
    ///
    /// OPTIMIZATION: Combined check to reduce branch mispredictions
    #[inline(always)]
    pub const fn can_update(&self) -> bool {
        // Active AND not rejected = (flags & ACTIVE) != 0 && (flags & REJECTED) == 0
        (self.flags & (Self::FLAG_ACTIVE | Self::FLAG_REJECTED)) == Self::FLAG_ACTIVE
    }

    /// Get e-value as approximate f32 (2^(log_e/65536))
    #[inline(always)]
    pub fn e_value_approx(&self) -> f32 {
        let log2_val = (self.log_e_value as f32) / 65536.0;
        libm::exp2f(log2_val)
    }

    /// Update with a new observation (f32 likelihood ratio)
    /// Returns true if the hypothesis is now rejected
    ///
    /// OPTIMIZATION: Uses pre-computed threshold constants
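    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative; assumes the crate path):
    ///
    /// ```ignore
    /// use cognitum_gate_kernel::evidence::HypothesisState;
    ///
    /// let mut hyp = HypothesisState::connectivity(0, 3);
    /// // Each LR = 2.0 doubles the e-value; after five updates e = 32 > 20,
    /// // so the null is rejected and `update` returns true.
    /// let mut rejected = false;
    /// for _ in 0..5 {
    ///     rejected = hyp.update(2.0);
    /// }
    /// assert!(rejected && hyp.is_rejected());
    /// ```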
    #[inline]
    pub fn update(&mut self, likelihood_ratio: f32) -> bool {
        if !self.can_update() {
            return self.is_rejected();
        }

        // Update log e-value: log(e') = log(e) + log(LR)
        let log_lr = f32_to_log_e(likelihood_ratio);
        self.update_with_log_lr(log_lr)
    }

    /// Update with a pre-computed log likelihood ratio (fixed-point)
    /// Returns true if the hypothesis is now rejected
    ///
    /// OPTIMIZATION: Avoids f32->log conversion when log_lr is pre-computed
    #[inline(always)]
    pub fn update_with_log_lr(&mut self, log_lr: LogEValue) -> bool {
        self.log_e_value = self.log_e_value.saturating_add(log_lr);
        self.obs_count += 1;

        // Update strength flags using pre-computed constants
        // OPTIMIZATION: Single comparison chain with constants
        if self.log_e_value > LOG_E_VERY_STRONG {
            self.flags |= Self::FLAG_VERY_STRONG | Self::FLAG_STRONG;
        } else if self.log_e_value > LOG_E_STRONG {
            self.flags |= Self::FLAG_STRONG;
            self.flags &= !Self::FLAG_VERY_STRONG;
        } else {
            self.flags &= !(Self::FLAG_STRONG | Self::FLAG_VERY_STRONG);
        }

        // Check rejection threshold (alpha = 0.05 => e > 20)
        if self.log_e_value > LOG_E_STRONG {
            self.flags |= Self::FLAG_REJECTED;
            return true;
        }

        false
    }

    /// Reset the hypothesis
    #[inline]
    pub fn reset(&mut self) {
        self.log_e_value = 0;
        self.obs_count = 0;
        self.flags = Self::FLAG_ACTIVE;
    }
}

/// Observation record for sliding window
#[derive(Debug, Clone, Copy, Default)]
#[repr(C)]
pub struct ObsRecord {
    /// Observation data
    pub obs: Observation,
    /// Timestamp (tick)
    pub tick: u32,
}

/// Evidence accumulator for tile-local e-value tracking
///
/// OPTIMIZATION: Cache-line aligned (64 bytes) with hot fields first
#[derive(Clone)]
#[repr(C, align(64))]
pub struct EvidenceAccumulator {
    // === HOT FIELDS (frequently accessed) ===
    /// Global accumulated log e-value
    pub global_log_e: LogEValue,
    /// Total observations processed
    pub total_obs: u32,
    /// Current tick
    pub current_tick: u32,
    /// Window head pointer (circular buffer)
    pub window_head: u16,
    /// Window count (number of valid entries)
    pub window_count: u16,
    /// Number of active hypotheses
    pub num_hypotheses: u8,
    /// Reserved padding
    pub _reserved: [u8; 1],
    /// Rejected hypothesis count
    pub rejected_count: u16,
    /// Status flags
    pub status: u16,
    /// Padding to align cold fields
    _hot_pad: [u8; 40],

    // === COLD FIELDS ===
    /// Active hypotheses
    pub hypotheses: [HypothesisState; MAX_HYPOTHESES],
    /// Sliding window of recent observations
    pub window: [ObsRecord; WINDOW_SIZE],
}

impl Default for EvidenceAccumulator {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl EvidenceAccumulator {
    /// Status: accumulator is active
    pub const STATUS_ACTIVE: u16 = 0x0001;
    /// Status: at least one hypothesis rejected
    pub const STATUS_HAS_REJECTION: u16 = 0x0002;
    /// Status: global evidence is significant
    pub const STATUS_SIGNIFICANT: u16 = 0x0004;

    /// Create a new accumulator
    pub const fn new() -> Self {
        Self {
            global_log_e: 0,
            total_obs: 0,
            current_tick: 0,
            window_head: 0,
            window_count: 0,
            num_hypotheses: 0,
            _reserved: [0; 1],
            rejected_count: 0,
            status: Self::STATUS_ACTIVE,
            _hot_pad: [0; 40],
            hypotheses: [HypothesisState::new(0, 0); MAX_HYPOTHESES],
            window: [ObsRecord {
                obs: Observation {
                    vertex: 0,
                    obs_type: 0,
                    flags: 0,
                    value: 0,
                },
                tick: 0,
            }; WINDOW_SIZE],
        }
    }

    /// Add a new hypothesis to track
    pub fn add_hypothesis(&mut self, hypothesis: HypothesisState) -> bool {
        if self.num_hypotheses as usize >= MAX_HYPOTHESES {
            return false;
        }

        self.hypotheses[self.num_hypotheses as usize] = hypothesis;
        self.num_hypotheses += 1;
        true
    }

    /// Add a connectivity hypothesis
    pub fn add_connectivity_hypothesis(&mut self, vertex: TileVertexId) -> bool {
        let id = self.num_hypotheses as u16;
        self.add_hypothesis(HypothesisState::connectivity(id, vertex))
    }

    /// Add a cut membership hypothesis
    pub fn add_cut_hypothesis(&mut self, vertex: TileVertexId, threshold: TileVertexId) -> bool {
        let id = self.num_hypotheses as u16;
        self.add_hypothesis(HypothesisState::cut_membership(id, vertex, threshold))
    }

    /// Process an observation
    ///
    /// OPTIMIZATION: Uses fixed-point log LR for common observation types,
    /// avoids f32 conversion where possible
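    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative; assumes the crate path and the
    /// `Observation::connectivity` constructor used in this module's tests):
    ///
    /// ```ignore
    /// use cognitum_gate_kernel::evidence::EvidenceAccumulator;
    /// use cognitum_gate_kernel::delta::Observation;
    ///
    /// let mut acc = EvidenceAccumulator::new();
    /// acc.add_connectivity_hypothesis(5);
    ///
    /// // Each positive connectivity observation on vertex 5 adds log2(1.5) of evidence.
    /// for tick in 0..10 {
    ///     acc.process_observation(Observation::connectivity(5, true), tick);
    /// }
    /// assert_eq!(acc.total_obs, 10);
    /// assert!(acc.global_e_value() > 1.0);
    /// ```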
    #[inline]
    pub fn process_observation(&mut self, obs: Observation, tick: u32) {
        self.current_tick = tick;
        self.total_obs += 1;

        // Add to sliding window using wrapping arithmetic
        // OPTIMIZATION: Avoid modulo with power-of-2 window size
        let idx = self.window_head as usize;
        // SAFETY: WINDOW_SIZE is 64, idx < 64
        unsafe {
            *self.window.get_unchecked_mut(idx) = ObsRecord { obs, tick };
        }
        // OPTIMIZATION: Bit mask for power-of-2 wrap (64 = 0x40, mask = 0x3F)
        self.window_head = (self.window_head + 1) & (WINDOW_SIZE as u16 - 1);
        if (self.window_count as usize) < WINDOW_SIZE {
            self.window_count += 1;
        }

        // Compute log likelihood ratio in fixed-point where possible
        // OPTIMIZATION: Use pre-computed constants for common types
        let log_lr = self.compute_log_likelihood_ratio(&obs);

        // Update global e-value
        self.global_log_e = self.global_log_e.saturating_add(log_lr);

        // Update relevant hypotheses
        // OPTIMIZATION: Cache num_hypotheses to avoid repeated load
        let num_hyp = self.num_hypotheses as usize;
        for i in 0..num_hyp {
            // SAFETY: i < num_hypotheses <= MAX_HYPOTHESES
            let hyp = unsafe { self.hypotheses.get_unchecked(i) };

            // OPTIMIZATION: Use combined can_update check
            if !hyp.can_update() {
                continue;
            }

            // Check if observation is relevant to this hypothesis
            // OPTIMIZATION: Early exit on type mismatch (most common case)
            let is_relevant = self.is_obs_relevant(hyp, &obs);

            if is_relevant {
                // SAFETY: i < num_hypotheses
                let hyp_mut = unsafe { self.hypotheses.get_unchecked_mut(i) };
                if hyp_mut.update_with_log_lr(log_lr) {
                    self.rejected_count += 1;
                    self.status |= Self::STATUS_HAS_REJECTION;
                }
            }
        }

        // Update significance status using pre-computed constant
        if self.global_log_e > LOG_E_STRONG {
            self.status |= Self::STATUS_SIGNIFICANT;
        }
    }

    /// Check if observation is relevant to hypothesis
    ///
    /// OPTIMIZATION: Inlined for hot path
    #[inline(always)]
    fn is_obs_relevant(&self, hyp: &HypothesisState, obs: &Observation) -> bool {
        match (hyp.hyp_type, obs.obs_type) {
            (HypothesisState::TYPE_CONNECTIVITY, Observation::TYPE_CONNECTIVITY) => {
                obs.vertex == hyp.target
            }
            (HypothesisState::TYPE_CUT, Observation::TYPE_CUT_MEMBERSHIP) => {
                obs.vertex == hyp.target
            }
            (HypothesisState::TYPE_FLOW, Observation::TYPE_FLOW) => obs.vertex == hyp.target,
            _ => false,
        }
    }

    /// Compute log likelihood ratio in fixed-point
    ///
    /// OPTIMIZATION: Returns pre-computed constants for common types,
    /// only falls back to f32 for complex calculations
    #[inline(always)]
    fn compute_log_likelihood_ratio(&self, obs: &Observation) -> LogEValue {
        match obs.obs_type {
            Observation::TYPE_CONNECTIVITY => {
                // Use pre-computed constants
                if obs.flags != 0 {
                    LOG_LR_CONNECTIVITY_POS // 1.5
                } else {
                    LOG_LR_CONNECTIVITY_NEG // 0.5
                }
            }
            Observation::TYPE_WITNESS => {
                // Use pre-computed constants
                if obs.flags != 0 {
                    LOG_LR_WITNESS_POS // 2.0
                } else {
                    LOG_LR_WITNESS_NEG // 0.5
                }
            }
            Observation::TYPE_CUT_MEMBERSHIP => {
                // Confidence-based LR = 1.0 + confidence, with confidence = value / 65535 in [0, 1].
                // log2(1 + x) is approximated linearly by x / 2, which stays in fixed-point and
                // never overestimates the true value on [0, 1]. In 16.16 format this is `value >> 1`.
                (obs.value as i32) >> 1
            }
            Observation::TYPE_FLOW => {
                // Flow-based: needs f32 path
                let flow = (obs.value as f32) / 1000.0;
                let lr = if flow > 0.5 {
                    1.0 + flow
                } else {
                    1.0 / (1.0 + flow)
                };
                f32_to_log_e(lr)
            }
            _ => 0, // Neutral
        }
    }

    /// Compute likelihood ratio for an observation (f32 version for compatibility)
    ///
    /// Not called on the current fixed-point hot path; kept for reference,
    /// hence the `dead_code` allow.
    #[allow(dead_code)]
    #[inline]
    fn compute_likelihood_ratio(&self, obs: &Observation) -> f32 {
        match obs.obs_type {
            Observation::TYPE_CONNECTIVITY => {
                if obs.flags != 0 { 1.5 } else { 0.5 }
            }
            Observation::TYPE_CUT_MEMBERSHIP => {
                let confidence = (obs.value as f32) / 65535.0;
                1.0 + confidence
            }
            Observation::TYPE_FLOW => {
                let flow = (obs.value as f32) / 1000.0;
                if flow > 0.5 { 1.0 + flow } else { 1.0 / (1.0 + flow) }
            }
            Observation::TYPE_WITNESS => {
                if obs.flags != 0 { 2.0 } else { 0.5 }
            }
            _ => 1.0,
        }
    }

    /// Get global e-value as approximate f32
    #[inline(always)]
    pub fn global_e_value(&self) -> f32 {
        let log2_val = (self.global_log_e as f32) / 65536.0;
        libm::exp2f(log2_val)
    }

    /// Check if any hypothesis is rejected
    #[inline(always)]
    pub fn has_rejection(&self) -> bool {
        self.status & Self::STATUS_HAS_REJECTION != 0
    }

    /// Check if evidence is significant (e > 20)
    #[inline(always)]
    pub fn is_significant(&self) -> bool {
        self.status & Self::STATUS_SIGNIFICANT != 0
    }

    /// Reset all hypotheses
    pub fn reset(&mut self) {
        for h in self.hypotheses[..self.num_hypotheses as usize].iter_mut() {
            h.reset();
        }
        self.window_head = 0;
        self.window_count = 0;
        self.global_log_e = 0;
        self.rejected_count = 0;
        self.status = Self::STATUS_ACTIVE;
    }

    /// Process a batch of observations efficiently
    ///
    /// OPTIMIZATION: Batch processing reduces function call overhead and
    /// allows better cache utilization by processing observations in bulk.
    ///
    /// At most `WINDOW_SIZE` (64) observations are consumed per call; callers
    /// with larger batches should split them into chunks.
    ///
    /// # Arguments
    /// * `observations` - Slice of (observation, tick) pairs
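    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative; assumes the crate path):
    ///
    /// ```ignore
    /// use cognitum_gate_kernel::evidence::EvidenceAccumulator;
    /// use cognitum_gate_kernel::delta::Observation;
    ///
    /// let mut acc = EvidenceAccumulator::new();
    /// let batch: [(Observation, u32); 3] = [
    ///     (Observation::connectivity(1, true), 0),
    ///     (Observation::connectivity(1, true), 1),
    ///     (Observation::connectivity(1, false), 2),
    /// ];
    /// acc.process_observation_batch(&batch);
    /// assert_eq!(acc.total_obs, 3);
    /// ```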
    #[inline]
    pub fn process_observation_batch(&mut self, observations: &[(Observation, u32)]) {
        // Cap the batch at WINDOW_SIZE (64) entries; larger batches must be
        // chunked by the caller.
        let batch_size = observations.len().min(WINDOW_SIZE);

        // Process in cache-friendly order
        for &(obs, tick) in observations.iter().take(batch_size) {
            self.process_observation(obs, tick);
        }
    }

    /// Aggregate all hypothesis e-values using SIMD
    ///
    /// OPTIMIZATION: Uses SIMD-friendly parallel lane accumulation
    /// to sum all active hypothesis log e-values efficiently.
    ///
    /// # Returns
    /// Total accumulated log e-value across all active hypotheses
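    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative; assumes the crate path):
    ///
    /// ```ignore
    /// use cognitum_gate_kernel::evidence::EvidenceAccumulator;
    /// use cognitum_gate_kernel::delta::Observation;
    ///
    /// let mut acc = EvidenceAccumulator::new();
    /// acc.add_connectivity_hypothesis(2);
    /// acc.process_observation(Observation::connectivity(2, true), 0);
    ///
    /// // One positive connectivity observation contributes log2(1.5) to the hypothesis.
    /// assert!(acc.aggregate_hypotheses_simd() > 0);
    /// ```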
    #[inline]
    pub fn aggregate_hypotheses_simd(&self) -> i64 {
        let mut lanes = [0i64; 4];
        let num_hyp = self.num_hypotheses as usize;

        // Process hypotheses in 4-lane parallel pattern
        for i in 0..num_hyp {
            let hyp = &self.hypotheses[i];
            if hyp.is_active() {
                lanes[i % 4] += hyp.log_e_value as i64;
            }
        }

        lanes[0] + lanes[1] + lanes[2] + lanes[3]
    }

    /// Fast check if evidence level exceeds threshold
    ///
    /// OPTIMIZATION: Uses pre-computed log threshold constants
    /// to avoid expensive exp2f conversion.
    ///
    /// # Arguments
    /// * `threshold_log` - Log threshold (e.g., LOG_E_STRONG for alpha=0.05)
    ///
    /// # Returns
    /// true if global evidence exceeds threshold
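    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative; assumes the crate path):
    ///
    /// ```ignore
    /// use cognitum_gate_kernel::evidence::{EvidenceAccumulator, LOG_E_STRONG};
    ///
    /// let acc = EvidenceAccumulator::new();
    /// // A fresh accumulator holds neutral evidence (e = 1), well below e > 20.
    /// assert!(!acc.exceeds_threshold(LOG_E_STRONG));
    /// ```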
    #[inline(always)]
    pub fn exceeds_threshold(&self, threshold_log: LogEValue) -> bool {
        self.global_log_e > threshold_log
    }

    /// Get memory size
    pub const fn memory_size() -> usize {
        size_of::<Self>()
    }
}

// Compile-time size assertions
const _: () = assert!(
    size_of::<HypothesisState>() == 16,
    "HypothesisState must be 16 bytes"
);
const _: () = assert!(size_of::<ObsRecord>() == 12, "ObsRecord must be 12 bytes");

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_log_e_conversion() {
        // e = 1 => log = 0
        assert_eq!(f32_to_log_e(1.0), 0);

        // e = 2 => log2(2) * 65536 = 65536
        let log_2 = f32_to_log_e(2.0);
        assert!((log_2 - 65536).abs() < 100);

        // e = 4 => log2(4) * 65536 = 131072
        let log_4 = f32_to_log_e(4.0);
        assert!((log_4 - 131072).abs() < 100);
    }

    #[test]
    fn test_hypothesis_state() {
        let mut hyp = HypothesisState::new(0, HypothesisState::TYPE_CONNECTIVITY);
        assert!(hyp.is_active());
        assert!(!hyp.is_rejected());
        assert_eq!(hyp.obs_count, 0);

        // Update with LR = 2 a few times
        for _ in 0..5 {
            hyp.update(2.0);
        }
        assert_eq!(hyp.obs_count, 5);
        assert!(hyp.e_value_approx() > 20.0); // 2^5 = 32 > 20
    }

    #[test]
    fn test_hypothesis_rejection() {
        let mut hyp = HypothesisState::new(0, HypothesisState::TYPE_CUT);

        // Keep updating until rejection
        for _ in 0..10 {
            if hyp.update(2.0) {
                break;
            }
        }

        assert!(hyp.is_rejected());
    }

    #[test]
    fn test_accumulator_new() {
        let acc = EvidenceAccumulator::new();
        assert_eq!(acc.num_hypotheses, 0);
        assert_eq!(acc.total_obs, 0);
        assert!(!acc.has_rejection());
    }

    #[test]
    fn test_add_hypothesis() {
        let mut acc = EvidenceAccumulator::new();
        assert!(acc.add_connectivity_hypothesis(5));
        assert!(acc.add_cut_hypothesis(10, 15));
        assert_eq!(acc.num_hypotheses, 2);
    }

    #[test]
    fn test_process_observation() {
        let mut acc = EvidenceAccumulator::new();
        acc.add_connectivity_hypothesis(5);

        // Process observations
        for tick in 0..10 {
            let obs = Observation::connectivity(5, true);
            acc.process_observation(obs, tick);
        }

        assert_eq!(acc.total_obs, 10);
        assert!(acc.global_e_value() > 1.0);
    }

    #[test]
    fn test_sliding_window() {
        let mut acc = EvidenceAccumulator::new();

        // Fill window
        for tick in 0..(WINDOW_SIZE as u32 + 10) {
            let obs = Observation::connectivity(0, true);
            acc.process_observation(obs, tick);
        }

        assert_eq!(acc.window_count, WINDOW_SIZE as u16);
    }

    #[test]
    fn test_memory_size() {
        let size = EvidenceAccumulator::memory_size();
        // Should be reasonable for tile budget
        assert!(size < 4096, "EvidenceAccumulator too large: {} bytes", size);
    }
}