//! Temporal analysis for m1nd-core: co-change prediction, causal chain
//! detection, temporal decay/resurrection, velocity, and impact radius.
1// === crates/m1nd-core/src/temporal.rs ===
2
3use std::cmp::Ordering;
4use std::collections::{BinaryHeap, HashMap, VecDeque};
5
6use crate::domain::DomainConfig;
7use crate::error::{M1ndError, M1ndResult};
8use crate::graph::Graph;
9use crate::types::*;
10
// ---------------------------------------------------------------------------
// Constants from temporal_v2.py
// ---------------------------------------------------------------------------

/// Default half-life for temporal decay (hours).
pub const DEFAULT_HALF_LIFE_HOURS: f32 = 168.0; // 7 days
/// Decay formula: exp(-k * age_hours), k = ln(2) / half_life.
pub const LN2: f32 = std::f32::consts::LN_2;
/// Default chain budget (FM-TMP-005): caps priority-queue pops per detect()
/// call. Prevents combinatorial explosion.
pub const DEFAULT_CHAIN_BUDGET: u64 = 10_000;
/// Default max co-change matrix entries (FM-TMP-001). Prevents O(N^2) memory.
pub const DEFAULT_MATRIX_BUDGET: u64 = 500_000;
/// Default causal chain max depth (path length cap, in nodes).
pub const DEFAULT_CHAIN_MAX_DEPTH: u8 = 6;
/// Resurrection dormancy depth threshold (days); dormancy depth is measured
/// in multiples of this period.
pub const RESURRECTION_DORMANCY_THRESHOLD_DAYS: f32 = 35.0;
/// Co-change strength decay applied per BFS hop during bootstrap.
pub const CO_CHANGE_DECAY_FACTOR: f32 = 0.95;
/// Down-weighting of bootstrapped (structure-derived) strengths relative to
/// learned co-change observations.
pub const BOOTSTRAP_WEIGHT: f32 = 0.3;
/// Max entries per row in co-change matrix.
pub const CO_CHANGE_MAX_ROW: usize = 100;
/// Hours of inactivity after which a node counts as dormant (30 days).
pub const DORMANT_HOURS: f64 = 720.0;
/// Resurrection additive floor (minimum score for a resurrected node).
pub const RESURRECTION_BASE_FLOOR: f32 = 0.3;
/// Scale applied to log-dormancy depth when raising the resurrection floor.
pub const RESURRECTION_DEPTH_SCALE: f32 = 0.1;
/// Raw decay floor to prevent f32 underflow for very old nodes.
pub const RAW_DECAY_FLOOR: f32 = 1e-6;
41
42// ---------------------------------------------------------------------------
43// CoChangeMatrix — sparse co-change tracking (temporal_v2.py CoChangeMatrix)
44// FM-TMP-001 fix: CSR-like sparse storage with hard entry budget.
45// Replaces: temporal_v2.py CoChangeMatrix (Python HashMap<HashMap>)
46// ---------------------------------------------------------------------------
47
/// Entry in the co-change matrix: a target node plus the coupling strength
/// linking it to the row's source node (higher = changes together more).
#[derive(Clone, Copy, Debug)]
pub struct CoChangeEntry {
    /// Node predicted to co-change with the row's owner.
    pub target: NodeId,
    /// Coupling strength; bootstrap values lie above the 0.001 noise floor.
    pub strength: FiniteF32,
}
54
/// Sparse co-change matrix with bounded entry count.
/// Bootstrapped from graph structure (BFS depth 3 from each node),
/// refined with real co-change observations via `record_co_change`.
/// FM-TMP-001: hard cap on total entries prevents O(N^2) memory.
pub struct CoChangeMatrix {
    /// Per-node list of co-change entries, indexed by NodeId
    /// (sorted by descending strength after bootstrap).
    rows: Vec<Vec<CoChangeEntry>>,
    /// Total entries across all rows (for budget enforcement).
    total_entries: u64,
    /// Maximum total entries allowed (FM-TMP-001).
    budget: u64,
    /// False while purely bootstrap-derived; true once any real
    /// co-change observation has been recorded.
    is_learned: bool,
}
69
70impl CoChangeMatrix {
71    /// Bootstrap co-change from graph structure (BFS depth 3 from each node).
72    /// Replaces: temporal_v2.py CoChangeMatrix.bootstrap()
73    pub fn bootstrap(graph: &Graph, budget: u64) -> M1ndResult<Self> {
74        let n = graph.num_nodes() as usize;
75        let mut rows = vec![Vec::new(); n];
76        let mut total_entries = 0u64;
77
78        for start in 0..n {
79            if total_entries >= budget {
80                break;
81            }
82
83            let start_node = NodeId::new(start as u32);
84            let mut visited = vec![false; n];
85            visited[start] = true;
86
87            let mut queue = VecDeque::new();
88            queue.push_back((start_node, 0u8, 1.0f32));
89
90            let mut entries: Vec<CoChangeEntry> = Vec::new();
91
92            while let Some((node, depth, strength)) = queue.pop_front() {
93                if depth >= 3 {
94                    continue;
95                }
96
97                let range = graph.csr.out_range(node);
98                for j in range {
99                    let tgt = graph.csr.targets[j];
100                    let tgt_idx = tgt.as_usize();
101                    if tgt_idx >= n || visited[tgt_idx] {
102                        continue;
103                    }
104                    visited[tgt_idx] = true;
105
106                    let w = graph.csr.read_weight(EdgeIdx::new(j as u32)).get();
107                    let new_strength = strength * w * CO_CHANGE_DECAY_FACTOR * BOOTSTRAP_WEIGHT;
108
109                    if new_strength > 0.001 && entries.len() < CO_CHANGE_MAX_ROW {
110                        entries.push(CoChangeEntry {
111                            target: tgt,
112                            strength: FiniteF32::new(new_strength),
113                        });
114                    }
115
116                    queue.push_back((tgt, depth + 1, new_strength));
117                }
118            }
119
120            entries.sort_by(|a, b| b.strength.cmp(&a.strength));
121            entries.truncate(CO_CHANGE_MAX_ROW);
122            total_entries += entries.len() as u64;
123            rows[start] = entries;
124        }
125
126        Ok(Self {
127            rows,
128            total_entries,
129            budget,
130            is_learned: false,
131        })
132    }
133
134    /// Record an observed co-change between two nodes.
135    /// Updates coupling strength. Respects budget cap (FM-TMP-001).
136    /// Replaces: temporal_v2.py CoChangeMatrix.record_co_change()
137    pub fn record_co_change(
138        &mut self,
139        source: NodeId,
140        target: NodeId,
141        _timestamp: f64,
142    ) -> M1ndResult<()> {
143        let src_idx = source.as_usize();
144        if src_idx >= self.rows.len() {
145            return Ok(());
146        }
147
148        // Check if entry already exists
149        if let Some(entry) = self.rows[src_idx].iter_mut().find(|e| e.target == target) {
150            // Strengthen existing
151            entry.strength = FiniteF32::new((entry.strength.get() + 0.1).min(1.0));
152            self.is_learned = true;
153            return Ok(());
154        }
155
156        // Budget check
157        if self.total_entries >= self.budget {
158            return Err(M1ndError::MatrixBudgetExhausted {
159                budget: self.budget,
160            });
161        }
162
163        // Row capacity check
164        if self.rows[src_idx].len() >= CO_CHANGE_MAX_ROW {
165            // Replace weakest entry
166            if let Some(weakest) = self.rows[src_idx]
167                .iter()
168                .enumerate()
169                .min_by(|a, b| a.1.strength.cmp(&b.1.strength))
170                .map(|(i, _)| i)
171            {
172                self.rows[src_idx][weakest] = CoChangeEntry {
173                    target,
174                    strength: FiniteF32::new(0.1),
175                };
176            }
177        } else {
178            self.rows[src_idx].push(CoChangeEntry {
179                target,
180                strength: FiniteF32::new(0.1),
181            });
182            self.total_entries += 1;
183        }
184
185        self.is_learned = true;
186        Ok(())
187    }
188
189    /// Predict co-change partners for a changed node, sorted by coupling strength.
190    /// Replaces: temporal_v2.py CoChangeMatrix.predict()
191    pub fn predict(&self, changed_node: NodeId, top_k: usize) -> Vec<CoChangeEntry> {
192        let idx = changed_node.as_usize();
193        if idx >= self.rows.len() {
194            return Vec::new();
195        }
196        let mut entries = self.rows[idx].clone();
197        entries.sort_by(|a, b| b.strength.cmp(&a.strength));
198        entries.truncate(top_k);
199        entries
200    }
201
202    /// Number of entries in the matrix.
203    pub fn num_entries(&self) -> u64 {
204        self.total_entries
205    }
206
207    /// Populate co-change data from git commit groups.
208    /// Each group is a list of external_ids (e.g. "file::src/main.rs") that changed together.
209    /// Resolves IDs via the graph, then records co-change for each pair in the group.
210    pub fn populate_from_commit_groups(
211        &mut self,
212        graph: &Graph,
213        commit_groups: &[Vec<String>],
214    ) -> M1ndResult<()> {
215        for group in commit_groups {
216            // Resolve external IDs to NodeIds
217            let node_ids: Vec<NodeId> = group
218                .iter()
219                .filter_map(|path| {
220                    let file_id = if path.starts_with("file::") {
221                        path.clone()
222                    } else {
223                        format!("file::{}", path)
224                    };
225                    graph.resolve_id(&file_id)
226                })
227                .collect();
228
229            // Record co-change for each pair in the group
230            for i in 0..node_ids.len() {
231                for j in (i + 1)..node_ids.len() {
232                    // Record both directions
233                    let _ = self.record_co_change(node_ids[i], node_ids[j], 0.0);
234                    let _ = self.record_co_change(node_ids[j], node_ids[i], 0.0);
235                }
236            }
237        }
238        Ok(())
239    }
240}
241
242// ---------------------------------------------------------------------------
243// CausalChain — causal chain detection (temporal_v2.py CausalChainDetector)
244// FM-TMP-005 fix: budget-limited DFS.
245// ---------------------------------------------------------------------------
246
/// A single causal chain: path of nodes with cumulative strength.
/// Replaces: temporal_v2.py CausalChain dataclass
#[derive(Clone, Debug)]
pub struct CausalChain {
    /// Ordered list of nodes in the chain (first element is the source).
    pub path: Vec<NodeId>,
    /// Relation labels between consecutive nodes (len = path.len() - 1).
    pub relations: Vec<InternedStr>,
    /// Cumulative causal strength (product of edge causal_strengths along
    /// the path); never below the detector's min_strength.
    pub cumulative_strength: FiniteF32,
}
258
259/// Heap entry for priority-queue chain detection.
260#[derive(Clone)]
261struct ChainEntry {
262    path: Vec<NodeId>,
263    relations: Vec<InternedStr>,
264    cumulative: f32,
265}
266
267impl PartialEq for ChainEntry {
268    fn eq(&self, other: &Self) -> bool {
269        self.cumulative == other.cumulative
270    }
271}
272impl Eq for ChainEntry {}
273impl PartialOrd for ChainEntry {
274    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
275        Some(self.cmp(other))
276    }
277}
278impl Ord for ChainEntry {
279    fn cmp(&self, other: &Self) -> Ordering {
280        self.cumulative.total_cmp(&other.cumulative)
281    }
282}
283
/// Causal chain detector. Best-first search along forward causal edges
/// with a hard operation budget.
/// Replaces: temporal_v2.py CausalChainDetector
pub struct CausalChainDetector {
    /// Paths stop extending once their node count exceeds this.
    max_depth: u8,
    /// Chains whose cumulative strength drops below this are pruned.
    min_strength: FiniteF32,
    /// FM-TMP-005: maximum heap pops per detect() call.
    chain_budget: u64,
}
291
292impl CausalChainDetector {
293    pub fn new(max_depth: u8, min_strength: FiniteF32, chain_budget: u64) -> Self {
294        Self {
295            max_depth,
296            min_strength,
297            chain_budget,
298        }
299    }
300
301    pub fn with_defaults() -> Self {
302        Self {
303            max_depth: DEFAULT_CHAIN_MAX_DEPTH,
304            min_strength: FiniteF32::new(0.1),
305            chain_budget: DEFAULT_CHAIN_BUDGET,
306        }
307    }
308
309    /// Detect causal chains from a source node. Budget-limited priority queue (DEC-016, FM-TMP-005).
310    /// Replaces: temporal_v2.py CausalChainDetector.detect()
311    pub fn detect(&self, graph: &Graph, source: NodeId) -> M1ndResult<Vec<CausalChain>> {
312        let n = graph.num_nodes() as usize;
313        if source.as_usize() >= n {
314            return Ok(Vec::new());
315        }
316
317        let mut heap = BinaryHeap::new();
318        let mut chains = Vec::new();
319        let mut ops = 0u64;
320
321        heap.push(ChainEntry {
322            path: vec![source],
323            relations: Vec::new(),
324            cumulative: 1.0,
325        });
326
327        while let Some(entry) = heap.pop() {
328            ops += 1;
329            if ops > self.chain_budget {
330                break; // FM-TMP-005: budget exhausted
331            }
332
333            if entry.cumulative < self.min_strength.get() {
334                continue;
335            }
336
337            let current = *entry.path.last().unwrap();
338            let depth = entry.path.len();
339
340            if depth > 1 {
341                // Record complete chain
342                chains.push(CausalChain {
343                    path: entry.path.clone(),
344                    relations: entry.relations.clone(),
345                    cumulative_strength: FiniteF32::new(entry.cumulative),
346                });
347            }
348
349            if depth > self.max_depth as usize {
350                continue;
351            }
352
353            // Extend along causal edges
354            let range = graph.csr.out_range(current);
355            for j in range {
356                let causal = graph.csr.causal_strengths[j].get();
357                if causal <= 0.0 {
358                    continue;
359                }
360                let tgt = graph.csr.targets[j];
361                // Avoid cycles
362                if entry.path.contains(&tgt) {
363                    continue;
364                }
365
366                let new_cumulative = entry.cumulative * causal;
367                if new_cumulative < self.min_strength.get() {
368                    continue;
369                }
370
371                let mut new_path = entry.path.clone();
372                new_path.push(tgt);
373                let mut new_rels = entry.relations.clone();
374                new_rels.push(graph.csr.relations[j]);
375
376                heap.push(ChainEntry {
377                    path: new_path,
378                    relations: new_rels,
379                    cumulative: new_cumulative,
380                });
381            }
382        }
383
384        chains.sort_by(|a, b| b.cumulative_strength.cmp(&a.cumulative_strength));
385        Ok(chains)
386    }
387}
388
389// ---------------------------------------------------------------------------
390// TemporalDecay — decay + resurrection (temporal_v2.py TemporalDecayResurrection)
391// ---------------------------------------------------------------------------
392
/// Per-node temporal decay score.
/// Replaces: temporal_v2.py TemporalDecayResurrection.score()
#[derive(Clone, Copy, Debug)]
pub struct DecayScore {
    pub node: NodeId,
    /// Raw decay: exp(-k * age_hours), floored at RAW_DECAY_FLOOR.
    pub raw_decay: FiniteF32,
    /// Resurrection floor value when the node was dormant longer than
    /// DORMANT_HOURS; FiniteF32::ONE otherwise.
    /// FM-TMP-007 fix: additive-floor + blend instead of pure multiplicative.
    pub resurrection_multiplier: FiniteF32,
    /// Final score: max(raw_decay, resurrection floor) for dormant nodes,
    /// otherwise just raw_decay (see score_one_with_domain).
    pub final_score: FiniteF32,
}
406
/// Temporal decay with resurrection for dormant-then-active nodes.
/// Per-NodeType half-lives: files decay fast (7d), modules/dirs slow (30d).
/// Replaces: temporal_v2.py TemporalDecayResurrection
pub struct TemporalDecayScorer {
    /// Default rate constant k = ln(2) / half_life, used when neither a
    /// NodeType nor a DomainConfig override applies.
    default_k: PosF32,
}
414
415impl TemporalDecayScorer {
416    pub fn new(half_life_hours: PosF32) -> Self {
417        let k = PosF32::new(LN2 / half_life_hours.get()).unwrap();
418        Self { default_k: k }
419    }
420
421    /// Per-NodeType half-life in hours. Structural nodes decay slower.
422    fn k_for_type(node_type: NodeType) -> f32 {
423        let half_life = match node_type {
424            NodeType::File => 168.0,     // 7 days — active dev artifact
425            NodeType::Function => 336.0, // 14 days
426            NodeType::Class | NodeType::Struct | NodeType::Enum => 504.0, // 21 days
427            NodeType::Module | NodeType::Directory => 720.0, // 30 days — stable structure
428            NodeType::Type => 504.0,     // 21 days
429            _ => 168.0,                  // default 7 days
430        };
431        LN2 / half_life
432    }
433
434    /// Score a single node. Clamps negative age_hours to 0 (FM-TMP-009).
435    /// DEC-004: additive-floor resurrection.
436    /// Replaces: temporal_v2.py TemporalDecayResurrection.score_one()
437    pub fn score_one(
438        &self,
439        age_hours: f64,
440        change_frequency: FiniteF32,
441        last_dormancy_hours: Option<f64>,
442    ) -> DecayScore {
443        self.score_one_typed(age_hours, change_frequency, last_dormancy_hours, None)
444    }
445
446    /// Score with NodeType-specific half-life.
447    /// When `domain_config` is Some, uses domain-specific half-lives instead
448    /// of the hardcoded k_for_type() values (backward compat fallback).
449    pub fn score_one_typed(
450        &self,
451        age_hours: f64,
452        change_frequency: FiniteF32,
453        last_dormancy_hours: Option<f64>,
454        node_type: Option<NodeType>,
455    ) -> DecayScore {
456        self.score_one_with_domain(
457            age_hours,
458            change_frequency,
459            last_dormancy_hours,
460            node_type,
461            None,
462        )
463    }
464
465    /// Score with NodeType-specific half-life and optional DomainConfig override.
466    pub fn score_one_with_domain(
467        &self,
468        age_hours: f64,
469        change_frequency: FiniteF32,
470        last_dormancy_hours: Option<f64>,
471        node_type: Option<NodeType>,
472        domain_config: Option<&DomainConfig>,
473    ) -> DecayScore {
474        // FM-TMP-009: clamp negative age (future timestamp)
475        let age = age_hours.max(0.0);
476
477        // Use domain config half-life when provided, else fall back to hardcoded k_for_type
478        let k = match (domain_config, node_type) {
479            (Some(dc), Some(nt)) => LN2 / dc.half_life_for(nt),
480            (Some(dc), None) => LN2 / dc.default_half_life,
481            (None, Some(nt)) => Self::k_for_type(nt),
482            (None, None) => self.default_k.get(),
483        };
484
485        // Raw exponential decay
486        let raw = (-(age as f32) * k).exp().max(RAW_DECAY_FLOOR);
487        let raw_decay = FiniteF32::new(raw);
488
489        // DEC-004: resurrection with additive floor
490        let (resurrection, final_score) = match last_dormancy_hours {
491            Some(dormancy) if dormancy > DORMANT_HOURS => {
492                let dormancy_depth =
493                    (dormancy / (RESURRECTION_DORMANCY_THRESHOLD_DAYS as f64 * 24.0)) as f32;
494                let res = RESURRECTION_BASE_FLOOR
495                    + RESURRECTION_DEPTH_SCALE * (dormancy_depth + 1.0).ln();
496                let res_clamped = res.max(0.0).min(1.0);
497                let final_val = raw.max(res_clamped);
498                (FiniteF32::new(res_clamped), FiniteF32::new(final_val))
499            }
500            _ => (FiniteF32::ONE, raw_decay),
501        };
502
503        DecayScore {
504            node: NodeId::default(),
505            raw_decay,
506            resurrection_multiplier: resurrection,
507            final_score,
508        }
509    }
510
511    /// Score all nodes in the graph with per-NodeType half-lives.
512    /// Replaces: temporal_v2.py TemporalDecayResurrection.score()
513    pub fn score_all(&self, graph: &Graph, now_unix: f64) -> M1ndResult<Vec<DecayScore>> {
514        self.score_all_with_domain(graph, now_unix, None)
515    }
516
517    /// Score all nodes with optional DomainConfig override for half-lives.
518    pub fn score_all_with_domain(
519        &self,
520        graph: &Graph,
521        now_unix: f64,
522        domain_config: Option<&DomainConfig>,
523    ) -> M1ndResult<Vec<DecayScore>> {
524        let n = graph.num_nodes() as usize;
525        let mut scores = Vec::with_capacity(n);
526
527        for i in 0..n {
528            let last_mod = graph.nodes.last_modified[i];
529            let age_hours = (now_unix - last_mod) / 3600.0;
530            let freq = graph.nodes.change_frequency[i];
531            let nt = graph.nodes.node_type[i];
532
533            let mut ds = self.score_one_with_domain(age_hours, freq, None, Some(nt), domain_config);
534            ds.node = NodeId::new(i as u32);
535            scores.push(ds);
536        }
537
538        Ok(scores)
539    }
540}
541
542// ---------------------------------------------------------------------------
543// VelocityScorer — change velocity (temporal_v2.py VelocityScorer)
544// ---------------------------------------------------------------------------
545
/// Velocity score for a single node.
/// Replaces: temporal_v2.py VelocityScorer.score() per-node output
#[derive(Clone, Copy, Debug)]
pub struct VelocityScore {
    pub node: NodeId,
    /// Z-score of this node's change frequency against the graph-wide
    /// mean and stddev.
    pub velocity: FiniteF32,
    /// Trend bucket derived from the z-score (|z| <= 0.5 is Stable).
    pub trend: VelocityTrend,
}
554
/// Trend bucket for a node's change-frequency z-score.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum VelocityTrend {
    /// z > 0.5: changing noticeably more than average.
    Accelerating,
    /// z < -0.5: changing noticeably less than average.
    Decelerating,
    /// |z| <= 0.5: near the graph-wide average.
    Stable,
}
561
/// Velocity scorer: measures rate of change frequency over time.
/// Caches mean+stddev stats and invalidates when graph node count changes.
/// Replaces: temporal_v2.py VelocityScorer
pub struct VelocityScorer {
    /// Cached (node_count, mean, stddev); the cache is keyed on node count
    /// only, so frequency updates without node additions require an
    /// explicit `invalidate_cache` call.
    cached_stats: Option<(u32, f32, f32)>,
}
569
570impl VelocityScorer {
571    pub fn new() -> Self {
572        Self { cached_stats: None }
573    }
574
575    /// Compute or retrieve cached mean and standard deviation of change frequencies.
576    /// Invalidates cache when graph node count changes (new nodes added).
577    fn frequency_stats(&mut self, graph: &Graph) -> (f32, f32) {
578        let n = graph.num_nodes();
579        if n == 0 {
580            return (0.0, 1.0);
581        }
582
583        // Check cache validity
584        if let Some((cached_n, mean, stddev)) = self.cached_stats {
585            if cached_n == n {
586                return (mean, stddev);
587            }
588        }
589
590        // Recompute
591        let n_usize = n as usize;
592        let mean: f32 = (0..n_usize)
593            .map(|j| graph.nodes.change_frequency[j].get())
594            .sum::<f32>()
595            / n_usize as f32;
596        let variance: f32 = (0..n_usize)
597            .map(|j| {
598                let d = graph.nodes.change_frequency[j].get() - mean;
599                d * d
600            })
601            .sum::<f32>()
602            / n_usize as f32;
603        let stddev = variance.sqrt().max(0.01); // floor to avoid div-by-zero
604
605        self.cached_stats = Some((n, mean, stddev));
606        (mean, stddev)
607    }
608
609    /// Compute stats without caching (static helper for backward compat).
610    fn frequency_stats_static(graph: &Graph) -> (f32, f32) {
611        let n = graph.num_nodes() as usize;
612        if n == 0 {
613            return (0.0, 1.0);
614        }
615        let mean: f32 = (0..n)
616            .map(|j| graph.nodes.change_frequency[j].get())
617            .sum::<f32>()
618            / n as f32;
619        let variance: f32 = (0..n)
620            .map(|j| {
621                let d = graph.nodes.change_frequency[j].get() - mean;
622                d * d
623            })
624            .sum::<f32>()
625            / n as f32;
626        let stddev = variance.sqrt().max(0.01);
627        (mean, stddev)
628    }
629
630    /// Score all nodes using z-score velocity. Returns nodes with |z| > 0.1.
631    /// Replaces: temporal_v2.py VelocityScorer.score()
632    pub fn score_all(graph: &Graph, _now_unix: f64) -> M1ndResult<Vec<VelocityScore>> {
633        let n = graph.num_nodes() as usize;
634        let (mean, stddev) = Self::frequency_stats_static(graph);
635        let mut scores = Vec::new();
636
637        for i in 0..n {
638            let freq = graph.nodes.change_frequency[i].get();
639            let z = (freq - mean) / stddev;
640            let trend = if z > 0.5 {
641                VelocityTrend::Accelerating
642            } else if z < -0.5 {
643                VelocityTrend::Decelerating
644            } else {
645                VelocityTrend::Stable
646            };
647
648            if z.abs() > 0.1 {
649                scores.push(VelocityScore {
650                    node: NodeId::new(i as u32),
651                    velocity: FiniteF32::new(z),
652                    trend,
653                });
654            }
655        }
656
657        Ok(scores)
658    }
659
660    /// Score all nodes using cached stats. Prefer this when scorer is reused.
661    pub fn score_all_cached(
662        &mut self,
663        graph: &Graph,
664        _now_unix: f64,
665    ) -> M1ndResult<Vec<VelocityScore>> {
666        let n = graph.num_nodes() as usize;
667        let (mean, stddev) = self.frequency_stats(graph);
668        let mut scores = Vec::new();
669
670        for i in 0..n {
671            let freq = graph.nodes.change_frequency[i].get();
672            let z = (freq - mean) / stddev;
673            let trend = if z > 0.5 {
674                VelocityTrend::Accelerating
675            } else if z < -0.5 {
676                VelocityTrend::Decelerating
677            } else {
678                VelocityTrend::Stable
679            };
680
681            if z.abs() > 0.1 {
682                scores.push(VelocityScore {
683                    node: NodeId::new(i as u32),
684                    velocity: FiniteF32::new(z),
685                    trend,
686                });
687            }
688        }
689
690        Ok(scores)
691    }
692
693    /// Score a single node using z-score.
694    pub fn score_one(graph: &Graph, node: NodeId, _now_unix: f64) -> M1ndResult<VelocityScore> {
695        let idx = node.as_usize();
696        let n = graph.num_nodes() as usize;
697        let (mean, stddev) = Self::frequency_stats_static(graph);
698        let freq = if idx < n {
699            graph.nodes.change_frequency[idx].get()
700        } else {
701            0.0
702        };
703        let z = (freq - mean) / stddev;
704        let trend = if z > 0.5 {
705            VelocityTrend::Accelerating
706        } else if z < -0.5 {
707            VelocityTrend::Decelerating
708        } else {
709            VelocityTrend::Stable
710        };
711        Ok(VelocityScore {
712            node,
713            velocity: FiniteF32::new(z),
714            trend,
715        })
716    }
717
718    /// Invalidate the cached stats (call when graph structure changes).
719    pub fn invalidate_cache(&mut self) {
720        self.cached_stats = None;
721    }
722}
723
724// ---------------------------------------------------------------------------
725// ImpactRadius — blast radius calculation (temporal_v2.py ImpactRadiusCalculator)
726// ---------------------------------------------------------------------------
727
/// Impact result for a single downstream node.
/// Replaces: temporal_v2.py ImpactResult
#[derive(Clone, Debug)]
pub struct ImpactEntry {
    /// Affected node.
    pub node: NodeId,
    /// Strongest attenuated signal that reached this node (above the
    /// calculator's min_signal cutoff).
    pub signal_strength: FiniteF32,
    /// Hop count at which the strongest signal arrived.
    pub hop_distance: u8,
}
736
/// Impact radius result.
/// Replaces: temporal_v2.py ImpactRadiusCalculator.compute() return
#[derive(Clone, Debug)]
pub struct ImpactResult {
    /// Origin of the propagated change.
    pub source: NodeId,
    /// Nodes in blast radius sorted by signal strength descending
    /// (the source itself is excluded).
    pub blast_radius: Vec<ImpactEntry>,
    /// Total energy dispersed (sum of all blast-radius signal strengths).
    pub total_energy: FiniteF32,
    /// Number of BFS rounds executed (bounded by max_hops).
    pub max_hops_reached: u8,
}
749
/// Impact radius calculator. BFS from source along weighted edges.
/// Replaces: temporal_v2.py ImpactRadiusCalculator
pub struct ImpactRadiusCalculator {
    /// Maximum number of BFS rounds.
    max_hops: u8,
    /// Signals at or below this cutoff neither propagate nor appear
    /// in the blast radius.
    min_signal: FiniteF32,
}
756
757impl ImpactRadiusCalculator {
758    pub fn new(max_hops: u8, min_signal: FiniteF32) -> Self {
759        Self {
760            max_hops,
761            min_signal,
762        }
763    }
764
765    /// Compute impact radius from source. Direction: forward, reverse, or both.
766    /// DEC-009: sum-within-hop (superposition), max-across-hops (strongest arrival wins).
767    /// Replaces: temporal_v2.py ImpactRadiusCalculator.compute()
768    pub fn compute(
769        &self,
770        graph: &Graph,
771        source: NodeId,
772        direction: ImpactDirection,
773    ) -> M1ndResult<ImpactResult> {
774        let n = graph.num_nodes() as usize;
775        if source.as_usize() >= n {
776            return Ok(ImpactResult {
777                source,
778                blast_radius: Vec::new(),
779                total_energy: FiniteF32::ZERO,
780                max_hops_reached: 0,
781            });
782        }
783
784        let mut best_signal = vec![0.0f32; n];
785        let mut hop_dist = vec![u8::MAX; n];
786        best_signal[source.as_usize()] = 1.0;
787        hop_dist[source.as_usize()] = 0;
788
789        let decay = 0.55f32; // Use default decay
790        let mut frontier = vec![source];
791        let mut max_hops = 0u8;
792
793        for depth in 0..self.max_hops {
794            if frontier.is_empty() {
795                break;
796            }
797            max_hops = depth + 1;
798            let mut next = Vec::new();
799
800            for &node in &frontier {
801                let signal = best_signal[node.as_usize()];
802
803                // Forward edges
804                if direction != ImpactDirection::Reverse {
805                    let range = graph.csr.out_range(node);
806                    for j in range {
807                        let tgt = graph.csr.targets[j];
808                        let w = graph.csr.read_weight(EdgeIdx::new(j as u32)).get();
809                        let new_signal = signal * w * decay;
810                        let tgt_idx = tgt.as_usize();
811                        if tgt_idx < n && new_signal > self.min_signal.get() {
812                            // DEC-009: max-across-hops
813                            if new_signal > best_signal[tgt_idx] {
814                                best_signal[tgt_idx] = new_signal;
815                                hop_dist[tgt_idx] = depth + 1;
816                                next.push(tgt);
817                            }
818                        }
819                    }
820                }
821
822                // Reverse edges
823                if direction != ImpactDirection::Forward {
824                    let range = graph.csr.in_range(node);
825                    for j in range {
826                        let rev_src = graph.csr.rev_sources[j];
827                        let fwd_idx = graph.csr.rev_edge_idx[j];
828                        let w = graph.csr.read_weight(fwd_idx).get();
829                        let new_signal = signal * w * decay;
830                        let idx = rev_src.as_usize();
831                        if idx < n && new_signal > self.min_signal.get() {
832                            if new_signal > best_signal[idx] {
833                                best_signal[idx] = new_signal;
834                                hop_dist[idx] = depth + 1;
835                                next.push(rev_src);
836                            }
837                        }
838                    }
839                }
840            }
841
842            frontier = next;
843        }
844
845        let mut blast_radius: Vec<ImpactEntry> = (0..n)
846            .filter(|&i| i != source.as_usize() && best_signal[i] > self.min_signal.get())
847            .map(|i| ImpactEntry {
848                node: NodeId::new(i as u32),
849                signal_strength: FiniteF32::new(best_signal[i]),
850                hop_distance: hop_dist[i],
851            })
852            .collect();
853
854        blast_radius.sort_by(|a, b| b.signal_strength.cmp(&a.signal_strength));
855        let total_energy: f32 = blast_radius.iter().map(|e| e.signal_strength.get()).sum();
856
857        Ok(ImpactResult {
858            source,
859            blast_radius,
860            total_energy: FiniteF32::new(total_energy),
861            max_hops_reached: max_hops,
862        })
863    }
864}
865
/// Which edge orientation the impact-radius BFS follows from the source node.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ImpactDirection {
    /// Propagate along outgoing (forward) edges only.
    Forward,
    /// Propagate along incoming (reverse) edges only.
    Reverse,
    /// Propagate along edges in both orientations.
    Both,
}
872
873// ---------------------------------------------------------------------------
874// TemporalEngine — facade combining all temporal capabilities
875// Replaces: temporal_v2.py TemporalPredictor
876// ---------------------------------------------------------------------------
877
/// Facade for all temporal analysis capabilities.
/// Owns the four analysis components and orchestrates them in
/// [`Self::full_report`].
/// Replaces: temporal_v2.py TemporalPredictor
pub struct TemporalEngine {
    /// Sparse co-change matrix: which nodes historically change together.
    pub co_change: CoChangeMatrix,
    /// Detects causal chains rooted at a node.
    pub chain_detector: CausalChainDetector,
    /// Scores nodes by temporal decay of last modification (age + frequency).
    pub decay_scorer: TemporalDecayScorer,
    /// Computes signal-propagation impact (blast radius) from a source node.
    pub impact_calculator: ImpactRadiusCalculator,
}
886
887impl TemporalEngine {
888    /// Build from graph with default parameters.
889    /// Replaces: temporal_v2.py TemporalPredictor.__init__()
890    pub fn build(graph: &Graph) -> M1ndResult<Self> {
891        let co_change = CoChangeMatrix::bootstrap(graph, DEFAULT_MATRIX_BUDGET)?;
892        let chain_detector = CausalChainDetector::with_defaults();
893        let decay_scorer = TemporalDecayScorer::new(PosF32::new(DEFAULT_HALF_LIFE_HOURS).unwrap());
894        let impact_calculator = ImpactRadiusCalculator::new(5, FiniteF32::new(0.01));
895
896        Ok(Self {
897            co_change,
898            chain_detector,
899            decay_scorer,
900            impact_calculator,
901        })
902    }
903
904    /// Populate co-change matrix from git commit groups.
905    /// Call after build() with commit groups from ingestion.
906    pub fn populate_co_change(
907        &mut self,
908        graph: &Graph,
909        commit_groups: &[Vec<String>],
910    ) -> M1ndResult<()> {
911        self.co_change
912            .populate_from_commit_groups(graph, commit_groups)
913    }
914
915    /// Full temporal report for a node: co-change predictions + causal chains
916    /// + decay score + velocity + impact radius.
917    /// Replaces: temporal_v2.py TemporalPredictor.full_report()
918    pub fn full_report(
919        &self,
920        graph: &Graph,
921        node: NodeId,
922        now_unix: f64,
923    ) -> M1ndResult<TemporalReport> {
924        let co_change_predictions = self.co_change.predict(node, 10);
925        let causal_chains = self.chain_detector.detect(graph, node)?;
926
927        let idx = node.as_usize();
928        let n = graph.num_nodes() as usize;
929        let last_mod = if idx < n {
930            graph.nodes.last_modified[idx]
931        } else {
932            0.0
933        };
934        let age_hours = (now_unix - last_mod) / 3600.0;
935        let freq = if idx < n {
936            graph.nodes.change_frequency[idx]
937        } else {
938            FiniteF32::ZERO
939        };
940        let nt = if idx < n {
941            Some(graph.nodes.node_type[idx])
942        } else {
943            None
944        };
945        let mut decay = self.decay_scorer.score_one_typed(age_hours, freq, None, nt);
946        decay.node = node;
947
948        let velocity = VelocityScorer::score_one(graph, node, now_unix)?;
949        let impact = self
950            .impact_calculator
951            .compute(graph, node, ImpactDirection::Both)?;
952
953        Ok(TemporalReport {
954            node,
955            co_change_predictions,
956            causal_chains,
957            decay,
958            velocity,
959            impact,
960        })
961    }
962}
963
/// Complete temporal analysis for one node.
/// Produced by [`TemporalEngine::full_report`].
#[derive(Clone, Debug)]
pub struct TemporalReport {
    /// The node this report describes.
    pub node: NodeId,
    /// Top co-change candidates (at most 10) from the co-change matrix.
    pub co_change_predictions: Vec<CoChangeEntry>,
    /// Causal chains detected from this node.
    pub causal_chains: Vec<CausalChain>,
    /// Temporal decay score (from node age and change frequency).
    pub decay: DecayScore,
    /// Change-velocity score as of the report's `now_unix`.
    pub velocity: VelocityScore,
    /// Impact radius computed in both edge directions.
    pub impact: ImpactResult,
}
974
// Compile-time guarantee: TemporalEngine can be shared and sent across
// threads. Fails the build (not a test) if any field loses Send/Sync.
static_assertions::assert_impl_all!(TemporalEngine: Send, Sync);