use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
use std::time::Instant;
use crate::error::M1ndResult;
use crate::graph::Graph;
use crate::types::*;
/// Compact probabilistic membership set, used as an approximate visited-set
/// during heap-based propagation (false positives possible, no false negatives).
pub struct BloomFilter {
    // Bit array packed into 64-bit words.
    bits: Vec<u64>,
    // Logical number of addressable bits in `bits`.
    num_bits: usize,
    // Number of hash probes per item (clamped to 1..=16).
    num_hashes: u32,
}
impl BloomFilter {
    /// Sizes the filter for `expected_items` at a target false-positive rate
    /// `fpr`, using the standard formulas `m = -n·ln(p)/ln²(2)` and
    /// `k = (m/n)·ln(2)`, with a floor of 64 bits and 1..=16 hash probes.
    pub fn with_capacity(expected_items: usize, fpr: f64) -> Self {
        let expected = expected_items.max(1);
        let m = (-(expected as f64) * fpr.ln() / (2.0f64.ln().powi(2))) as usize;
        let num_bits = m.max(64);
        let num_words = num_bits.div_ceil(64);
        let k = ((num_bits as f64 / expected as f64) * 2.0f64.ln()).ceil() as u32;
        let num_hashes = k.clamp(1, 16);
        Self {
            bits: vec![0u64; num_words],
            num_bits,
            num_hashes,
        }
    }

    /// Writes `k` bit indices into `out` using Kirsch–Mitzenmacher double
    /// hashing (`index_i = h1 + i·h2 mod num_bits`) and returns `k`.
    #[inline]
    fn compute_hashes(&self, item: u32, out: &mut [usize; 16]) -> u32 {
        let h1 = item.wrapping_mul(2654435761) as usize;
        // Fix: force the stride odd. Previously h2 could be ≡ 0 (mod num_bits),
        // which collapses all k probes onto the same bit and inflates the
        // false-positive rate; an odd, non-zero stride makes that far rarer.
        // Insert and query share this function, so the filter stays consistent.
        let h2 = (item.wrapping_mul(2246822519).wrapping_add(1) as usize) | 1;
        let m = self.num_bits;
        let k = self.num_hashes.min(16);
        for (i, slot) in out.iter_mut().enumerate().take(k as usize) {
            *slot = h1.wrapping_add(i.wrapping_mul(h2)) % m;
        }
        k
    }

    /// Sets the k probe bits for `item`.
    pub fn insert(&mut self, item: NodeId) {
        let mut hashes = [0usize; 16];
        let k = self.compute_hashes(item.0, &mut hashes);
        for &h in hashes.iter().take(k as usize) {
            // h >> 6 selects the 64-bit word, h & 63 the bit within it.
            self.bits[h >> 6] |= 1u64 << (h & 63);
        }
    }

    /// Returns true if `item` may have been inserted (false positives are
    /// possible), false if it definitely has not been.
    pub fn probably_contains(&self, item: NodeId) -> bool {
        let mut hashes = [0usize; 16];
        let k = self.compute_hashes(item.0, &mut hashes);
        for &h in hashes.iter().take(k as usize) {
            if self.bits[h >> 6] & (1u64 << (h & 63)) == 0 {
                return false;
            }
        }
        true
    }

    /// Resets the filter to empty without deallocating the bit array.
    pub fn clear(&mut self) {
        self.bits.fill(0);
    }
}
/// A node that survived dimension merging, with its fused activation and the
/// per-dimension breakdown it was computed from.
#[derive(Clone, Debug)]
pub struct ActivatedNode {
    pub node: NodeId,
    // Weighted combination of the four dimension scores (incl. resonance bonus).
    pub activation: FiniteF32,
    // Raw score for each of the four dimensions, in `results` order.
    pub dimensions: [FiniteF32; 4],
    // How many dimensions scored above the contribution threshold (0..=4).
    pub active_dimension_count: u8,
}
/// Final output of an activation query.
#[derive(Clone, Debug)]
pub struct ActivationResult {
    // Top-ranked nodes, sorted by descending combined activation.
    pub activated: Vec<ActivatedNode>,
    // Seeds the query started from (producers may leave this empty).
    pub seeds: Vec<(NodeId, FiniteF32)>,
    // Wall-clock duration of the merge, in nanoseconds.
    pub elapsed_ns: u64,
    // Whether an XLR fallback path was taken — set by the producer; this file
    // always emits `false`.
    pub xlr_fallback_used: bool,
}
/// Scores produced by a single dimension's activation pass.
#[derive(Clone, Debug)]
pub struct DimensionResult {
    // (node, score) pairs, sorted by descending score.
    pub scores: Vec<(NodeId, FiniteF32)>,
    // Which dimension produced these scores.
    pub dimension: Dimension,
    // Wall-clock duration of the pass, in nanoseconds.
    pub elapsed_ns: u64,
}
/// A spreading-activation strategy over the graph.
pub trait ActivationEngine: Send + Sync {
    /// Propagates `seeds` through `graph` under `config`, returning the
    /// resulting per-node scores for one dimension.
    fn propagate(
        &self,
        graph: &Graph,
        seeds: &[(NodeId, FiniteF32)],
        config: &PropagationConfig,
    ) -> M1ndResult<DimensionResult>;
}
/// Frontier-based (BFS-level) propagation engine; carries no state.
pub struct WavefrontEngine;

impl WavefrontEngine {
    /// Constructs the stateless engine.
    pub fn new() -> Self {
        WavefrontEngine
    }
}

impl Default for WavefrontEngine {
    fn default() -> Self {
        Self::new()
    }
}
impl ActivationEngine for WavefrontEngine {
    /// Breadth-first spreading activation.
    ///
    /// Seeds are capped at `saturation_cap` and max-combined. Each BFS level
    /// attenuates the source activation by edge weight × `decay`. Excitatory
    /// signals above `threshold` max-combine into the target and enqueue it
    /// at most once overall (`visited`); inhibitory signals are negated,
    /// scaled by `inhibitory_factor`, accumulated (floored at 0), and never
    /// enqueue the target. Depth is limited to `config.max_depth`, capped at 20.
    fn propagate(
        &self,
        graph: &Graph,
        seeds: &[(NodeId, FiniteF32)],
        config: &PropagationConfig,
    ) -> M1ndResult<DimensionResult> {
        let start = Instant::now();
        let n = graph.num_nodes() as usize;
        // Trivial cases: empty graph or no seeds → empty result.
        if n == 0 || seeds.is_empty() {
            return Ok(DimensionResult {
                scores: Vec::new(),
                dimension: Dimension::Structural,
                elapsed_ns: start.elapsed().as_nanos() as u64,
            });
        }
        let threshold = config.threshold.get();
        let decay = config.decay.get();
        let max_depth = config.max_depth.min(20) as usize;
        let mut activation = vec![0.0f32; n];
        // `visited` guarantees each node enters a frontier at most once.
        let mut visited = vec![false; n];
        let mut frontier: Vec<NodeId> = Vec::new();
        for &(node, score) in seeds {
            let idx = node.as_usize();
            if idx < n {
                // Cap the seed strength and keep the max across duplicate seeds.
                let s = score.get().min(config.saturation_cap.get());
                if s > activation[idx] {
                    activation[idx] = s;
                }
                if !visited[idx] {
                    frontier.push(node);
                    visited[idx] = true;
                }
            }
        }
        for _depth in 0..max_depth {
            if frontier.is_empty() {
                break;
            }
            let mut next_frontier: Vec<NodeId> = Vec::new();
            for &src in &frontier {
                let src_act = activation[src.as_usize()];
                // Sub-threshold sources emit no signals.
                if src_act < threshold {
                    continue;
                }
                let range = graph.csr.out_range(src);
                for j in range {
                    let tgt = graph.csr.targets[j];
                    let w = graph.csr.read_weight(EdgeIdx::new(j as u32)).get();
                    let is_inhib = graph.csr.inhibitory[j];
                    let mut signal = src_act * w * decay;
                    if is_inhib {
                        // Inhibitory edges emit a negative, rescaled signal.
                        signal = -signal * config.inhibitory_factor.get();
                    }
                    let tgt_idx = tgt.as_usize();
                    if tgt_idx >= n {
                        continue;
                    }
                    if !is_inhib && signal > threshold {
                        // Excitatory: max-combine and enqueue the target once.
                        if signal > activation[tgt_idx] {
                            activation[tgt_idx] = signal;
                        }
                        if !visited[tgt_idx] {
                            visited[tgt_idx] = true;
                            next_frontier.push(tgt);
                        }
                    } else if is_inhib {
                        // Inhibitory: accumulate (signal is negative), floor at 0.
                        // NOTE: order-sensitive across the frontier iteration;
                        // inhibition alone never enqueues the target.
                        activation[tgt_idx] = (activation[tgt_idx] + signal).max(0.0);
                    }
                }
            }
            frontier = next_frontier;
        }
        // Collect strictly positive activations, strongest first.
        let mut scores: Vec<(NodeId, FiniteF32)> = activation
            .iter()
            .enumerate()
            .filter(|(_, &v)| v > 0.0)
            .map(|(i, &v)| (NodeId::new(i as u32), FiniteF32::new(v)))
            .collect();
        scores.sort_by(|a, b| b.1.cmp(&a.1));
        Ok(DimensionResult {
            scores,
            dimension: Dimension::Structural,
            elapsed_ns: start.elapsed().as_nanos() as u64,
        })
    }
}
/// Max-heap entry ordered by activation strength.
#[derive(Clone, Copy)]
struct HeapEntry {
    node: NodeId,
    activation: f32,
}

impl PartialEq for HeapEntry {
    fn eq(&self, other: &Self) -> bool {
        // Fix: must agree with `Ord::cmp` below (`a == b` iff
        // `a.cmp(b) == Ordering::Equal`). The previous f32 `==` disagreed
        // with `total_cmp` for NaN and ±0.0, violating the consistency
        // contract required by `Eq`/`Ord`.
        self.activation.total_cmp(&other.activation) == Ordering::Equal
    }
}

impl Eq for HeapEntry {}

impl PartialOrd for HeapEntry {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for HeapEntry {
    fn cmp(&self, other: &Self) -> Ordering {
        // `total_cmp` is a total order over all f32 values (including NaN),
        // making this safe to use as the `BinaryHeap` key.
        self.activation.total_cmp(&other.activation)
    }
}
/// Best-first (priority-queue) propagation engine; carries no state.
pub struct HeapEngine;

impl HeapEngine {
    /// Constructs the stateless engine.
    pub fn new() -> Self {
        HeapEngine
    }
}

impl Default for HeapEngine {
    fn default() -> Self {
        Self::new()
    }
}
impl ActivationEngine for HeapEngine {
    /// Best-first spreading activation driven by a max-heap keyed on activation.
    ///
    /// A Bloom filter approximates the visited set: each node is pushed at
    /// most roughly once, and rare false positives may skip a node entirely
    /// (an accepted trade-off of this engine). Expansion stops when the
    /// strongest pending entry drops below `threshold` or after about
    /// `n * max_depth` pops.
    fn propagate(
        &self,
        graph: &Graph,
        seeds: &[(NodeId, FiniteF32)],
        config: &PropagationConfig,
    ) -> M1ndResult<DimensionResult> {
        let start = Instant::now();
        let n = graph.num_nodes() as usize;
        // Trivial cases: empty graph or no seeds → empty result.
        if n == 0 || seeds.is_empty() {
            return Ok(DimensionResult {
                scores: Vec::new(),
                dimension: Dimension::Structural,
                elapsed_ns: start.elapsed().as_nanos() as u64,
            });
        }
        let threshold = config.threshold.get();
        let decay = config.decay.get();
        let mut activation = vec![0.0f32; n];
        let mut bloom = BloomFilter::with_capacity(n, 0.01);
        let mut heap = BinaryHeap::new();
        for &(node, score) in seeds {
            let idx = node.as_usize();
            if idx < n {
                let s = score.get().min(config.saturation_cap.get());
                // Fix: max-combine duplicate seeds. This was previously a
                // plain assignment, so a later, weaker duplicate seed could
                // overwrite a stronger activation (WavefrontEngine already
                // max-combines its seeds).
                if s > activation[idx] {
                    activation[idx] = s;
                }
                // A stale-low heap key is harmless: the expansion below
                // re-reads the current activation at pop time.
                heap.push(HeapEntry {
                    node,
                    activation: s,
                });
                bloom.insert(node);
            }
        }
        // Global work cap: roughly `max_depth` expansions per node.
        let mut pop_count = 0u32;
        let max_ops = (n as u32)
            .saturating_mul(config.max_depth as u32)
            .max(10000);
        while let Some(entry) = heap.pop() {
            // Max-heap ⇒ every remaining entry is also below threshold.
            if entry.activation < threshold {
                break;
            }
            pop_count += 1;
            if pop_count > max_ops {
                break;
            }
            let src = entry.node;
            // Re-read: the stored activation may exceed the (stale) heap key.
            let src_act = activation[src.as_usize()];
            let range = graph.csr.out_range(src);
            for j in range {
                let tgt = graph.csr.targets[j];
                let w = graph.csr.read_weight(EdgeIdx::new(j as u32)).get();
                let is_inhib = graph.csr.inhibitory[j];
                let mut signal = src_act * w * decay;
                if is_inhib {
                    // Inhibitory edges emit a negative, rescaled signal.
                    signal = -signal * config.inhibitory_factor.get();
                }
                let tgt_idx = tgt.as_usize();
                if tgt_idx >= n {
                    continue;
                }
                if !is_inhib && signal > threshold && signal > activation[tgt_idx] {
                    activation[tgt_idx] = signal;
                    if !bloom.probably_contains(tgt) {
                        bloom.insert(tgt);
                        heap.push(HeapEntry {
                            node: tgt,
                            activation: signal,
                        });
                    }
                } else if is_inhib {
                    // Inhibition accumulates (signal is negative), floored at 0.
                    activation[tgt_idx] = (activation[tgt_idx] + signal).max(0.0);
                }
            }
        }
        // Collect strictly positive activations, strongest first.
        let mut scores: Vec<(NodeId, FiniteF32)> = activation
            .iter()
            .enumerate()
            .filter(|(_, &v)| v > 0.0)
            .map(|(i, &v)| (NodeId::new(i as u32), FiniteF32::new(v)))
            .collect();
        scores.sort_by(|a, b| b.1.cmp(&a.1));
        Ok(DimensionResult {
            scores,
            dimension: Dimension::Structural,
            elapsed_ns: start.elapsed().as_nanos() as u64,
        })
    }
}
/// Chooses between wavefront and heap propagation per query shape.
pub struct HybridEngine {
    wavefront: WavefrontEngine,
    heap: HeapEngine,
}

impl HybridEngine {
    /// Builds both underlying engines (each is stateless).
    pub fn new() -> Self {
        Self {
            wavefront: WavefrontEngine::new(),
            heap: HeapEngine::new(),
        }
    }

    /// Heap traversal wins when seeds are very sparse (< 0.1% of nodes)
    /// and the graph is low-degree (average degree < 8).
    fn prefer_heap(graph: &Graph, seed_count: usize) -> bool {
        let node_count = graph.num_nodes().max(1) as f64;
        let seed_ratio = seed_count as f64 / node_count;
        seed_ratio < 0.001 && graph.avg_degree() < 8.0
    }
}

impl Default for HybridEngine {
    fn default() -> Self {
        Self::new()
    }
}
impl ActivationEngine for HybridEngine {
    /// Dispatches to the heap engine for sparse-seed, low-degree queries;
    /// otherwise uses the wavefront engine.
    fn propagate(
        &self,
        graph: &Graph,
        seeds: &[(NodeId, FiniteF32)],
        config: &PropagationConfig,
    ) -> M1ndResult<DimensionResult> {
        let engine: &dyn ActivationEngine = if Self::prefer_heap(graph, seeds.len()) {
            &self.heap
        } else {
            &self.wavefront
        };
        engine.propagate(graph, seeds, config)
    }
}
/// Semantic-dimension activation: delegates to the semantic engine's fast
/// query path and timestamps the result.
pub fn activate_semantic(
    _graph: &Graph,
    semantic: &crate::semantic::SemanticEngine,
    query: &str,
    top_k: usize,
) -> M1ndResult<DimensionResult> {
    let start = Instant::now();
    let scores = semantic.query_fast(_graph, query, top_k)?;
    let elapsed_ns = start.elapsed().as_nanos() as u64;
    Ok(DimensionResult {
        scores,
        dimension: Dimension::Semantic,
        elapsed_ns,
    })
}
/// Temporal-dimension scoring: recency (exponential decay with a 168-hour
/// half-life) blended with change frequency, scaled by each seed's strength.
pub fn activate_temporal(
    graph: &Graph,
    seeds: &[(NodeId, FiniteF32)],
    weights: &TemporalWeights,
) -> M1ndResult<DimensionResult> {
    let start = Instant::now();
    let node_count = graph.num_nodes() as usize;
    // Seconds since the Unix epoch; a pre-epoch clock degrades to 0.
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map_or(0.0, |d| d.as_secs_f64());
    // Decay constant k for a 168-hour (one-week) half-life.
    const HALF_LIFE_SECS: f64 = 168.0 * 3600.0;
    let decay_k = std::f64::consts::LN_2 / HALF_LIFE_SECS;
    let mut scores = Vec::new();
    for &(node, seed_strength) in seeds {
        let idx = node.as_usize();
        if idx >= node_count {
            continue;
        }
        // Clamp age at zero in case `last_modified` is in the future.
        let age_secs = (now - graph.nodes.last_modified[idx]).max(0.0);
        let recency = (-decay_k * age_secs).exp() as f32;
        let frequency = graph.nodes.change_frequency[idx].get();
        let blended = recency * weights.recency.get() + frequency * weights.frequency.get();
        let combined = blended * seed_strength.get();
        if combined > 0.0 {
            scores.push((node, FiniteF32::new(combined)));
        }
    }
    scores.sort_by(|a, b| b.1.cmp(&a.1));
    Ok(DimensionResult {
        scores,
        dimension: Dimension::Temporal,
        elapsed_ns: start.elapsed().as_nanos() as u64,
    })
}
pub fn activate_causal(
graph: &Graph,
seeds: &[(NodeId, FiniteF32)],
config: &PropagationConfig,
) -> M1ndResult<DimensionResult> {
let start = Instant::now();
let n = graph.num_nodes() as usize;
if n == 0 || seeds.is_empty() {
return Ok(DimensionResult {
scores: Vec::new(),
dimension: Dimension::Causal,
elapsed_ns: start.elapsed().as_nanos() as u64,
});
}
let threshold = config.threshold.get();
let decay = config.decay.get();
let max_depth = config.max_depth.min(20) as usize;
let mut activation = vec![0.0f32; n];
let mut frontier: Vec<NodeId> = Vec::new();
let mut visited = vec![false; n];
for &(node, score) in seeds {
let idx = node.as_usize();
if idx < n {
activation[idx] = score.get();
if !visited[idx] {
frontier.push(node);
visited[idx] = true;
}
}
}
for _depth in 0..max_depth {
if frontier.is_empty() {
break;
}
let mut next_frontier = Vec::new();
for &src in &frontier {
let src_act = activation[src.as_usize()];
if src_act < threshold {
continue;
}
let range = graph.csr.out_range(src);
for j in range {
let causal = graph.csr.causal_strengths[j].get();
if causal <= 0.0 {
continue; }
let tgt = graph.csr.targets[j];
let w = graph.csr.read_weight(EdgeIdx::new(j as u32)).get();
let signal = src_act * w * causal * decay;
let tgt_idx = tgt.as_usize();
if tgt_idx < n && signal > threshold && signal > activation[tgt_idx] {
activation[tgt_idx] = signal;
if !visited[tgt_idx] {
visited[tgt_idx] = true;
next_frontier.push(tgt);
}
}
}
}
frontier = next_frontier;
}
let mut back_frontier: Vec<NodeId> = Vec::new();
let mut back_visited = vec![false; n];
for &(node, _) in seeds {
let idx = node.as_usize();
if idx < n && !back_visited[idx] {
back_frontier.push(node);
back_visited[idx] = true;
}
}
for _depth in 0..max_depth {
if back_frontier.is_empty() {
break;
}
let mut next = Vec::new();
for &src in &back_frontier {
let src_act = activation[src.as_usize()]
.max(seeds.iter().find(|s| s.0 == src).map_or(0.0, |s| s.1.get()));
if src_act < threshold {
continue;
}
let range = graph.csr.in_range(src);
for j in range {
let fwd_idx = graph.csr.rev_edge_idx[j];
let causal = graph.csr.causal_strengths[fwd_idx.as_usize()].get();
if causal <= 0.0 {
continue;
}
let rev_src = graph.csr.rev_sources[j];
let w = graph.csr.read_weight(fwd_idx).get();
let signal = src_act * w * causal * decay * 0.7; let idx = rev_src.as_usize();
if idx < n && signal > threshold && signal > activation[idx] {
activation[idx] = signal;
if !back_visited[idx] {
back_visited[idx] = true;
next.push(rev_src);
}
}
}
}
back_frontier = next;
}
let mut scores: Vec<(NodeId, FiniteF32)> = activation
.iter()
.enumerate()
.filter(|(_, &v)| v > 0.0)
.map(|(i, &v)| (NodeId::new(i as u32), FiniteF32::new(v)))
.collect();
scores.sort_by(|a, b| b.1.cmp(&a.1));
Ok(DimensionResult {
scores,
dimension: Dimension::Causal,
elapsed_ns: start.elapsed().as_nanos() as u64,
})
}
// NOTE(review): not referenced within this file — confirm it is used elsewhere
// or remove it.
const PAGERANK_BOOST: f32 = 0.1;
// Minimum per-dimension score for that dimension to count as "active" when
// tallying resonance in `merge_dimensions`.
const DIM_CONTRIBUTION_THRESHOLD: f32 = 0.01;
/// Fuses the four per-dimension results into one ranked activation list.
///
/// Weights are renormalized across the dimensions that produced scores, each
/// node's scores are combined as a weighted sum, and nodes active (above
/// `DIM_CONTRIBUTION_THRESHOLD`) in 3 or 4 dimensions receive a resonance
/// bonus. Returns the top `top_k` nodes, strongest first.
pub fn merge_dimensions(
    results: &[DimensionResult; 4],
    top_k: usize,
) -> M1ndResult<ActivationResult> {
    let start = Instant::now();
    // Which dimensions actually produced any scores.
    let active_mask: [bool; 4] = std::array::from_fn(|i| !results[i].scores.is_empty());
    let mut weights = DIMENSION_WEIGHTS;
    let total_active_weight: f32 = weights
        .iter()
        .zip(active_mask)
        .filter_map(|(&w, active)| active.then_some(w))
        .sum();
    // Renormalize so the active dimensions' weights sum to 1; inactive
    // dimensions are zeroed out.
    if total_active_weight > 0.0 && total_active_weight < 1.0 {
        for (w, active) in weights.iter_mut().zip(active_mask) {
            *w = if active { *w / total_active_weight } else { 0.0 };
        }
    }
    // Gather each node's score in every dimension it appeared in.
    let mut node_scores: HashMap<u32, [f32; 4]> = HashMap::new();
    for (dim_idx, result) in results.iter().enumerate() {
        for &(node, score) in &result.scores {
            node_scores.entry(node.0).or_insert([0.0; 4])[dim_idx] = score.get();
        }
    }
    let mut activated: Vec<ActivatedNode> = node_scores
        .iter()
        .map(|(&node_id, dims)| {
            let weighted_sum: f32 = dims.iter().zip(weights.iter()).map(|(d, w)| d * w).sum();
            let dim_count = dims
                .iter()
                .filter(|&&d| d > DIM_CONTRIBUTION_THRESHOLD)
                .count() as u8;
            // Multi-dimension resonance: reward nodes lit up in 3+ dimensions.
            let combined = match dim_count {
                4.. => weighted_sum * RESONANCE_BONUS_4DIM,
                3 => weighted_sum * RESONANCE_BONUS_3DIM,
                _ => weighted_sum,
            };
            ActivatedNode {
                node: NodeId::new(node_id),
                activation: FiniteF32::new(combined),
                dimensions: std::array::from_fn(|i| FiniteF32::new(dims[i])),
                active_dimension_count: dim_count,
            }
        })
        .collect();
    activated.sort_by(|a, b| b.activation.cmp(&a.activation));
    activated.truncate(top_k);
    Ok(ActivationResult {
        activated,
        seeds: Vec::new(),
        elapsed_ns: start.elapsed().as_nanos() as u64,
        xlr_fallback_used: false,
    })
}