pub mod attractor;
pub mod causal;
pub mod cognitive_engine;
pub mod morphogenetic;
pub mod network;
pub mod neuron;
pub mod optimizer;
pub mod strange_loop;
pub mod synapse;
pub mod time_crystal;
pub use attractor::{AttractorConfig, AttractorDynamics, EnergyLandscape};
pub use causal::{CausalConfig, CausalDiscoverySNN, CausalGraph, CausalRelation};
pub use cognitive_engine::{CognitiveMinCutEngine, EngineConfig, EngineMetrics, OperationMode};
pub use morphogenetic::{GrowthRules, MorphConfig, MorphogeneticSNN, TuringPattern};
pub use network::{LayerConfig, NetworkConfig, SpikingNetwork};
pub use neuron::{LIFNeuron, NeuronConfig, NeuronState, SpikeTrain};
pub use optimizer::{
NeuralGraphOptimizer, OptimizationResult, OptimizerConfig, PolicySNN, ValueNetwork,
};
pub use strange_loop::{MetaAction, MetaCognitiveMinCut, MetaLevel, StrangeLoopConfig};
pub use synapse::{STDPConfig, Synapse, SynapseMatrix};
pub use time_crystal::{CPGConfig, OscillatorNeuron, PhaseTopology, TimeCrystalCPG};
use crate::graph::{DynamicGraph, EdgeId, VertexId, Weight};
use std::time::{Duration, Instant};
/// Simulation time value (the unit is whatever `dt` is expressed in;
/// presumably milliseconds — confirm against the simulation loop).
pub type SimTime = f64;
/// A single spike event: which neuron fired, and when.
#[derive(Debug, Clone, Copy, PartialEq)]
pub struct Spike {
    /// Index of the neuron that emitted this spike.
    pub neuron_id: usize,
    /// Time at which the spike occurred (see [`SimTime`]).
    pub time: SimTime,
}
/// Alias for a dense vector of `f64` values.
pub type Vector = Vec<f64>;
/// Top-level configuration for the SNN min-cut subsystem.
///
/// Every feature flag defaults to `true`, so [`SNNMinCutConfig::default`]
/// yields a fully-enabled network of 1000 neurons stepped at `dt = 1.0`.
#[derive(Debug, Clone)]
pub struct SNNMinCutConfig {
    /// Simulation time step.
    pub dt: f64,
    /// Number of neurons in the network.
    pub num_neurons: usize,
    /// Enable attractor dynamics.
    pub enable_attractors: bool,
    /// Enable the meta-cognitive strange-loop layer.
    pub enable_strange_loop: bool,
    /// Enable spike-based causal discovery.
    pub enable_causal_discovery: bool,
    /// Enable the time-crystal central pattern generator.
    pub enable_time_crystal: bool,
    /// Enable morphogenetic growth rules.
    pub enable_morphogenetic: bool,
    /// Enable the neural graph optimizer.
    pub enable_optimizer: bool,
}

impl Default for SNNMinCutConfig {
    fn default() -> Self {
        SNNMinCutConfig {
            num_neurons: 1000,
            dt: 1.0,
            enable_optimizer: true,
            enable_morphogenetic: true,
            enable_time_crystal: true,
            enable_causal_discovery: true,
            enable_strange_loop: true,
            enable_attractors: true,
        }
    }
}
/// Outcome of one spike-based computation pass.
#[derive(Debug, Clone)]
pub struct SpikeComputeResult {
    /// All spikes emitted during the run.
    pub spikes: Vec<Spike>,
    /// Scalar energy associated with the resulting state.
    pub energy: f64,
    /// Wall-clock time the computation took.
    pub duration: Duration,
    /// Min-cut value, when one was computed for this pass.
    pub mincut_value: Option<f64>,
}
/// Translates spike activity into graph structure and back.
pub trait SpikeToGraph {
    /// Update edge weights in `graph` based on the observed `spikes`.
    fn spikes_to_weights(&self, spikes: &[Spike], graph: &mut DynamicGraph);
    /// Derive a per-vertex firing-rate vector from the graph's connectivity.
    fn graph_to_spike_rates(&self, graph: &DynamicGraph) -> Vec<f64>;
}
/// Translates graph quantities into neuron input parameters.
pub trait GraphToSpike {
    /// Convert an edge weight into an injected input current.
    fn weight_to_current(&self, weight: Weight) -> f64;
    /// Convert a vertex degree into a neuron firing threshold.
    fn degree_to_threshold(&self, degree: usize) -> f64;
}
/// Default conversion factors between spike activity and graph quantities.
///
/// `default()` is equivalent to [`DefaultSpikeGraphTransducer::new`] and
/// yields the working, non-zero factors.
#[derive(Debug, Clone)]
pub struct DefaultSpikeGraphTransducer {
    /// Scale applied to spike-count correlations when updating edge weights.
    pub weight_factor: f64,
    /// Scale applied to edge weights when converting them to input currents.
    pub current_factor: f64,
    /// Scale applied to `ln(degree)` when deriving firing thresholds.
    pub threshold_scale: f64,
}

impl DefaultSpikeGraphTransducer {
    /// Create a transducer with the standard conversion factors.
    pub fn new() -> Self {
        Self {
            weight_factor: 0.01,
            current_factor: 10.0,
            threshold_scale: 0.5,
        }
    }
}

impl Default for DefaultSpikeGraphTransducer {
    /// Delegates to [`Self::new`]. The previously derived `Default` zeroed
    /// every factor, which silently disabled weight updates (`delta_w` was
    /// always 0) and produced zero input currents.
    fn default() -> Self {
        Self::new()
    }
}
impl SpikeToGraph for DefaultSpikeGraphTransducer {
    /// Strengthens edges whose endpoint neurons both spiked: the weight
    /// delta is `weight_factor * src_count * tgt_count` (a Hebbian-style
    /// rate-correlation rule). Edges where either endpoint was silent are
    /// left unchanged.
    fn spikes_to_weights(&self, spikes: &[Spike], graph: &mut DynamicGraph) {
        // Count spikes per neuron id.
        let mut spike_counts: std::collections::HashMap<usize, usize> =
            std::collections::HashMap::new();
        for spike in spikes {
            *spike_counts.entry(spike.neuron_id).or_insert(0) += 1;
        }
        // NOTE(review): assumes vertex ids map 1:1 onto neuron ids —
        // confirm against whatever constructs the graph.
        for edge in graph.edges() {
            let src_spikes = spike_counts
                .get(&(edge.source as usize))
                .copied()
                .unwrap_or(0);
            let tgt_spikes = spike_counts
                .get(&(edge.target as usize))
                .copied()
                .unwrap_or(0);
            // Non-zero only when both endpoints fired at least once.
            let correlation = (src_spikes * tgt_spikes) as f64;
            let delta_w = self.weight_factor * correlation;
            if delta_w > 0.0 {
                let new_weight = edge.weight + delta_w;
                // Update failures (e.g. a vanished edge) are deliberately
                // ignored: this is a best-effort bulk update.
                let _ = graph.update_edge_weight(edge.source, edge.target, new_weight);
            }
        }
    }
    /// Derives a rate for each vertex from its local connectivity:
    /// `rate = (degree + sum of incident edge weights) * 0.01`.
    fn graph_to_spike_rates(&self, graph: &DynamicGraph) -> Vec<f64> {
        let vertices = graph.vertices();
        let mut rates = vec![0.0; vertices.len()];
        for (i, v) in vertices.iter().enumerate() {
            let degree = graph.degree(*v);
            // Sum the weights of all edges incident to `v`.
            // NOTE(review): the linear scan of `graph.edges()` per neighbor
            // makes this O(V * deg * E); if DynamicGraph exposes a direct
            // edge-weight lookup by EdgeId, that should be used instead.
            let weight_sum: f64 = graph
                .neighbors(*v)
                .iter()
                .filter_map(|(_, eid)| {
                    graph
                        .edges()
                        .iter()
                        .find(|e| e.id == *eid)
                        .map(|e| e.weight)
                })
                .sum();
            rates[i] = (degree as f64 + weight_sum) * 0.01;
        }
        rates
    }
}
impl GraphToSpike for DefaultSpikeGraphTransducer {
    /// Scale an edge weight into an injected input current.
    fn weight_to_current(&self, weight: Weight) -> f64 {
        self.current_factor * weight
    }

    /// Map a vertex degree to a firing threshold. Isolated vertices
    /// (degree 0) get the baseline threshold of 1.0; otherwise the
    /// threshold grows logarithmically with connectivity.
    fn degree_to_threshold(&self, degree: usize) -> f64 {
        match degree {
            0 => 1.0,
            d => 1.0 + self.threshold_scale * (d as f64).ln(),
        }
    }
}
/// Cap on the number of spikes considered, since the pair count below is
/// quadratic in the worst case.
const MAX_SYNCHRONY_SPIKES: usize = 10_000;

/// Fraction of inter-neuron spike pairs whose times fall within
/// `window_ms` of each other. Returns a value in `[0, 1]`; 0.0 when fewer
/// than two spikes are supplied or no inter-neuron pairs exist.
pub fn compute_synchrony(spikes: &[Spike], window_ms: f64) -> f64 {
    if spikes.len() < 2 {
        return 0.0;
    }
    // Truncate to the cap, then sort the working copy by spike time.
    let capped = &spikes[..spikes.len().min(MAX_SYNCHRONY_SPIKES)];
    let mut ordered = capped.to_vec();
    ordered.sort_by(|a, b| {
        a.time
            .partial_cmp(&b.time)
            .unwrap_or(std::cmp::Ordering::Equal)
    });

    // Sliding window: for each spike, count earlier spikes within
    // `window_ms` that came from a *different* neuron.
    let mut coincident_pairs = 0usize;
    let mut lo = 0usize;
    for hi in 0..ordered.len() {
        while lo < hi && ordered[hi].time - ordered[lo].time > window_ms {
            lo += 1;
        }
        let current_neuron = ordered[hi].neuron_id;
        coincident_pairs += ordered[lo..hi]
            .iter()
            .filter(|s| s.neuron_id != current_neuron)
            .count();
    }

    // Normalise by the total number of pairs whose members come from
    // different neurons: all pairs minus the same-neuron pairs.
    let mut per_neuron: std::collections::HashMap<usize, usize> =
        std::collections::HashMap::new();
    for s in &ordered {
        *per_neuron.entry(s.neuron_id).or_insert(0) += 1;
    }
    let n = ordered.len();
    let same_neuron: usize = per_neuron.values().map(|&c| c * (c - 1) / 2).sum();
    let cross_neuron = n * (n - 1) / 2 - same_neuron;
    if cross_neuron == 0 {
        0.0
    } else {
        coincident_pairs as f64 / cross_neuron as f64
    }
}
/// Combined energy of a network state: the negated sum of the min-cut
/// value and the synchrony measure, so larger cuts and higher synchrony
/// both lower (improve) the energy.
pub fn compute_energy(mincut: f64, synchrony: f64) -> f64 {
    -(mincut + synchrony)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Default config: dt of 1.0 and all subsystems enabled.
    #[test]
    fn test_default_config() {
        let config = SNNMinCutConfig::default();
        assert_eq!(config.dt, 1.0);
        assert!(config.enable_attractors);
    }

    /// Widening the coincidence window can only admit more spike pairs,
    /// so synchrony must be monotone in the window size.
    #[test]
    fn test_synchrony_computation() {
        let spikes = vec![
            Spike {
                neuron_id: 0,
                time: 0.0,
            },
            Spike {
                neuron_id: 1,
                time: 0.5,
            },
            Spike {
                neuron_id: 2,
                time: 10.0,
            },
        ];
        let sync_narrow = compute_synchrony(&spikes, 1.0);
        let sync_wide = compute_synchrony(&spikes, 20.0);
        assert!(sync_wide >= sync_narrow);
    }

    /// Energy is negative for positive inputs and decreases as the
    /// min-cut / synchrony terms grow.
    #[test]
    fn test_energy_function() {
        let energy = compute_energy(10.0, 0.5);
        assert!(energy < 0.0);
        let energy2 = compute_energy(20.0, 0.8);
        assert!(energy2 < energy);
    }

    /// Spike is a plain value type: fields round-trip unchanged.
    #[test]
    fn test_spike_train() {
        let spike = Spike {
            neuron_id: 42,
            time: 100.5,
        };
        assert_eq!(spike.neuron_id, 42);
        assert!((spike.time - 100.5).abs() < 1e-10);
    }
}