ruvector-mincut 2.0.6

World's first subpolynomial dynamic min-cut: self-healing networks, AI optimization, real-time graph analysis
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
//! # Layer 2: Strange Loop Self-Modification Protocol
//!
//! Implements recursive self-observation for meta-cognitive graph optimization.
//!
//! ## Hierarchical Levels
//!
//! - **Level 0**: Object Graph - computational units and data flow
//! - **Level 1**: Meta-Graph - observes Level 0 statistics
//! - **Level 2**: Meta-Meta-Graph - observes learning dynamics
//!
//! The "strange loop" closes when Level 2 actions modify Level 0 structure,
//! which changes Level 1 observations, which triggers Level 2 re-evaluation.

use super::{
    network::{LayerConfig, NetworkConfig, SpikingNetwork},
    neuron::{LIFNeuron, NeuronConfig, NeuronPopulation},
    SimTime, Spike,
};
use crate::graph::{DynamicGraph, VertexId};
use std::collections::VecDeque;

/// Tunable parameters for the strange-loop system.
#[derive(Debug, Clone)]
pub struct StrangeLoopConfig {
    /// Level 0 neuron count (mirrors the number of graph vertices)
    pub level0_size: usize,
    /// Level 1 observer neuron count
    pub level1_size: usize,
    /// Level 2 meta-neuron count
    pub level2_size: usize,
    /// Simulation time step
    pub dt: f64,
    /// Trend magnitude above which edges get strengthened
    pub strengthen_threshold: f64,
    /// Trend magnitude below which edges get pruned
    pub prune_threshold: f64,
    /// Minimum mincut contribution an edge needs to survive pruning
    pub prune_weight_threshold: f64,
    /// Number of past observations retained for trend analysis
    pub observation_window: usize,
}

impl Default for StrangeLoopConfig {
    fn default() -> Self {
        Self {
            level0_size: 100,
            level1_size: 20,
            level2_size: 5,
            dt: 1.0,
            strengthen_threshold: 0.7,
            prune_threshold: 0.3,
            prune_weight_threshold: 0.1,
            observation_window: 100,
        }
    }
}

/// Position of a component within the meta-cognitive hierarchy.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum MetaLevel {
    /// Level 0: the object graph under optimization
    Object,
    /// Level 1: the SNN observing Level 0
    Observer,
    /// Level 2: meta-neurons modulating the observer
    Meta,
}

/// Structural interventions Level 2 may apply back onto Level 0.
#[derive(Debug, Clone)]
pub enum MetaAction {
    /// Reinforce edges between highly synchronized vertex pairs
    Strengthen(f64),
    /// Drop edges whose mincut contribution falls below the threshold
    Prune(f64),
    /// Radical reorganization seeded by the current mincut partition
    Restructure,
    /// Leave the graph untouched
    NoOp,
}

/// Dense influence weights linking adjacent levels of the hierarchy.
#[derive(Debug, Clone)]
pub struct CrossLevelInfluence {
    /// Weights projecting Level 0 activity onto Level 1
    pub l0_to_l1: Vec<Vec<f64>>,
    /// Weights projecting Level 1 activity onto Level 2
    pub l1_to_l2: Vec<Vec<f64>>,
    /// Weights feeding Level 2 decisions back into Level 0 (closes the loop)
    pub l2_to_l0: Vec<Vec<f64>>,
}

/// A Level 2 decision unit tracking observer-activity trends.
#[derive(Debug, Clone)]
pub struct MetaNeuron {
    /// Identifier of this meta-neuron
    pub id: usize,
    /// Trend signal: short-term mean minus long-term mean of summaries
    pub state: f64,
    /// Magnitude of `state` required to trigger a structural action
    pub threshold: f64,
    /// Sliding window of recent observer summaries (bounded by `window`)
    history: VecDeque<f64>,
    /// Maximum number of summaries retained in `history`
    window: usize,
}

impl MetaNeuron {
    /// Construct a meta-neuron with the given id and observation window.
    ///
    /// Starts with zero state and a fixed decision threshold of 0.5.
    pub fn new(id: usize, window: usize) -> Self {
        Self {
            id,
            state: 0.0,
            threshold: 0.5,
            history: VecDeque::with_capacity(window),
            window,
        }
    }

    /// Fold one observer summary into the history and emit a [`MetaAction`].
    ///
    /// `state` tracks the difference between the short-term mean (last up to
    /// 10 samples) and the long-term mean (whole retained window). Crossing
    /// the threshold upward selects `Strengthen`, downward selects `Prune`;
    /// an extreme raw summary (beyond twice the threshold) triggers
    /// `Restructure`; otherwise `NoOp`.
    pub fn modulate(&mut self, observer_summary: f64) -> MetaAction {
        // Slide the observation window forward by one sample.
        self.history.push_back(observer_summary);
        if self.history.len() > self.window {
            self.history.pop_front();
        }

        // Long-term vs. short-term trend. The short-term denominator is
        // min(10, len) so early calls average over what's available.
        let len = self.history.len() as f64;
        let long_term = self.history.iter().sum::<f64>() / len;
        let short_term =
            self.history.iter().rev().take(10).sum::<f64>() / 10.0f64.min(len);

        // Positive state = activity trending up; negative = trending down.
        self.state = short_term - long_term;

        match self.state {
            s if s > self.threshold => MetaAction::Strengthen(observer_summary),
            s if s < -self.threshold => MetaAction::Prune(observer_summary.abs()),
            _ if observer_summary.abs() > 2.0 * self.threshold => MetaAction::Restructure,
            _ => MetaAction::NoOp,
        }
    }

    /// Clear accumulated trend state and history.
    pub fn reset(&mut self) {
        self.state = 0.0;
        self.history.clear();
    }
}

/// The full three-level strange-loop system: an object graph (Level 0),
/// an observer SNN (Level 1), and meta-neurons (Level 2) whose decisions
/// feed back into the graph's structure.
pub struct MetaCognitiveMinCut {
    /// Level 0: the graph being optimized
    object_graph: DynamicGraph,
    /// Level 1: SNN observing Level 0 statistics
    observer_snn: SpikingNetwork,
    /// Level 2: meta-neurons driving structural actions
    meta_neurons: Vec<MetaNeuron>,
    /// Influence weights between adjacent levels
    influence: CrossLevelInfluence,
    /// System parameters
    config: StrangeLoopConfig,
    /// Current simulation clock
    time: SimTime,
    /// Recorded mincut values (in this module it is only cleared, never pushed to)
    mincut_history: VecDeque<f64>,
    /// Every action emitted by `strange_loop_step`, in order
    action_history: Vec<MetaAction>,
}

impl MetaCognitiveMinCut {
    /// Create a new meta-cognitive mincut system around `graph`.
    ///
    /// Builds the Level 1 observer SNN (a single layer of
    /// `config.level1_size` neurons), `config.level2_size` meta-neurons,
    /// and uniform (0.1) cross-level influence matrices whose Level 0
    /// dimension is the graph's *current* vertex count.
    ///
    /// NOTE(review): `config.level0_size` is not read here — the live
    /// vertex count is used instead; confirm which should be authoritative.
    pub fn new(graph: DynamicGraph, config: StrangeLoopConfig) -> Self {
        let n = graph.num_vertices();

        // Level 1: observer SNN watching object-graph statistics.
        let observer_config = NetworkConfig {
            layers: vec![LayerConfig::new(config.level1_size)],
            ..NetworkConfig::default()
        };
        let observer_snn = SpikingNetwork::new(observer_config);

        // Level 2: meta-neurons that modulate the observer.
        let meta_neurons: Vec<_> = (0..config.level2_size)
            .map(|i| MetaNeuron::new(i, config.observation_window))
            .collect();

        // Cross-level influence starts uniform; nothing in this module
        // adapts it yet.
        let influence = CrossLevelInfluence {
            l0_to_l1: vec![vec![0.1; config.level1_size]; n],
            l1_to_l2: vec![vec![0.1; config.level2_size]; config.level1_size],
            l2_to_l0: vec![vec![0.1; n]; config.level2_size],
        };

        // Copy out before `config` is moved into the struct.
        let observation_window = config.observation_window;

        Self {
            object_graph: graph,
            observer_snn,
            meta_neurons,
            influence,
            config,
            time: 0.0,
            mincut_history: VecDeque::with_capacity(observation_window),
            action_history: Vec::new(),
        }
    }

    /// Encode graph state as an input-current vector for the Level 1 observer.
    ///
    /// Each vertex contributes `degree + weight_sum`, projected onto every
    /// observer neuron through its `l0_to_l1` influence row.
    fn encode_graph_state(&self) -> Vec<f64> {
        let vertices = self.object_graph.vertices();
        let mut encoding = vec![0.0; self.config.level1_size];

        for (i, v) in vertices.iter().enumerate() {
            let degree = self.object_graph.degree(*v) as f64;
            // Each neighbor currently contributes a flat 1.0, so `weight_sum`
            // equals the neighbor count. The original
            // `filter_map(|(_, _)| Some(1.0))` never filtered anything;
            // `map` states that directly (clippy: unnecessary_filter_map).
            // TODO(review): if real edge weights should be summed here,
            // map the weight component of the neighbor tuple instead.
            let weight_sum: f64 = self
                .object_graph
                .neighbors(*v)
                .iter()
                .map(|_| 1.0)
                .sum();

            // Bounds guards tolerate drift between the influence matrix
            // (sized at construction) and the current graph/encoding sizes.
            for j in 0..encoding.len() {
                if i < self.influence.l0_to_l1.len() && j < self.influence.l0_to_l1[i].len() {
                    encoding[j] += self.influence.l0_to_l1[i][j] * (degree + weight_sum);
                }
            }
        }

        encoding
    }

    /// Summarize observer activity as the population rate of layer 0
    /// over a fixed 100-time-unit window.
    fn observer_summary(&self) -> f64 {
        self.observer_snn.layer_rate(0, 100.0)
    }

    /// Map observer-neuron pairs whose synchrony exceeds `threshold` onto
    /// graph vertex pairs (by shared index). Only indices valid in both the
    /// synchrony matrix and the vertex list are considered.
    fn high_correlation_pairs(&self, threshold: f64) -> Vec<(VertexId, VertexId)> {
        let sync_matrix = self.observer_snn.synchrony_matrix();
        let vertices = self.object_graph.vertices();
        let mut pairs = Vec::new();

        for i in 0..sync_matrix.len().min(vertices.len()) {
            for j in (i + 1)..sync_matrix[i].len().min(vertices.len()) {
                if sync_matrix[i][j] > threshold {
                    pairs.push((vertices[i], vertices[j]));
                }
            }
        }

        pairs
    }

    /// Heuristic mincut contribution of an edge: its weight normalized by
    /// the combined endpoint degrees. `.max(1.0)` guards against division
    /// by zero for isolated endpoints.
    fn mincut_contribution(&self, edge: &crate::graph::Edge) -> f64 {
        let src_degree = self.object_graph.degree(edge.source) as f64;
        let tgt_degree = self.object_graph.degree(edge.target) as f64;

        edge.weight / (src_degree + tgt_degree).max(1.0)
    }

    /// Delete every edge with at least one endpoint outside `vertices`,
    /// keeping only the induced subgraph of the given partition.
    fn rebuild_from_partition(&mut self, vertices: &[VertexId]) {
        let vertex_set: std::collections::HashSet<_> = vertices.iter().collect();

        // Collect first, then delete, to avoid mutating while iterating.
        let edges_to_remove: Vec<_> = self
            .object_graph
            .edges()
            .iter()
            .filter(|e| !vertex_set.contains(&e.source) || !vertex_set.contains(&e.target))
            .map(|e| (e.source, e.target))
            .collect();

        for (u, v) in edges_to_remove {
            let _ = self.object_graph.delete_edge(u, v);
        }
    }

    /// Execute one strange loop iteration and return the action taken.
    ///
    /// Flow: Level 0 state is encoded into observer currents; the observer
    /// steps once; its summary drives every meta-neuron; the first non-NoOp
    /// meta-decision is applied back to the graph, closing the loop.
    pub fn strange_loop_step(&mut self) -> MetaAction {
        // Level 0 → Level 1: encode graph state as injected currents.
        let graph_state = self.encode_graph_state();
        self.observer_snn.inject_current(&graph_state);

        // Level 1 dynamics: observer SNN processes the graph state.
        let _observer_spikes = self.observer_snn.step();

        // Level 1 → Level 2: meta-neurons receive the observer summary.
        let observer_summary = self.observer_summary();

        // Every meta-neuron must see this sample (modulate updates its
        // history), so run them all eagerly before selecting a winner —
        // a lazy short-circuiting chain would skip later neurons.
        let mut actions = Vec::with_capacity(self.meta_neurons.len());
        for meta_neuron in &mut self.meta_neurons {
            actions.push(meta_neuron.modulate(observer_summary));
        }

        // Select the dominant action (simplified: first non-NoOp).
        let action = actions
            .into_iter()
            .find(|a| !matches!(a, MetaAction::NoOp))
            .unwrap_or(MetaAction::NoOp);

        // Level 2 → Level 0: close the strange loop.
        match &action {
            MetaAction::Strengthen(threshold) => {
                // Add or reinforce edges where observer synchrony is high.
                let hot_pairs = self.high_correlation_pairs(*threshold);
                for (u, v) in hot_pairs {
                    if !self.object_graph.has_edge(u, v) {
                        let _ = self.object_graph.insert_edge(u, v, 1.0);
                    } else {
                        // Strengthen the existing edge by 10%.
                        if let Some(edge) = self.object_graph.get_edge(u, v) {
                            let _ = self
                                .object_graph
                                .update_edge_weight(u, v, edge.weight * 1.1);
                        }
                    }
                }
            }
            MetaAction::Prune(threshold) => {
                // Remove edges whose mincut contribution is below threshold;
                // collect first so deletion doesn't invalidate the scan.
                let weak_edges: Vec<_> = self
                    .object_graph
                    .edges()
                    .iter()
                    .filter(|e| self.mincut_contribution(e) < *threshold)
                    .map(|e| (e.source, e.target))
                    .collect();

                for (u, v) in weak_edges {
                    let _ = self.object_graph.delete_edge(u, v);
                }
            }
            MetaAction::Restructure => {
                // Keep only the largest connected component (a no-op when
                // the graph is already connected).
                let components = self.object_graph.connected_components();
                if let Some(largest) = components.iter().max_by_key(|c| c.len()) {
                    if largest.len() < self.object_graph.num_vertices() {
                        self.rebuild_from_partition(largest);
                    }
                }
            }
            MetaAction::NoOp => {}
        }

        self.time += self.config.dt;
        self.action_history.push(action.clone());

        action
    }

    /// Get the object graph (Level 0).
    pub fn graph(&self) -> &DynamicGraph {
        &self.object_graph
    }

    /// Get mutable access to the object graph.
    pub fn graph_mut(&mut self) -> &mut DynamicGraph {
        &mut self.object_graph
    }

    /// Get the observer SNN (Level 1).
    pub fn observer(&self) -> &SpikingNetwork {
        &self.observer_snn
    }

    /// Get the history of actions taken by `strange_loop_step`.
    pub fn action_history(&self) -> &[MetaAction] {
        &self.action_history
    }

    /// Summarize all three levels as `(edge count, observer rate, mean
    /// meta-neuron state)`.
    ///
    /// Returns 0.0 for the Level 2 component when there are no meta-neurons
    /// (`level2_size == 0`), instead of the NaN that `0.0 / 0.0` would give.
    pub fn level_summary(&self) -> (f64, f64, f64) {
        let l0 = self.object_graph.num_edges() as f64;
        let l1 = self.observer_summary();
        let l2 = if self.meta_neurons.is_empty() {
            0.0
        } else {
            self.meta_neurons.iter().map(|m| m.state).sum::<f64>()
                / self.meta_neurons.len() as f64
        };

        (l0, l1, l2)
    }

    /// Reset observer, meta-neurons, clock, and histories.
    /// The object graph itself is left untouched.
    pub fn reset(&mut self) {
        self.observer_snn.reset();
        for meta in &mut self.meta_neurons {
            meta.reset();
        }
        self.time = 0.0;
        self.mincut_history.clear();
        self.action_history.clear();
    }

    /// Run `steps` strange loop iterations and return the actions taken.
    pub fn run(&mut self, steps: usize) -> Vec<MetaAction> {
        let mut actions = Vec::new();
        for _ in 0..steps {
            actions.push(self.strange_loop_step());
        }
        actions
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_meta_neuron() {
        let mut neuron = MetaNeuron::new(0, 10);

        // Feed a rising sequence of observer summaries.
        for i in 0..15 {
            let _ = neuron.modulate(0.1 * i as f64);
        }

        // The sliding window caps retained history at `window` entries.
        assert_eq!(neuron.history.len(), 10);
    }

    #[test]
    fn test_strange_loop_creation() {
        let graph = DynamicGraph::new();
        // Ring of 10 vertices.
        for i in 0..10 {
            graph.insert_edge(i, (i + 1) % 10, 1.0).unwrap();
        }

        let config = StrangeLoopConfig::default();
        let system = MetaCognitiveMinCut::new(graph, config);

        // Only the Level 0 summary is asserted; underscore-prefix the
        // unused components to avoid unused-variable warnings.
        let (l0, _l1, _l2) = system.level_summary();
        assert!(l0 > 0.0);
    }

    #[test]
    fn test_strange_loop_step() {
        let graph = DynamicGraph::new();
        // Complete graph on 10 vertices gives the loop structure to act on.
        for i in 0..10 {
            for j in (i + 1)..10 {
                graph.insert_edge(i, j, 1.0).unwrap();
            }
        }

        let config = StrangeLoopConfig::default();
        let mut system = MetaCognitiveMinCut::new(graph, config);

        // Each iteration must record and return exactly one action.
        let actions = system.run(5);
        assert_eq!(actions.len(), 5);
    }
}
}