// synaptic_daa_swarm/lib.rs — crate root

//! Synaptic DAA Swarm - Distributed Autonomous Agent swarm intelligence
//!
//! This crate provides swarm intelligence capabilities for distributed
//! autonomous agents in the Synaptic Neural Mesh ecosystem.

use std::sync::Arc;

use async_trait::async_trait;
use dashmap::DashMap;
use parking_lot::RwLock;
use rand::Rng;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

use synaptic_neural_mesh::{Agent, NeuralMesh, Task, TaskRequirements};
use synaptic_qudag_core::QuDAGNode;

16/// Swarm behaviors
17#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
18pub enum SwarmBehavior {
19    Flocking,
20    Foraging,
21    Exploration,
22    Consensus,
23    Optimization,
24}
25
/// Swarm intelligence coordinator
///
/// Owns the agent population, the list of active behaviors, and the shared
/// swarm state. Every field is behind an `Arc` so handles can be shared
/// across async tasks.
pub struct Swarm {
    // Unique identifier for this swarm instance.
    id: Uuid,
    // Agent population keyed by agent id (concurrent map).
    agents: Arc<DashMap<Uuid, SwarmAgent>>,
    // Behaviors applied each iteration, in insertion order.
    behaviors: Arc<RwLock<Vec<SwarmBehavior>>>,
    // Neural mesh that swarm agents are registered with on initialize.
    mesh: Arc<NeuralMesh>,
    // Iteration counter, global best solution, and convergence measure.
    state: Arc<RwLock<SwarmState>>,
}
34
/// Individual swarm agent
#[derive(Debug, Clone)]
pub struct SwarmAgent {
    /// Unique agent identifier.
    pub id: Uuid,
    /// Current position in 3D space.
    pub position: Vector3,
    /// Current velocity, added to position during movement updates.
    pub velocity: Vector3,
    /// Fitness of the agent's current position (higher is better).
    pub fitness: f64,
    /// Per-agent scratch memory (initialized to 10 zeroed slots by
    /// `Swarm::initialize`); copied into `Solution::data` for the best agent.
    pub memory: Vec<f64>,
}
44
45/// 3D vector for spatial positioning
46#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
47pub struct Vector3 {
48    pub x: f64,
49    pub y: f64,
50    pub z: f64,
51}
52
53impl Vector3 {
54    pub fn new(x: f64, y: f64, z: f64) -> Self {
55        Self { x, y, z }
56    }
57    
58    pub fn zero() -> Self {
59        Self { x: 0.0, y: 0.0, z: 0.0 }
60    }
61    
62    pub fn distance(&self, other: &Self) -> f64 {
63        let dx = self.x - other.x;
64        let dy = self.y - other.y;
65        let dz = self.z - other.z;
66        (dx * dx + dy * dy + dz * dz).sqrt()
67    }
68    
69    pub fn normalize(&mut self) {
70        let mag = (self.x * self.x + self.y * self.y + self.z * self.z).sqrt();
71        if mag > 0.0 {
72            self.x /= mag;
73            self.y /= mag;
74            self.z /= mag;
75        }
76    }
77}
78
/// Swarm state
///
/// Shared mutable state of a running swarm, kept behind an `RwLock`.
#[derive(Debug, Clone)]
pub struct SwarmState {
    /// Number of completed iterations.
    pub iteration: usize,
    /// Best solution found so far, if any update has produced one.
    pub global_best: Option<Solution>,
    /// Convergence measure in (0, 1]; values near 1 mean agent fitness
    /// values have collapsed toward the best agent's fitness.
    pub convergence: f64,
}
86
/// Solution representation
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Solution {
    /// Position in search space where this solution was found.
    pub position: Vector3,
    /// Fitness score of the solution (higher is better).
    pub fitness: f64,
    /// Snapshot of the discovering agent's memory vector.
    pub data: Vec<f64>,
}
94
95impl Swarm {
96    /// Create a new swarm
97    pub fn new() -> Self {
98        Self {
99            id: Uuid::new_v4(),
100            agents: Arc::new(DashMap::new()),
101            behaviors: Arc::new(RwLock::new(Vec::new())),
102            mesh: Arc::new(NeuralMesh::new()),
103            state: Arc::new(RwLock::new(SwarmState {
104                iteration: 0,
105                global_best: None,
106                convergence: 0.0,
107            })),
108        }
109    }
110    
111    /// Add a behavior to the swarm
112    pub fn add_behavior(&self, behavior: SwarmBehavior) {
113        self.behaviors.write().push(behavior);
114    }
115    
116    /// Initialize swarm with agents
117    pub async fn initialize(&self, agent_count: usize) {
118        let mut rng = rand::thread_rng();
119        
120        for _ in 0..agent_count {
121            let agent = SwarmAgent {
122                id: Uuid::new_v4(),
123                position: Vector3::new(
124                    rng.gen_range(-100.0..100.0),
125                    rng.gen_range(-100.0..100.0),
126                    rng.gen_range(-100.0..100.0),
127                ),
128                velocity: Vector3::zero(),
129                fitness: 0.0,
130                memory: vec![0.0; 10],
131            };
132            
133            // Register with neural mesh
134            let mesh_agent = Agent::new(format!("swarm-agent-{}", agent.id));
135            self.mesh.add_agent(mesh_agent).await.ok();
136            
137            self.agents.insert(agent.id, agent);
138        }
139    }
140    
141    /// Run swarm simulation
142    pub async fn run(&self) {
143        loop {
144            self.update_iteration().await;
145            
146            // Check convergence
147            if self.state.read().convergence > 0.95 {
148                break;
149            }
150            
151            tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
152        }
153    }
154    
155    /// Update one iteration of the swarm
156    async fn update_iteration(&self) {
157        let behaviors = self.behaviors.read().clone();
158        
159        for behavior in &behaviors {
160            match behavior {
161                SwarmBehavior::Flocking => self.apply_flocking().await,
162                SwarmBehavior::Foraging => self.apply_foraging().await,
163                SwarmBehavior::Exploration => self.apply_exploration().await,
164                SwarmBehavior::Consensus => self.apply_consensus().await,
165                SwarmBehavior::Optimization => self.apply_optimization().await,
166            }
167        }
168        
169        // Update global state
170        self.update_global_state().await;
171        
172        // Increment iteration
173        self.state.write().iteration += 1;
174    }
175    
176    /// Apply flocking behavior
177    async fn apply_flocking(&self) {
178        let agents: Vec<SwarmAgent> = self.agents.iter().map(|a| a.clone()).collect();
179        
180        for mut agent_ref in self.agents.iter_mut() {
181            let agent = agent_ref.value_mut();
182            let mut separation = Vector3::zero();
183            let mut alignment = Vector3::zero();
184            let mut cohesion = Vector3::zero();
185            let mut count = 0;
186            
187            for other in &agents {
188                if other.id != agent.id {
189                    let distance = agent.position.distance(&other.position);
190                    
191                    if distance < 50.0 {
192                        // Separation
193                        if distance < 10.0 {
194                            let mut diff = Vector3 {
195                                x: agent.position.x - other.position.x,
196                                y: agent.position.y - other.position.y,
197                                z: agent.position.z - other.position.z,
198                            };
199                            diff.normalize();
200                            separation.x += diff.x;
201                            separation.y += diff.y;
202                            separation.z += diff.z;
203                        }
204                        
205                        // Alignment
206                        alignment.x += other.velocity.x;
207                        alignment.y += other.velocity.y;
208                        alignment.z += other.velocity.z;
209                        
210                        // Cohesion
211                        cohesion.x += other.position.x;
212                        cohesion.y += other.position.y;
213                        cohesion.z += other.position.z;
214                        
215                        count += 1;
216                    }
217                }
218            }
219            
220            if count > 0 {
221                // Apply forces
222                agent.velocity.x += separation.x * 0.1 + alignment.x * 0.05 + cohesion.x * 0.01;
223                agent.velocity.y += separation.y * 0.1 + alignment.y * 0.05 + cohesion.y * 0.01;
224                agent.velocity.z += separation.z * 0.1 + alignment.z * 0.05 + cohesion.z * 0.01;
225                
226                // Update position
227                agent.position.x += agent.velocity.x;
228                agent.position.y += agent.velocity.y;
229                agent.position.z += agent.velocity.z;
230            }
231        }
232    }
233    
234    /// Apply foraging behavior
235    async fn apply_foraging(&self) {
236        // Implement foraging logic
237        for mut agent in self.agents.iter_mut() {
238            // Simple random walk for now
239            let mut rng = rand::thread_rng();
240            agent.velocity.x += rng.gen_range(-1.0..1.0);
241            agent.velocity.y += rng.gen_range(-1.0..1.0);
242            agent.velocity.z += rng.gen_range(-1.0..1.0);
243            
244            agent.position.x += agent.velocity.x * 0.1;
245            agent.position.y += agent.velocity.y * 0.1;
246            agent.position.z += agent.velocity.z * 0.1;
247        }
248    }
249    
250    /// Apply exploration behavior
251    async fn apply_exploration(&self) {
252        // Implement exploration logic
253        let mut rng = rand::thread_rng();
254        
255        for mut agent in self.agents.iter_mut() {
256            // Levy flight pattern
257            if rng.gen::<f64>() < 0.1 {
258                agent.velocity.x = rng.gen_range(-10.0..10.0);
259                agent.velocity.y = rng.gen_range(-10.0..10.0);
260                agent.velocity.z = rng.gen_range(-10.0..10.0);
261            }
262        }
263    }
264    
265    /// Apply consensus behavior
266    async fn apply_consensus(&self) {
267        // Calculate average position
268        let agents: Vec<SwarmAgent> = self.agents.iter().map(|a| a.clone()).collect();
269        let count = agents.len() as f64;
270        
271        if count > 0.0 {
272            let mut avg_x = 0.0;
273            let mut avg_y = 0.0;
274            let mut avg_z = 0.0;
275            
276            for agent in &agents {
277                avg_x += agent.position.x;
278                avg_y += agent.position.y;
279                avg_z += agent.position.z;
280            }
281            
282            avg_x /= count;
283            avg_y /= count;
284            avg_z /= count;
285            
286            // Move towards consensus
287            for mut agent in self.agents.iter_mut() {
288                agent.velocity.x += (avg_x - agent.position.x) * 0.01;
289                agent.velocity.y += (avg_y - agent.position.y) * 0.01;
290                agent.velocity.z += (avg_z - agent.position.z) * 0.01;
291            }
292        }
293    }
294    
295    /// Apply optimization behavior
296    async fn apply_optimization(&self) {
297        // Particle swarm optimization
298        let global_best = self.state.read().global_best.clone();
299        
300        for mut agent in self.agents.iter_mut() {
301            if let Some(ref best) = global_best {
302                // Update velocity towards global best
303                agent.velocity.x += (best.position.x - agent.position.x) * 0.1;
304                agent.velocity.y += (best.position.y - agent.position.y) * 0.1;
305                agent.velocity.z += (best.position.z - agent.position.z) * 0.1;
306            }
307            
308            // Update fitness (example: distance from origin)
309            agent.fitness = 1.0 / (1.0 + agent.position.distance(&Vector3::zero()));
310        }
311    }
312    
313    /// Update global swarm state
314    async fn update_global_state(&self) {
315        let agents: Vec<SwarmAgent> = self.agents.iter().map(|a| a.clone()).collect();
316        
317        // Find best solution
318        if let Some(best_agent) = agents.iter().max_by(|a, b| a.fitness.partial_cmp(&b.fitness).unwrap()) {
319            let solution = Solution {
320                position: best_agent.position,
321                fitness: best_agent.fitness,
322                data: best_agent.memory.clone(),
323            };
324            
325            let mut state = self.state.write();
326            
327            // Update global best
328            if state.global_best.is_none() || solution.fitness > state.global_best.as_ref().unwrap().fitness {
329                state.global_best = Some(solution);
330            }
331            
332            // Calculate convergence
333            let fitness_variance = agents.iter()
334                .map(|a| (a.fitness - best_agent.fitness).powi(2))
335                .sum::<f64>() / agents.len() as f64;
336            
337            state.convergence = 1.0 / (1.0 + fitness_variance);
338        }
339    }
340    
341    /// Get swarm statistics
342    pub fn get_stats(&self) -> SwarmStats {
343        let state = self.state.read();
344        SwarmStats {
345            agent_count: self.agents.len(),
346            iteration: state.iteration,
347            convergence: state.convergence,
348            best_fitness: state.global_best.as_ref().map(|s| s.fitness).unwrap_or(0.0),
349        }
350    }
351}
352
353impl Default for Swarm {
354    fn default() -> Self {
355        Self::new()
356    }
357}
358
/// Swarm statistics
///
/// Read-only snapshot returned by `Swarm::get_stats`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SwarmStats {
    /// Number of agents currently in the swarm.
    pub agent_count: usize,
    /// Iterations completed so far.
    pub iteration: usize,
    /// Current convergence measure (see `SwarmState::convergence`).
    pub convergence: f64,
    /// Fitness of the global best solution, or 0.0 if none exists yet.
    pub best_fitness: f64,
}
367
/// Evolutionary algorithm trait
///
/// Contract for population-based evolutionary search over [`SwarmAgent`]s.
/// `#[async_trait]` allows the async methods in this trait.
#[async_trait]
pub trait EvolutionaryAlgorithm {
    /// Initialize a population of `size` agents.
    async fn initialize_population(&mut self, size: usize);

    /// Evaluate fitness for every member of the population.
    async fn evaluate_fitness(&mut self);

    /// Select parent agents for reproduction; returns their ids.
    async fn selection(&mut self) -> Vec<Uuid>;

    /// Combine two parents (identified by id) into a new offspring agent.
    async fn crossover(&mut self, parent1: Uuid, parent2: Uuid) -> SwarmAgent;

    /// Randomly perturb an agent in place.
    async fn mutation(&mut self, agent: &mut SwarmAgent);

    /// Perform one full evolution step (exact sequencing of selection,
    /// crossover, mutation, and evaluation is up to the implementor).
    async fn evolve(&mut self);
}
389
#[cfg(test)]
mod tests {
    use super::*;

    /// A fresh swarm holds exactly the agents given to `initialize` and
    /// has run zero iterations.
    #[tokio::test]
    async fn test_swarm_creation() {
        let swarm = Swarm::new();
        swarm.initialize(10).await;

        let stats = swarm.get_stats();
        assert_eq!(stats.agent_count, 10);
        assert_eq!(stats.iteration, 0);
    }

    /// Distance and normalization of a 3-4-5 vector.
    ///
    /// Pure synchronous math — a plain #[test] avoids spinning up a tokio
    /// runtime for nothing (the original used #[tokio::test] needlessly).
    #[test]
    fn test_vector_operations() {
        let mut v1 = Vector3::new(3.0, 4.0, 0.0);
        let v2 = Vector3::new(0.0, 0.0, 0.0);

        assert_eq!(v1.distance(&v2), 5.0);

        v1.normalize();
        assert!((v1.x - 0.6).abs() < 0.001);
        assert!((v1.y - 0.8).abs() < 0.001);
        // Added: the z component must stay zero after normalization.
        assert!(v1.z.abs() < 0.001);
    }

    /// One update pass with behaviors configured bumps the iteration count.
    #[tokio::test]
    async fn test_swarm_behaviors() {
        let swarm = Swarm::new();
        swarm.add_behavior(SwarmBehavior::Flocking);
        swarm.add_behavior(SwarmBehavior::Optimization);

        swarm.initialize(5).await;
        swarm.update_iteration().await;

        let stats = swarm.get_stats();
        assert_eq!(stats.iteration, 1);
    }
}