// converge_knowledge/agentic/mod.rs
//! AgenticDB: Agent Memory System
//!
//! Implements the 5-table architecture from ruvector for AI agent memory:
//!
//! 1. **Reflexion Episodes** - Self-critique memory for learning from mistakes
//! 2. **Skill Library** - Consolidated successful patterns
//! 3. **Causal Memory** - Hypergraph relationships between concepts
//! 4. **Learning Sessions** - RL training data with rewards
//! 5. **Vector Store** - Core embeddings (handled by main KnowledgeBase)
//!
//! Plus advanced learning mechanisms:
//!
//! - **Temporal Patterns** - Time crystals for periodic behavior detection
//! - **Online Learning** - Continual adaptation with EWC forgetting prevention
//! - **Meta-Learning** - Learning to learn with MAML/Reptile-style adaptation
// Submodules: one per memory table / learning mechanism.
mod causal;
mod meta;
mod online;
mod reflexion;
mod sessions;
mod skills;
mod temporal;

// Public re-exports so callers can write `agentic::Skill`, `agentic::Reward`,
// etc. without naming the submodule.
pub use causal::{CausalEdge, CausalMemory, CausalNode, Hyperedge};
pub use meta::{FewShotLearner, LearningStrategy, MetaLearner, TaskFeatures};
pub use online::{DriftDetector, Experience, ExperienceWindow, OnlineLearner, ParameterSnapshot};
pub use reflexion::{Critique, CritiqueType, ReflexionEpisode, ReflexionMemory};
pub use sessions::{LearningSession, Reward, SessionTurn};
pub use skills::{Skill, SkillLibrary, SkillPattern};
pub use temporal::{TemporalMemory, TemporalOccurrence, TemporalPeriod, TimeCrystal};

use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::sync::RwLock;
use uuid::Uuid;
37
/// AgenticDB: Complete agent memory system.
///
/// Provides persistent memory for AI agents with:
/// - Self-reflection and learning from mistakes
/// - Skill consolidation and reuse
/// - Causal reasoning over relationships
/// - Reinforcement learning from sessions
/// - Temporal pattern detection (time crystals)
/// - Online/continual learning with forgetting prevention
/// - Meta-learning for quick adaptation
///
/// Every subsystem is wrapped in `Arc<RwLock<…>>`, so a single `AgenticDB`
/// can be cloned field-by-field or shared across async tasks; accessor
/// methods on the impl take only the lock they need.
pub struct AgenticDB {
    /// Reflexion episodes for self-critique.
    pub reflexion: Arc<RwLock<ReflexionMemory>>,

    /// Library of learned skills.
    pub skills: Arc<RwLock<SkillLibrary>>,

    /// Causal relationships between concepts.
    pub causal: Arc<RwLock<CausalMemory>>,

    /// Learning sessions with rewards.
    pub sessions: Arc<RwLock<Vec<LearningSession>>>,

    /// Temporal patterns (time crystals).
    pub temporal: Arc<RwLock<TemporalMemory>>,

    /// Meta-learner for quick task adaptation.
    pub meta: Arc<RwLock<MetaLearner>>,

    /// Experience window for continual learning.
    pub experiences: Arc<RwLock<ExperienceWindow>>,

    /// Distribution drift detector.
    pub drift_detector: Arc<RwLock<DriftDetector>>,

    /// Storage path. Set by `with_path` but not yet read anywhere in this
    /// module — hence the `dead_code` allowance.
    #[allow(dead_code)]
    path: Option<String>,
}
77
78impl AgenticDB {
79    /// Create a new in-memory AgenticDB.
80    ///
81    /// # Arguments
82    /// * `meta_param_dim` - Dimension of meta-learning parameters (default 64)
83    /// * `drift_feature_dim` - Dimension of features for drift detection (default 64)
84    pub fn new() -> Self {
85        Self::with_dimensions(64, 64)
86    }
87
88    /// Create with specific dimensions for meta-learning.
89    pub fn with_dimensions(meta_param_dim: usize, drift_feature_dim: usize) -> Self {
90        Self {
91            reflexion: Arc::new(RwLock::new(ReflexionMemory::new())),
92            skills: Arc::new(RwLock::new(SkillLibrary::new())),
93            causal: Arc::new(RwLock::new(CausalMemory::new())),
94            sessions: Arc::new(RwLock::new(Vec::new())),
95            temporal: Arc::new(RwLock::new(TemporalMemory::new())),
96            meta: Arc::new(RwLock::new(MetaLearner::new("agent_meta", meta_param_dim))),
97            experiences: Arc::new(RwLock::new(ExperienceWindow::new(1000))),
98            drift_detector: Arc::new(RwLock::new(DriftDetector::new(drift_feature_dim))),
99            path: None,
100        }
101    }
102
103    /// Create with persistence.
104    pub fn with_path(path: impl Into<String>) -> Self {
105        Self {
106            reflexion: Arc::new(RwLock::new(ReflexionMemory::new())),
107            skills: Arc::new(RwLock::new(SkillLibrary::new())),
108            causal: Arc::new(RwLock::new(CausalMemory::new())),
109            sessions: Arc::new(RwLock::new(Vec::new())),
110            temporal: Arc::new(RwLock::new(TemporalMemory::new())),
111            meta: Arc::new(RwLock::new(MetaLearner::new("agent_meta", 64))),
112            experiences: Arc::new(RwLock::new(ExperienceWindow::new(1000))),
113            drift_detector: Arc::new(RwLock::new(DriftDetector::new(64))),
114            path: Some(path.into()),
115        }
116    }
117
118    /// Record a reflexion episode (self-critique).
119    ///
120    /// # Example
121    /// ```rust,no_run
122    /// use converge_knowledge::agentic::{AgenticDB, ReflexionEpisode, Critique, CritiqueType};
123    ///
124    /// # async fn example() {
125    /// let db = AgenticDB::new();
126    ///
127    /// // Agent attempted something and failed
128    /// let episode = ReflexionEpisode::new(
129    ///     "write_code",
130    ///     "Write a function to parse JSON",
131    ///     "fn parse() { /* incomplete */ }",
132    ///     false, // did not succeed
133    /// )
134    /// .with_critique(Critique::new(
135    ///     CritiqueType::MissingStep,
136    ///     "Did not handle error cases",
137    ///     "Add Result return type and error handling",
138    /// ));
139    ///
140    /// db.add_reflexion(episode).await;
141    /// # }
142    /// ```
143    pub async fn add_reflexion(&self, episode: ReflexionEpisode) {
144        let mut reflexion = self.reflexion.write().await;
145        reflexion.add_episode(episode);
146    }
147
148    /// Query similar past failures to avoid repeating mistakes.
149    pub async fn query_similar_failures(&self, task: &str, limit: usize) -> Vec<ReflexionEpisode> {
150        let reflexion = self.reflexion.read().await;
151        reflexion.find_similar_failures(task, limit)
152    }
153
154    /// Register a successful skill pattern.
155    pub async fn register_skill(&self, skill: Skill) {
156        let mut skills = self.skills.write().await;
157        skills.add_skill(skill);
158    }
159
160    /// Find applicable skills for a task.
161    pub async fn find_skills(&self, _task_description: &str) -> Vec<&Skill> {
162        // Note: This is a simplified version
163        Vec::new()
164    }
165
166    /// Add a causal relationship.
167    pub async fn add_causal_link(
168        &self,
169        cause: Uuid,
170        effect: Uuid,
171        relationship: impl Into<String>,
172        strength: f32,
173    ) {
174        let mut causal = self.causal.write().await;
175        causal.add_edge(CausalEdge {
176            id: Uuid::new_v4(),
177            cause,
178            effect,
179            relationship: relationship.into(),
180            strength,
181            evidence_count: 1,
182        });
183    }
184
185    /// Start a new learning session.
186    pub async fn start_session(&self, goal: impl Into<String>) -> Uuid {
187        let session = LearningSession::new(goal);
188        let id = session.id;
189        let mut sessions = self.sessions.write().await;
190        sessions.push(session);
191        id
192    }
193
194    /// Record a turn in a session.
195    pub async fn record_turn(
196        &self,
197        session_id: Uuid,
198        action: impl Into<String>,
199        observation: impl Into<String>,
200        reward: Reward,
201    ) {
202        let mut sessions = self.sessions.write().await;
203        if let Some(session) = sessions.iter_mut().find(|s| s.id == session_id) {
204            session.add_turn(SessionTurn {
205                action: action.into(),
206                observation: observation.into(),
207                reward,
208                timestamp: chrono::Utc::now(),
209            });
210        }
211    }
212
213    /// Get statistics about the agent memory.
214    pub async fn stats(&self) -> AgenticStats {
215        let reflexion = self.reflexion.read().await;
216        let skills = self.skills.read().await;
217        let causal = self.causal.read().await;
218        let sessions = self.sessions.read().await;
219        let temporal = self.temporal.read().await;
220        let meta = self.meta.read().await;
221        let experiences = self.experiences.read().await;
222
223        AgenticStats {
224            reflexion_episodes: reflexion.len(),
225            failed_episodes: reflexion.failure_count(),
226            skills_count: skills.len(),
227            causal_nodes: causal.node_count(),
228            causal_edges: causal.edge_count(),
229            total_sessions: sessions.len(),
230            total_turns: sessions.iter().map(|s| s.turns.len()).sum(),
231            temporal_patterns: temporal.len(),
232            meta_tasks_learned: meta.num_tasks() as usize,
233            learning_strategies: meta.num_strategies(),
234            experience_buffer_size: experiences.len(),
235        }
236    }
237
238    /// Record a temporal pattern (time crystal).
239    ///
240    /// Use this to track periodic behavior patterns like:
241    /// - Daily coding activity
242    /// - Weekly deployment schedules
243    /// - Monthly review cycles
244    pub async fn record_temporal(&self, pattern_name: &str, period: TemporalPeriod, value: f32) {
245        let mut temporal = self.temporal.write().await;
246        temporal.record(pattern_name, period, value);
247    }
248
249    /// Predict activity for a temporal pattern.
250    pub async fn predict_temporal(&self, pattern_name: &str) -> Option<f32> {
251        let temporal = self.temporal.read().await;
252        temporal.predict(pattern_name)
253    }
254
255    /// Add experience for continual learning.
256    pub async fn add_experience(
257        &mut self,
258        features: Vec<f32>,
259        target: f32,
260        task_id: Option<String>,
261    ) {
262        let mut experiences = self.experiences.write().await;
263        experiences.add(features, target, task_id);
264    }
265
266    /// Check for distribution drift.
267    ///
268    /// Returns true if the current features indicate a distribution shift
269    /// that may require model adaptation.
270    pub async fn check_drift(&self, features: &[f32]) -> bool {
271        let mut detector = self.drift_detector.write().await;
272        detector.update(features)
273    }
274
275    /// Update meta-learner after completing a task.
276    pub async fn meta_update(
277        &self,
278        task_id: &str,
279        final_params: &[f32],
280        task_embedding: Option<Vec<f32>>,
281    ) {
282        let mut meta = self.meta.write().await;
283        meta.meta_update(task_id, final_params, task_embedding);
284    }
285
286    /// Get initialization parameters for a new task.
287    pub async fn get_task_initialization(&self, task_embedding: Option<&[f32]>) -> Vec<f32> {
288        let meta = self.meta.read().await;
289        meta.initialize_for_task(task_embedding)
290    }
291
292    /// Register a learning strategy.
293    pub async fn register_strategy(&self, strategy: LearningStrategy) {
294        let mut meta = self.meta.write().await;
295        meta.register_strategy(strategy);
296    }
297
298    /// Select best strategy for a task.
299    pub async fn select_strategy(&self, task_features: &TaskFeatures) -> Option<String> {
300        let meta = self.meta.read().await;
301        meta.select_strategy(task_features).map(|s| s.name.clone())
302    }
303}
304
305impl Default for AgenticDB {
306    fn default() -> Self {
307        Self::new()
308    }
309}
310
/// Statistics about the agent memory.
///
/// A point-in-time snapshot produced by `AgenticDB::stats`; serializable
/// for logging or reporting.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AgenticStats {
    /// Total reflexion episodes recorded.
    pub reflexion_episodes: usize,
    /// Number of failed episodes.
    pub failed_episodes: usize,
    /// Number of skills in library.
    pub skills_count: usize,
    /// Number of causal nodes.
    pub causal_nodes: usize,
    /// Number of causal edges.
    pub causal_edges: usize,
    /// Total learning sessions.
    pub total_sessions: usize,
    /// Total turns across all sessions.
    pub total_turns: usize,
    /// Number of temporal patterns (time crystals).
    pub temporal_patterns: usize,
    /// Number of tasks the meta-learner has learned.
    pub meta_tasks_learned: usize,
    /// Number of learning strategies discovered.
    pub learning_strategies: usize,
    /// Current experience buffer size.
    pub experience_buffer_size: usize,
}
337
#[cfg(test)]
mod tests {
    use super::*;

    /// Reflexion workflow: record a failed attempt with critiques, then
    /// retrieve it later when a similar task comes up.
    ///
    /// Demonstrates the agent loop: attempt → fail → self-critique → store,
    /// with the stored failure queryable before the next attempt.
    #[tokio::test]
    async fn test_reflexion_workflow() {
        let memory = AgenticDB::new();

        // A code-generation attempt that went wrong: recursive factorial
        // with no base case, plus no handling of negative input.
        let failed_attempt = ReflexionEpisode::new(
            "code_generation",
            "Write a function to calculate factorial",
            "fn factorial(n: i32) -> i32 { n * factorial(n-1) }",
            false,
        )
        .with_critique(Critique::new(
            CritiqueType::LogicError,
            "Missing base case causes infinite recursion",
            "Add: if n <= 1 { return 1; }",
        ))
        .with_critique(Critique::new(
            CritiqueType::MissingStep,
            "No handling for negative numbers",
            "Add input validation or use unsigned type",
        ));

        memory.add_reflexion(failed_attempt).await;

        // The episode is counted both overall and as a failure.
        let stats = memory.stats().await;
        assert_eq!(stats.reflexion_episodes, 1);
        assert_eq!(stats.failed_episodes, 1);

        // A later, related task surfaces the stored failure.
        let matches = memory.query_similar_failures("factorial function", 5).await;
        assert_eq!(matches.len(), 1);

        // Both critiques travel with the recalled episode.
        let recalled = &matches[0];
        assert!(!recalled.succeeded);
        assert_eq!(recalled.critiques.len(), 2);
    }

    /// Skill library: consolidate a successful pattern and confirm it is
    /// counted in the stats.
    #[tokio::test]
    async fn test_skill_library() {
        let memory = AgenticDB::new();

        // A proven pattern with its historical success metrics attached.
        let patterns = vec![
            SkillPattern::new("result_type", "fn do_thing() -> Result<T, Error> { ... }"),
            SkillPattern::new("question_mark", "let value = risky_op()?;"),
        ];
        let consolidated = Skill::new("error_handling", "Rust Error Handling Pattern", patterns)
            .with_success_rate(0.95)
            .with_usage_count(42);

        memory.register_skill(consolidated).await;

        assert_eq!(memory.stats().await.skills_count, 1);
    }

    /// Causal memory: record a single cause→effect edge between two
    /// freshly-minted concept nodes.
    #[tokio::test]
    async fn test_causal_memory() {
        let memory = AgenticDB::new();

        let unwrap_usage = Uuid::new_v4();
        let panic_risk = Uuid::new_v4();

        // "Using unwrap() → can cause panic", at 80% confidence.
        memory
            .add_causal_link(unwrap_usage, panic_risk, "causes", 0.8)
            .await;

        assert_eq!(memory.stats().await.causal_edges, 1);
    }

    /// Learning session: a goal, three turns (inspect → fix → verify),
    /// and rewards that climb as the agent makes progress.
    #[tokio::test]
    async fn test_learning_session() {
        let memory = AgenticDB::new();

        let session_id = memory.start_session("Fix the bug in auth module").await;

        // Drive the trajectory from a table instead of three inline calls.
        let trajectory = [
            (
                "read_file auth.rs",
                "Found potential null check issue on line 42",
                Reward::Neutral,
            ),
            (
                "edit auth.rs: add None check",
                "File updated successfully",
                Reward::Positive(0.5),
            ),
            (
                "run tests",
                "All 15 tests passing",
                Reward::Positive(1.0),
            ),
        ];
        for (action, observation, reward) in trajectory {
            memory.record_turn(session_id, action, observation, reward).await;
        }

        let stats = memory.stats().await;
        assert_eq!(stats.total_sessions, 1);
        assert_eq!(stats.total_turns, 3);
    }

    /// Integrated workflow touching every table:
    /// reflexion → skill library → causal memory → learning session
    /// (the fifth table, the vector store, lives in the main KnowledgeBase).
    #[tokio::test]
    async fn test_integrated_workflow() {
        let memory = AgenticDB::new();

        // (1) Reflexion: a past API-design failure to learn from.
        memory
            .add_reflexion(
                ReflexionEpisode::new(
                    "api_design",
                    "Design REST API endpoint",
                    "POST /users with no validation",
                    false,
                )
                .with_critique(Critique::new(
                    CritiqueType::DesignFlaw,
                    "No input validation leads to security issues",
                    "Always validate and sanitize inputs",
                )),
            )
            .await;

        // (2) Skill library: a validated pattern that worked before.
        memory
            .register_skill(Skill::new(
                "api_validation",
                "Input Validation Pattern",
                vec![SkillPattern::new("validate_first", "validate(&input)?;")],
            ))
            .await;

        // (3) Causal memory: validation improves security.
        let validation_id = Uuid::new_v4();
        let security_id = Uuid::new_v4();
        memory
            .add_causal_link(validation_id, security_id, "improves", 0.9)
            .await;

        // (4) A fresh session for the current task.
        let session_id = memory
            .start_session("Implement user registration endpoint")
            .await;

        // Before acting, the agent consults past failures.
        let failures = memory.query_similar_failures("api endpoint", 5).await;
        assert!(!failures.is_empty(), "Should find past API failure");

        // Apply the skill and log the outcome as a rewarded turn.
        memory
            .record_turn(
                session_id,
                "apply validation skill",
                "Added input validation with proper error handling",
                Reward::Positive(0.8),
            )
            .await;

        // Every table now holds exactly one entry.
        let stats = memory.stats().await;
        assert_eq!(stats.reflexion_episodes, 1);
        assert_eq!(stats.skills_count, 1);
        assert_eq!(stats.causal_edges, 1);
        assert_eq!(stats.total_sessions, 1);
        assert_eq!(stats.total_turns, 1);

        println!("AgenticDB Stats: {:?}", stats);
    }
}
555}