// ruvector_robotics/cognitive/cognitive_core.rs

//! Central cognitive loop: perceive -> think -> act -> learn.
//!
//! The [`CognitiveCore`] drives the robot's high-level autonomy by filtering
//! percepts, selecting actions through utility-based reasoning, and
//! incorporating feedback to improve future decisions.

use serde::{Deserialize, Serialize};
use std::collections::VecDeque;

// ---------------------------------------------------------------------------
// Enums
// ---------------------------------------------------------------------------
/// Operating mode of the cognitive system.
///
/// Selected via [`CognitiveConfig::mode`]; determines the priority assigned
/// to action commands during the think phase.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CognitiveMode {
    /// Fast stimulus-response behaviour (lowest action priority).
    Reactive,
    /// Goal-directed planning and reasoning (medium action priority).
    Deliberative,
    /// Override mode for safety-critical situations (maximum action priority).
    Emergency,
}

/// Current phase of the cognitive loop.
///
/// Each phase method on [`CognitiveCore`] transitions into its matching
/// state; `learn` returns the core to `Idle` when the cycle completes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CognitiveState {
    /// No cycle in progress (initial state, and after `learn` completes).
    Idle,
    /// Ingesting and filtering incoming percepts.
    Perceiving,
    /// Deliberating over buffered percepts to produce a decision.
    Thinking,
    /// Executing a chosen action.
    Acting,
    /// Incorporating outcome feedback.
    Learning,
}

/// The kind of action the robot can execute.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ActionType {
    /// Move using a 3-component target (taken from the first three percept
    /// data values; units/frame defined by the executing subsystem — confirm
    /// with consumers).
    Move([f64; 3]),
    /// Rotate by the given amount (radians vs degrees not established here —
    /// confirm with consumers).
    Rotate(f64),
    /// Grasp the named object.
    Grasp(String),
    /// Release whatever is currently held.
    Release,
    /// Utter the given text.
    Speak(String),
    /// Idle for the given duration (presumably milliseconds — TODO confirm).
    Wait(u64),
}

// ---------------------------------------------------------------------------
// Data structs
// ---------------------------------------------------------------------------

/// A command to execute an action with priority and confidence metadata.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ActionCommand {
    /// The concrete action to execute.
    pub action: ActionType,
    /// Execution priority derived from the operating mode:
    /// 255 = emergency, 128 = deliberative, 64 = reactive.
    pub priority: u8,
    /// Confidence inherited from the percept that triggered the action.
    pub confidence: f64,
}

/// A single percept received from a sensor or subsystem.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Percept {
    /// Identifier of the producing sensor/subsystem (e.g. "lidar", "cam").
    pub source: String,
    /// Raw measurement values; when at least three are present, the first
    /// three are interpreted as a movement target by `CognitiveCore::think`.
    pub data: Vec<f64>,
    /// Confidence of the percept, compared against the attention threshold.
    pub confidence: f64,
    /// Capture time (epoch/units not established here — confirm with producers).
    pub timestamp: i64,
}

/// A decision produced by the think phase.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Decision {
    /// The action selected for execution.
    pub action: ActionCommand,
    /// Human-readable explanation of why the action was chosen.
    pub reasoning: String,
    /// Estimated utility (currently the confidence of the best percept).
    pub utility: f64,
}

/// Feedback from the environment after executing an action.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Outcome {
    /// Whether the action achieved its intent; drives the attention
    /// threshold adaptation in `CognitiveCore::learn`.
    pub success: bool,
    /// Scalar reward signal; scaled by the learning rate when accumulated.
    pub reward: f64,
    /// Free-form description of the outcome.
    pub description: String,
}

/// Configuration for the cognitive core.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CognitiveConfig {
    /// Initial operating mode.
    pub mode: CognitiveMode,
    /// Minimum percept confidence required to enter the buffer; adapted by
    /// `CognitiveCore::learn` within the band [0.1, 0.9].
    pub attention_threshold: f64,
    /// Scale factor applied to rewards when accumulating.
    pub learning_rate: f64,
    /// Maximum number of buffered percepts before the oldest is evicted.
    pub max_percepts: usize,
}

92impl Default for CognitiveConfig {
93    fn default() -> Self {
94        Self {
95            mode: CognitiveMode::Reactive,
96            attention_threshold: 0.5,
97            learning_rate: 0.01,
98            max_percepts: 100,
99        }
100    }
101}
102
// ---------------------------------------------------------------------------
// Core
// ---------------------------------------------------------------------------

/// Central cognitive controller implementing perceive-think-act-learn.
#[derive(Debug, Clone)]
pub struct CognitiveCore {
    // Current phase of the loop; see `CognitiveState`.
    state: CognitiveState,
    // Mutable configuration — the attention threshold adapts during `learn`.
    config: CognitiveConfig,
    // FIFO of percepts that passed the attention filter, bounded by
    // `config.max_percepts` (oldest evicted first).
    percept_buffer: VecDeque<Percept>,
    // Every decision produced by `think`. NOTE(review): unbounded — grows
    // for the lifetime of the core.
    decision_history: Vec<Decision>,
    // Running sum of learning-rate-scaled rewards.
    cumulative_reward: f64,
}

117impl CognitiveCore {
118    /// Create a new cognitive core with the given configuration.
119    pub fn new(config: CognitiveConfig) -> Self {
120        Self {
121            state: CognitiveState::Idle,
122            config,
123            percept_buffer: VecDeque::new(),
124            decision_history: Vec::new(),
125            cumulative_reward: 0.0,
126        }
127    }
128
129    /// Ingest a percept and transition to the Perceiving state.
130    ///
131    /// Percepts below the attention threshold are silently dropped.
132    /// When the buffer exceeds `max_percepts`, the oldest entry is removed.
133    pub fn perceive(&mut self, percept: Percept) -> CognitiveState {
134        self.state = CognitiveState::Perceiving;
135
136        if percept.confidence < self.config.attention_threshold {
137            return self.state;
138        }
139
140        if self.percept_buffer.len() >= self.config.max_percepts {
141            self.percept_buffer.pop_front(); // O(1) with VecDeque
142        }
143        self.percept_buffer.push_back(percept);
144        self.state
145    }
146
147    /// Deliberate over buffered percepts and produce a decision.
148    ///
149    /// Returns `None` when no percepts are available.
150    pub fn think(&mut self) -> Option<Decision> {
151        self.state = CognitiveState::Thinking;
152
153        if self.percept_buffer.is_empty() {
154            return None;
155        }
156
157        // Simple heuristic: pick the most confident percept and derive an action.
158        let best = self
159            .percept_buffer
160            .iter()
161            .max_by(|a, b| a.confidence.partial_cmp(&b.confidence).unwrap_or(std::cmp::Ordering::Equal))?;
162
163        let action_type = if best.data.len() >= 3 {
164            ActionType::Move([best.data[0], best.data[1], best.data[2]])
165        } else {
166            ActionType::Wait(100)
167        };
168
169        let decision = Decision {
170            action: ActionCommand {
171                action: action_type,
172                priority: match self.config.mode {
173                    CognitiveMode::Emergency => 255,
174                    CognitiveMode::Deliberative => 128,
175                    CognitiveMode::Reactive => 64,
176                },
177                confidence: best.confidence,
178            },
179            reasoning: format!("Best percept from '{}' (conf={:.2})", best.source, best.confidence),
180            utility: best.confidence,
181        };
182
183        self.decision_history.push(decision.clone());
184        Some(decision)
185    }
186
187    /// Convert a decision into an executable action command.
188    pub fn act(&mut self, decision: Decision) -> ActionCommand {
189        self.state = CognitiveState::Acting;
190        decision.action
191    }
192
193    /// Incorporate feedback from the environment to improve future behaviour.
194    pub fn learn(&mut self, outcome: Outcome) {
195        self.state = CognitiveState::Learning;
196        self.cumulative_reward += outcome.reward * self.config.learning_rate;
197
198        // Adjust attention threshold based on success/failure.
199        if outcome.success {
200            self.config.attention_threshold =
201                (self.config.attention_threshold - 0.01).max(0.1);
202        } else {
203            self.config.attention_threshold =
204                (self.config.attention_threshold + 0.01).min(0.9);
205        }
206
207        // Clear processed percepts so the next cycle starts fresh.
208        self.percept_buffer.clear();
209        self.state = CognitiveState::Idle;
210    }
211
212    /// Current cognitive state.
213    pub fn state(&self) -> CognitiveState {
214        self.state
215    }
216
217    /// Current operating mode.
218    pub fn mode(&self) -> CognitiveMode {
219        self.config.mode
220    }
221
222    /// Number of percepts currently buffered.
223    pub fn percept_count(&self) -> usize {
224        self.percept_buffer.len()
225    }
226
227    /// Number of decisions made so far.
228    pub fn decision_count(&self) -> usize {
229        self.decision_history.len()
230    }
231
232    /// Accumulated reward scaled by learning rate.
233    pub fn cumulative_reward(&self) -> f64 {
234        self.cumulative_reward
235    }
236}
237
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    /// Core built from the default configuration.
    fn default_core() -> CognitiveCore {
        CognitiveCore::new(CognitiveConfig::default())
    }

    /// Percept with a fixed timestamp for deterministic tests.
    fn make_percept(source: &str, data: Vec<f64>, confidence: f64) -> Percept {
        Percept {
            source: source.into(),
            data,
            confidence,
            timestamp: 1000,
        }
    }

    #[test]
    fn test_initial_state() {
        let core = default_core();
        assert_eq!(
            (core.state(), core.mode(), core.percept_count()),
            (CognitiveState::Idle, CognitiveMode::Reactive, 0)
        );
    }

    #[test]
    fn test_perceive_above_threshold() {
        let mut core = default_core();
        let high_conf = make_percept("lidar", vec![1.0, 2.0, 3.0], 0.8);
        assert_eq!(core.perceive(high_conf), CognitiveState::Perceiving);
        assert_eq!(core.percept_count(), 1);
    }

    #[test]
    fn test_perceive_below_threshold() {
        let mut core = default_core();
        // 0.1 < default threshold of 0.5, so the percept is dropped.
        core.perceive(make_percept("lidar", vec![1.0], 0.1));
        assert_eq!(core.percept_count(), 0);
    }

    #[test]
    fn test_think_produces_decision() {
        let mut core = default_core();
        core.perceive(make_percept("cam", vec![1.0, 2.0, 3.0], 0.9));
        let decision = core.think().expect("one buffered percept -> decision");
        // Reactive mode maps to priority 64.
        assert_eq!(decision.action.priority, 64);
        assert_eq!(core.decision_count(), 1);
    }

    #[test]
    fn test_think_empty_buffer() {
        assert!(default_core().think().is_none());
    }

    #[test]
    fn test_act_returns_command() {
        let mut core = default_core();
        core.perceive(make_percept("cam", vec![1.0, 2.0, 3.0], 0.9));
        let decision = core.think().unwrap();
        let cmd = core.act(decision);
        assert_eq!(cmd.action, ActionType::Move([1.0, 2.0, 3.0]));
        assert_eq!(core.state(), CognitiveState::Acting);
    }

    #[test]
    fn test_learn_adjusts_threshold() {
        let mut core = default_core();
        let before = core.config.attention_threshold;
        let success = Outcome {
            success: true,
            reward: 1.0,
            description: "ok".into(),
        };
        core.learn(success);
        assert!(core.config.attention_threshold < before);
        assert_eq!(core.state(), CognitiveState::Idle);
    }

    #[test]
    fn test_learn_failure_raises_threshold() {
        let mut core = default_core();
        let before = core.config.attention_threshold;
        let failure = Outcome {
            success: false,
            reward: -1.0,
            description: "fail".into(),
        };
        core.learn(failure);
        assert!(core.config.attention_threshold > before);
    }

    #[test]
    fn test_emergency_priority() {
        let config = CognitiveConfig {
            mode: CognitiveMode::Emergency,
            ..CognitiveConfig::default()
        };
        let mut core = CognitiveCore::new(config);
        core.perceive(make_percept("ir", vec![5.0, 6.0, 7.0], 0.99));
        assert_eq!(core.think().unwrap().action.priority, 255);
    }

    #[test]
    fn test_percept_buffer_overflow() {
        let config = CognitiveConfig {
            max_percepts: 2,
            ..CognitiveConfig::default()
        };
        let mut core = CognitiveCore::new(config);
        for (src, v) in [("a", 1.0), ("b", 2.0), ("c", 3.0)] {
            core.perceive(make_percept(src, vec![v], 0.8));
        }
        // Third insert evicts the oldest; the bound of 2 holds.
        assert_eq!(core.percept_count(), 2);
    }
}