// ruvector_robotics/cognitive/cognitive_core.rs

use serde::{Deserialize, Serialize};
use std::collections::VecDeque;
/// Operating regime of the cognitive core. The mode determines the
/// priority assigned to action commands in `CognitiveCore::think`
/// (Reactive = 64, Deliberative = 128, Emergency = 255).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CognitiveMode {
    /// Fast stimulus-response operation; lowest command priority (64).
    Reactive,
    /// Slower, planned operation; mid command priority (128).
    Deliberative,
    /// Highest urgency; commands are emitted at maximum priority (255).
    Emergency,
}
24
/// Phase of the perceive → think → act → learn cycle the core is in.
/// Each `CognitiveCore` method sets the corresponding phase on entry;
/// `learn` restores `Idle` when it finishes.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum CognitiveState {
    /// No activity; initial state and the state after `learn` completes.
    Idle,
    /// Set while `perceive` is ingesting a percept.
    Perceiving,
    /// Set while `think` is selecting a decision.
    Thinking,
    /// Set when `act` dispatches a command.
    Acting,
    /// Set while `learn` is processing an outcome.
    Learning,
}
34
/// Concrete actuator commands the core can emit.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum ActionType {
    /// Move toward a 3-component target; `think` fills this from the first
    /// three values of a percept's data. Units/reference frame are not
    /// specified in this file — TODO confirm with the actuator layer.
    Move([f64; 3]),
    /// Rotate by/to an angle. Radians vs degrees is not specified here —
    /// NOTE(review): confirm with the consumer of these commands.
    Rotate(f64),
    /// Grasp the object identified by the given name.
    Grasp(String),
    /// Release whatever is currently held.
    Release,
    /// Utter the given text.
    Speak(String),
    /// Idle for a duration (presumably milliseconds; `think` emits
    /// `Wait(100)` as its fallback — confirm the unit).
    Wait(u64),
}
45
/// An action paired with scheduling metadata, produced by
/// `CognitiveCore::think` and returned from `CognitiveCore::act`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct ActionCommand {
    /// The actuator command to execute.
    pub action: ActionType,
    /// Urgency in 0–255; derived from the configured `CognitiveMode`.
    pub priority: u8,
    /// Confidence inherited from the percept that triggered the action.
    pub confidence: f64,
}
57
/// A single sensory observation fed into the core via `perceive`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Percept {
    /// Identifier of the producing sensor (tests use "lidar", "cam", "ir").
    pub source: String,
    /// Raw feature vector; `think` interprets the first three values as a
    /// movement target when at least three are present.
    pub data: Vec<f64>,
    /// Reliability score compared against `attention_threshold` in
    /// `perceive`. Presumably in [0, 1] — the range is not enforced here.
    pub confidence: f64,
    /// Capture time; epoch and unit are not specified in this file —
    /// TODO confirm (tests use a bare 1000).
    pub timestamp: i64,
}
66
/// The result of a `think` step: a command plus a human-readable rationale.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Decision {
    /// The command chosen for execution.
    pub action: ActionCommand,
    /// Explanation string, formatted in `think` from the winning percept.
    pub reasoning: String,
    /// Estimated value of the decision; `think` uses the winning percept's
    /// confidence directly.
    pub utility: f64,
}
74
/// Feedback about an executed action, consumed by `CognitiveCore::learn`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct Outcome {
    /// Whether the action achieved its goal; drives the direction of
    /// attention-threshold adaptation in `learn`.
    pub success: bool,
    /// Scalar reward; scaled by `learning_rate` before being accumulated.
    pub reward: f64,
    /// Free-form description of what happened (not interpreted by the core).
    pub description: String,
}
82
/// Tunable parameters for a `CognitiveCore`.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub struct CognitiveConfig {
    /// Operating mode; determines the priority of emitted commands.
    pub mode: CognitiveMode,
    /// Minimum percept confidence accepted by `perceive`; adapted by
    /// `learn`, which keeps it within [0.1, 0.9].
    pub attention_threshold: f64,
    /// Scale factor applied to rewards before accumulation in `learn`.
    pub learning_rate: f64,
    /// Capacity of the percept FIFO; the oldest entry is evicted when full.
    pub max_percepts: usize,
}
91
92impl Default for CognitiveConfig {
93 fn default() -> Self {
94 Self {
95 mode: CognitiveMode::Reactive,
96 attention_threshold: 0.5,
97 learning_rate: 0.01,
98 max_percepts: 100,
99 }
100 }
101}
102
/// State machine driving a perceive → think → act → learn loop.
/// Holds a bounded FIFO of recent percepts and an unbounded history of
/// decisions (NOTE(review): `decision_history` grows without limit —
/// consider capping it for long-running deployments).
#[derive(Debug, Clone)]
pub struct CognitiveCore {
    /// Current phase of the cognitive cycle.
    state: CognitiveState,
    /// Tunable parameters; `attention_threshold` is mutated by `learn`.
    config: CognitiveConfig,
    /// FIFO of accepted percepts, capped at `config.max_percepts`.
    percept_buffer: VecDeque<Percept>,
    /// Every decision ever produced by `think`.
    decision_history: Vec<Decision>,
    /// Running sum of `reward * learning_rate` contributions from `learn`.
    cumulative_reward: f64,
}
116
117impl CognitiveCore {
118 pub fn new(config: CognitiveConfig) -> Self {
120 Self {
121 state: CognitiveState::Idle,
122 config,
123 percept_buffer: VecDeque::new(),
124 decision_history: Vec::new(),
125 cumulative_reward: 0.0,
126 }
127 }
128
129 pub fn perceive(&mut self, percept: Percept) -> CognitiveState {
134 self.state = CognitiveState::Perceiving;
135
136 if percept.confidence < self.config.attention_threshold {
137 return self.state;
138 }
139
140 if self.percept_buffer.len() >= self.config.max_percepts {
141 self.percept_buffer.pop_front(); }
143 self.percept_buffer.push_back(percept);
144 self.state
145 }
146
147 pub fn think(&mut self) -> Option<Decision> {
151 self.state = CognitiveState::Thinking;
152
153 if self.percept_buffer.is_empty() {
154 return None;
155 }
156
157 let best = self
159 .percept_buffer
160 .iter()
161 .max_by(|a, b| a.confidence.partial_cmp(&b.confidence).unwrap_or(std::cmp::Ordering::Equal))?;
162
163 let action_type = if best.data.len() >= 3 {
164 ActionType::Move([best.data[0], best.data[1], best.data[2]])
165 } else {
166 ActionType::Wait(100)
167 };
168
169 let decision = Decision {
170 action: ActionCommand {
171 action: action_type,
172 priority: match self.config.mode {
173 CognitiveMode::Emergency => 255,
174 CognitiveMode::Deliberative => 128,
175 CognitiveMode::Reactive => 64,
176 },
177 confidence: best.confidence,
178 },
179 reasoning: format!("Best percept from '{}' (conf={:.2})", best.source, best.confidence),
180 utility: best.confidence,
181 };
182
183 self.decision_history.push(decision.clone());
184 Some(decision)
185 }
186
187 pub fn act(&mut self, decision: Decision) -> ActionCommand {
189 self.state = CognitiveState::Acting;
190 decision.action
191 }
192
193 pub fn learn(&mut self, outcome: Outcome) {
195 self.state = CognitiveState::Learning;
196 self.cumulative_reward += outcome.reward * self.config.learning_rate;
197
198 if outcome.success {
200 self.config.attention_threshold =
201 (self.config.attention_threshold - 0.01).max(0.1);
202 } else {
203 self.config.attention_threshold =
204 (self.config.attention_threshold + 0.01).min(0.9);
205 }
206
207 self.percept_buffer.clear();
209 self.state = CognitiveState::Idle;
210 }
211
212 pub fn state(&self) -> CognitiveState {
214 self.state
215 }
216
217 pub fn mode(&self) -> CognitiveMode {
219 self.config.mode
220 }
221
222 pub fn percept_count(&self) -> usize {
224 self.percept_buffer.len()
225 }
226
227 pub fn decision_count(&self) -> usize {
229 self.decision_history.len()
230 }
231
232 pub fn cumulative_reward(&self) -> f64 {
234 self.cumulative_reward
235 }
236}
237
#[cfg(test)]
mod tests {
    use super::*;

    /// Core built from the default configuration.
    fn fresh_core() -> CognitiveCore {
        CognitiveCore::new(CognitiveConfig::default())
    }

    /// Percept with a fixed timestamp so tests stay deterministic.
    fn percept(source: &str, data: Vec<f64>, confidence: f64) -> Percept {
        Percept {
            source: source.to_string(),
            data,
            confidence,
            timestamp: 1000,
        }
    }

    #[test]
    fn test_initial_state() {
        let core = fresh_core();
        assert_eq!(core.state(), CognitiveState::Idle);
        assert_eq!(core.mode(), CognitiveMode::Reactive);
        assert_eq!(core.percept_count(), 0);
    }

    #[test]
    fn test_perceive_above_threshold() {
        let mut core = fresh_core();
        let state = core.perceive(percept("lidar", vec![1.0, 2.0, 3.0], 0.8));
        assert_eq!(state, CognitiveState::Perceiving);
        assert_eq!(core.percept_count(), 1);
    }

    #[test]
    fn test_perceive_below_threshold() {
        let mut core = fresh_core();
        core.perceive(percept("lidar", vec![1.0], 0.1));
        assert_eq!(core.percept_count(), 0);
    }

    #[test]
    fn test_think_produces_decision() {
        let mut core = fresh_core();
        core.perceive(percept("cam", vec![1.0, 2.0, 3.0], 0.9));

        let decision = core.think();
        assert!(decision.is_some());

        let d = decision.unwrap();
        assert_eq!(d.action.priority, 64);
        assert_eq!(core.decision_count(), 1);
    }

    #[test]
    fn test_think_empty_buffer() {
        let mut core = fresh_core();
        assert!(core.think().is_none());
    }

    #[test]
    fn test_act_returns_command() {
        let mut core = fresh_core();
        core.perceive(percept("cam", vec![1.0, 2.0, 3.0], 0.9));

        let decision = core.think().unwrap();
        let cmd = core.act(decision);
        assert_eq!(cmd.action, ActionType::Move([1.0, 2.0, 3.0]));
        assert_eq!(core.state(), CognitiveState::Acting);
    }

    #[test]
    fn test_learn_adjusts_threshold() {
        let mut core = fresh_core();
        let before = core.config.attention_threshold;
        core.learn(Outcome {
            success: true,
            reward: 1.0,
            description: "ok".into(),
        });
        // Success lowers the attention threshold and idles the core.
        assert!(core.config.attention_threshold < before);
        assert_eq!(core.state(), CognitiveState::Idle);
    }

    #[test]
    fn test_learn_failure_raises_threshold() {
        let mut core = fresh_core();
        let before = core.config.attention_threshold;
        core.learn(Outcome {
            success: false,
            reward: -1.0,
            description: "fail".into(),
        });
        assert!(core.config.attention_threshold > before);
    }

    #[test]
    fn test_emergency_priority() {
        let config = CognitiveConfig {
            mode: CognitiveMode::Emergency,
            ..CognitiveConfig::default()
        };
        let mut core = CognitiveCore::new(config);
        core.perceive(percept("ir", vec![5.0, 6.0, 7.0], 0.99));
        let d = core.think().unwrap();
        assert_eq!(d.action.priority, 255);
    }

    #[test]
    fn test_percept_buffer_overflow() {
        let config = CognitiveConfig {
            max_percepts: 2,
            ..CognitiveConfig::default()
        };
        let mut core = CognitiveCore::new(config);
        // Third insert must evict the oldest entry, keeping the cap at 2.
        core.perceive(percept("a", vec![1.0], 0.8));
        core.perceive(percept("b", vec![2.0], 0.8));
        core.perceive(percept("c", vec![3.0], 0.8));
        assert_eq!(core.percept_count(), 2);
    }
}