omega_attention/lib.rs
//! # Omega Attention - Brain-Like Selective Processing
//!
//! Implements 39 attention mechanisms inspired by transformer architectures and
//! neuroscience research, plus brain-like attention control systems.
//!
//! ## Features
//!
//! - **39 Attention Mechanisms**: Flash, Linear, Sparse, Hyperbolic, Graph, Memory-augmented
//! - **Top-Down Attention**: Goal-driven, task-relevant selection
//! - **Bottom-Up Attention**: Stimulus-driven, salience-based capture
//! - **Working Memory Gating**: Input/output/forget gates for WM control
//! - **Attention Spotlight**: Winner-take-all competition
//!
//! ## Architecture
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────┐
//! │                      ATTENTION SYSTEM                       │
//! ├─────────────────────────────────────────────────────────────┤
//! │                                                             │
//! │  ┌────────────────────┐     ┌────────────────────┐          │
//! │  │      TOP-DOWN      │     │     BOTTOM-UP      │          │
//! │  │   (Goal-driven)    │     │     (Salience)     │          │
//! │  │                    │     │                    │          │
//! │  │ • Task relevance   │     │ • Novelty          │          │
//! │  │ • Expected value   │     │ • Contrast         │          │
//! │  │ • Memory match     │     │ • Motion           │          │
//! │  └────────┬───────────┘     └────────┬───────────┘          │
//! │           │                          │                      │
//! │           └────────────┬─────────────┘                      │
//! │                        ▼                                    │
//! │            ┌───────────────────────┐                        │
//! │            │   ATTENTION CONTROL   │                        │
//! │            │    (Priority Map)     │                        │
//! │            └───────────┬───────────┘                        │
//! │                        ▼                                    │
//! │            ┌───────────────────────┐                        │
//! │            │ ATTENTION MECHANISMS  │                        │
//! │            │      (39 types)       │                        │
//! │            └───────────┬───────────┘                        │
//! │                        ▼                                    │
//! │            ┌───────────────────────┐                        │
//! │            │    WORKING MEMORY     │                        │
//! │            │    (Gated Access)     │                        │
//! │            └───────────────────────┘                        │
//! │                                                             │
//! └─────────────────────────────────────────────────────────────┘
//! ```
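//!
//! ## Example
//!
//! A minimal usage sketch. It is marked `ignore` rather than run as a doctest
//! because the vector sizes below are only illustrative; the dimensionality a
//! real caller must supply depends on the configured mechanism and
//! [`AttentionConfig`].
//!
//! ```rust,ignore
//! use omega_attention::{AttentionConfig, AttentionSystem};
//!
//! let mut system = AttentionSystem::new(AttentionConfig::default());
//!
//! // Illustrative vectors: a stimulus, a goal signal, and contextual features.
//! let input = vec![0.1, 0.9, 0.3, 0.7];
//! let goals = vec![0.0, 1.0, 0.0, 0.0];
//! let context = vec![0.5, 0.5, 0.5, 0.5];
//!
//! // Strongly attended results (peak weight > 0.5) are also stored in working memory.
//! let output = system.attend(&input, &goals, &context).expect("attention failed");
//! println!("peak attention weight: {}", output.max_attention);
//! ```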

pub mod mechanisms;
pub mod controller;
pub mod working_memory;
pub mod salience;

pub use mechanisms::{
    AttentionMechanism, AttentionType, AttentionOutput,
    ScaledDotProductAttention, FlashAttention, LinearAttention,
    SparseAttention, HyperbolicAttention, GraphAttention,
    MemoryAugmentedAttention, MultiHeadAttention,
};
pub use controller::{AttentionController, AttentionConfig, PriorityMap};
pub use working_memory::{WorkingMemory, WMGate, WorkingMemoryItem};
pub use salience::{SalienceMap, SalienceComputer, SalienceFeature};

use thiserror::Error;

/// Errors that can occur in the attention module
#[derive(Error, Debug)]
pub enum AttentionError {
    #[error("Invalid dimensions: expected {expected}, got {got}")]
    InvalidDimensions { expected: usize, got: usize },

    #[error("Empty input")]
    EmptyInput,

    #[error("Attention computation failed: {0}")]
    ComputationError(String),

    #[error("Working memory full")]
    WorkingMemoryFull,

    #[error("Configuration error: {0}")]
    ConfigError(String),
}

/// Convenience `Result` alias used throughout the attention module.
pub type Result<T> = std::result::Result<T, AttentionError>;

/// Main attention system orchestrating all components
pub struct AttentionSystem {
    controller: AttentionController,
    working_memory: WorkingMemory,
    salience: SalienceComputer,
}

impl AttentionSystem {
    /// Create a new attention system
    pub fn new(config: AttentionConfig) -> Self {
        Self {
            controller: AttentionController::new(config),
            working_memory: WorkingMemory::new(7), // 7±2 capacity
            salience: SalienceComputer::new(),
        }
    }

    /// Process input through the attention system.
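    ///
    /// One call runs the full pipeline sketched in the module docs: bottom-up
    /// salience, top-down relevance, a combined priority map, the configured
    /// attention mechanism, and finally a best-effort store into working
    /// memory for strongly attended outputs (peak attention weight above 0.5).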
    pub fn attend(
        &mut self,
        input: &[f64],
        goals: &[f64],
        context: &[f64],
    ) -> Result<AttentionOutput> {
        // 1. Compute bottom-up salience
        let salience = self.salience.compute(input);

        // 2. Compute top-down relevance based on goals
        let relevance = self.controller.compute_relevance(input, goals);

        // 3. Combine into priority map
        let priority = self.controller.combine_priorities(&salience, &relevance);

        // 4. Apply attention mechanism
        let output = self.controller.apply_attention(input, &priority, context)?;

        // 5. Update working memory if high priority.
        //    The store is best-effort: if working memory is full, the item is
        //    simply not retained and no error is reported to the caller.
        if output.max_attention > 0.5 {
            let item = WorkingMemoryItem::new(
                output.attended_values.clone(),
                output.max_attention,
            );
            self.working_memory.try_store(item);
        }

        Ok(output)
    }

    /// Get current working memory contents
    pub fn working_memory(&self) -> &WorkingMemory {
        &self.working_memory
    }

    /// Get mutable access to working memory
    pub fn working_memory_mut(&mut self) -> &mut WorkingMemory {
        &mut self.working_memory
    }

    /// Focus attention on a specific target (top-down control)
    pub fn focus(&mut self, target: &[f64]) {
        self.controller.set_focus(target);
    }

    /// Get current attention state
    pub fn state(&self) -> AttentionState {
        AttentionState {
            wm_items: self.working_memory.len(),
            wm_capacity: self.working_memory.capacity(),
            current_focus: self.controller.current_focus(),
        }
    }
}

/// Current state of attention system
#[derive(Debug, Clone)]
pub struct AttentionState {
    /// Number of items currently held in working memory.
    pub wm_items: usize,
    /// Maximum number of items working memory can hold.
    pub wm_capacity: usize,
    /// The controller's current top-down focus target, if one has been set.
    pub current_focus: Option<Vec<f64>>,
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_attention_system_creation() {
        let config = AttentionConfig::default();
        let system = AttentionSystem::new(config);

        let state = system.state();
        assert_eq!(state.wm_items, 0);
        assert_eq!(state.wm_capacity, 7);
    }
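
    // Added as a small illustrative check: these Display strings are fully
    // determined by the `#[error(...)]` attributes on `AttentionError` above,
    // so nothing outside this file is assumed.
    #[test]
    fn test_error_display() {
        let err = AttentionError::InvalidDimensions { expected: 4, got: 2 };
        assert_eq!(err.to_string(), "Invalid dimensions: expected 4, got 2");
        assert_eq!(
            AttentionError::WorkingMemoryFull.to_string(),
            "Working memory full"
        );
    }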
}