scirs2_sparse/neural_adaptive_sparse/config.rs

//! Configuration for neural-adaptive sparse matrix processing
//!
//! This module contains configuration structures and enums for the
//! neural-adaptive sparse matrix processor system.
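//!
//! # Example
//!
//! A minimal usage sketch of the builder API (marked `ignore` since the
//! crate path shown here is an assumption):
//!
//! ```ignore
//! use scirs2_sparse::neural_adaptive_sparse::config::NeuralAdaptiveConfig;
//!
//! let config = NeuralAdaptiveConfig::new()
//!     .with_hidden_layers(4)
//!     .with_learning_rate(0.005);
//! assert!(config.validate().is_ok());
//! ```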

use super::reinforcement_learning::RLAlgorithm;

/// Neural-adaptive sparse matrix processor configuration
#[derive(Debug, Clone)]
pub struct NeuralAdaptiveConfig {
    /// Number of hidden layers in the neural network
    pub hidden_layers: usize,
    /// Neurons per hidden layer
    pub neurons_per_layer: usize,
    /// Learning rate for adaptive optimization
    pub learning_rate: f64,
    /// Memory capacity for pattern learning
    pub memory_capacity: usize,
    /// Enable reinforcement learning
    pub reinforcement_learning: bool,
    /// Number of attention heads
    pub attention_heads: usize,
    /// Enable transformer-style self-attention
    pub self_attention: bool,
    /// Reinforcement learning algorithm
    pub rl_algorithm: RLAlgorithm,
    /// Exploration rate for RL
    pub exploration_rate: f64,
    /// Discount factor for future rewards
    pub discount_factor: f64,
    /// Experience replay buffer size
    pub replay_buffer_size: usize,
    /// Transformer model dimension
    pub model_dim: usize,
    /// Feed-forward network dimension in the transformer
    pub ff_dim: usize,
    /// Number of transformer layers
    pub transformer_layers: usize,
}

impl Default for NeuralAdaptiveConfig {
    fn default() -> Self {
        Self {
            hidden_layers: 3,
            neurons_per_layer: 64,
            learning_rate: 0.001,
            memory_capacity: 10000,
            reinforcement_learning: true,
            attention_heads: 8,
            self_attention: true,
            rl_algorithm: RLAlgorithm::DQN,
            exploration_rate: 0.1,
            discount_factor: 0.99,
            replay_buffer_size: 10000,
            model_dim: 512,
            ff_dim: 2048,
            transformer_layers: 6,
        }
    }
}

impl NeuralAdaptiveConfig {
    /// Create a new configuration with default values
    pub fn new() -> Self {
        Self::default()
    }

    /// Set the number of hidden layers
    pub fn with_hidden_layers(mut self, layers: usize) -> Self {
        self.hidden_layers = layers;
        self
    }

    /// Set neurons per layer
    pub fn with_neurons_per_layer(mut self, neurons: usize) -> Self {
        self.neurons_per_layer = neurons;
        self
    }

    /// Set learning rate
    pub fn with_learning_rate(mut self, rate: f64) -> Self {
        self.learning_rate = rate;
        self
    }

    /// Set memory capacity
    pub fn with_memory_capacity(mut self, capacity: usize) -> Self {
        self.memory_capacity = capacity;
        self
    }

    /// Enable or disable reinforcement learning
    pub fn with_reinforcement_learning(mut self, enabled: bool) -> Self {
        self.reinforcement_learning = enabled;
        self
    }

    /// Set number of attention heads
    pub fn with_attention_heads(mut self, heads: usize) -> Self {
        self.attention_heads = heads;
        self
    }

    /// Enable or disable self-attention
    pub fn with_self_attention(mut self, enabled: bool) -> Self {
        self.self_attention = enabled;
        self
    }

    /// Set RL algorithm
    pub fn with_rl_algorithm(mut self, algorithm: RLAlgorithm) -> Self {
        self.rl_algorithm = algorithm;
        self
    }

    /// Set exploration rate
    pub fn with_exploration_rate(mut self, rate: f64) -> Self {
        self.exploration_rate = rate;
        self
    }

    /// Set discount factor
    pub fn with_discount_factor(mut self, factor: f64) -> Self {
        self.discount_factor = factor;
        self
    }

    /// Set replay buffer size
    pub fn with_replay_buffer_size(mut self, size: usize) -> Self {
        self.replay_buffer_size = size;
        self
    }

    /// Set transformer model dimension
    ///
    /// Note: `validate` requires this to be divisible by `attention_heads`.
    pub fn with_model_dim(mut self, dim: usize) -> Self {
        self.model_dim = dim;
        self
    }

    /// Set feed-forward dimension
    pub fn with_ff_dim(mut self, dim: usize) -> Self {
        self.ff_dim = dim;
        self
    }

    /// Set number of transformer layers
    pub fn with_transformer_layers(mut self, layers: usize) -> Self {
        self.transformer_layers = layers;
        self
    }

    /// Validate the configuration, returning an error describing the first
    /// violated constraint.
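    ///
    /// # Example
    ///
    /// A minimal sketch (marked `ignore` since crate paths may differ):
    ///
    /// ```ignore
    /// let config = NeuralAdaptiveConfig::new().with_model_dim(513);
    /// // 513 is not divisible by the default 8 attention heads.
    /// assert!(config.validate().is_err());
    /// ```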
    pub fn validate(&self) -> Result<(), String> {
        if self.hidden_layers == 0 {
            return Err("Hidden layers must be greater than 0".to_string());
        }

        if self.neurons_per_layer == 0 {
            return Err("Neurons per layer must be greater than 0".to_string());
        }

        if self.learning_rate <= 0.0 || self.learning_rate > 1.0 {
            return Err("Learning rate must be in (0, 1]".to_string());
        }

        if self.memory_capacity == 0 {
            return Err("Memory capacity must be greater than 0".to_string());
        }

        if self.attention_heads == 0 {
            return Err("Attention heads must be greater than 0".to_string());
        }

        if self.exploration_rate < 0.0 || self.exploration_rate > 1.0 {
            return Err("Exploration rate must be in [0, 1]".to_string());
        }

        if self.discount_factor < 0.0 || self.discount_factor > 1.0 {
            return Err("Discount factor must be in [0, 1]".to_string());
        }

        if self.replay_buffer_size == 0 {
            return Err("Replay buffer size must be greater than 0".to_string());
        }

        if self.model_dim == 0 {
            return Err("Model dimension must be greater than 0".to_string());
        }

        if self.ff_dim == 0 {
            return Err("Feed-forward dimension must be greater than 0".to_string());
        }

        if self.transformer_layers == 0 {
            return Err("Transformer layers must be greater than 0".to_string());
        }

        if self.model_dim % self.attention_heads != 0 {
            return Err(
                "Model dimension must be divisible by the number of attention heads".to_string(),
            );
        }

        Ok(())
    }

    /// Create a lightweight configuration for testing
    pub fn lightweight() -> Self {
        Self {
            hidden_layers: 2,
            neurons_per_layer: 16,
            learning_rate: 0.01,
            memory_capacity: 100,
            reinforcement_learning: true,
            attention_heads: 2,
            self_attention: false,
            rl_algorithm: RLAlgorithm::DQN,
            exploration_rate: 0.3,
            discount_factor: 0.9,
            replay_buffer_size: 100,
            model_dim: 64,
            ff_dim: 128,
            transformer_layers: 2,
        }
    }

    /// Create a high-performance configuration
    pub fn high_performance() -> Self {
        Self {
            hidden_layers: 5,
            neurons_per_layer: 128,
            learning_rate: 0.0001,
            memory_capacity: 50000,
            reinforcement_learning: true,
            attention_heads: 16,
            self_attention: true,
            rl_algorithm: RLAlgorithm::PPO,
            exploration_rate: 0.05,
            discount_factor: 0.995,
            replay_buffer_size: 50000,
            model_dim: 1024,
            ff_dim: 4096,
            transformer_layers: 12,
        }
    }

    /// Create a memory-efficient configuration
    pub fn memory_efficient() -> Self {
        Self {
            hidden_layers: 2,
            neurons_per_layer: 32,
            learning_rate: 0.005,
            memory_capacity: 1000,
            reinforcement_learning: false,
            attention_heads: 4,
            self_attention: false,
            rl_algorithm: RLAlgorithm::DQN,
            exploration_rate: 0.1,
            discount_factor: 0.99,
            replay_buffer_size: 1000,
            model_dim: 256,
            ff_dim: 512,
            transformer_layers: 3,
        }
    }
}
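
// A minimal test sketch (added illustratively, not part of the original
// module): checks that every preset satisfies `validate()` and that an
// inconsistent configuration is rejected.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn presets_are_valid() {
        assert!(NeuralAdaptiveConfig::default().validate().is_ok());
        assert!(NeuralAdaptiveConfig::lightweight().validate().is_ok());
        assert!(NeuralAdaptiveConfig::high_performance().validate().is_ok());
        assert!(NeuralAdaptiveConfig::memory_efficient().validate().is_ok());
    }

    #[test]
    fn rejects_indivisible_model_dim() {
        // 513 is not divisible by the default 8 attention heads.
        let config = NeuralAdaptiveConfig::new().with_model_dim(513);
        assert!(config.validate().is_err());
    }
}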