rustchain/performance/mod.rs

use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

/// Performance metrics collector for RustChain operations
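///
/// Clones are cheap and share the same underlying atomic counters, so a
/// single monitor can be handed to multiple tasks.
///
/// # Example
///
/// A minimal usage sketch; the doctest is marked `ignore` because the
/// `rustchain::performance` path is an assumption about how this module is
/// exposed:
///
/// ```ignore
/// use rustchain::performance::PerformanceMonitor;
/// use std::time::Duration;
///
/// let monitor = PerformanceMonitor::new();
/// monitor.record_tool_time(Duration::from_millis(25));
/// monitor.record_memory_operation();
///
/// let stats = monitor.get_stats();
/// assert_eq!(stats.total_tool_time_ms, 25);
/// assert_eq!(stats.memory_operations_count, 1);
/// ```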
#[derive(Debug, Clone)]
pub struct PerformanceMonitor {
    /// Cumulative mission execution time, in milliseconds
    mission_times: Arc<AtomicU64>,
    /// Cumulative tool execution time, in milliseconds
    tool_times: Arc<AtomicU64>,
    /// Cumulative LLM call time, in milliseconds
    llm_times: Arc<AtomicU64>,
    /// Count of recorded memory operations
    memory_operations: Arc<AtomicU64>,
}

impl Default for PerformanceMonitor {
    fn default() -> Self {
        Self::new()
    }
}

impl PerformanceMonitor {
    pub fn new() -> Self {
        Self {
            mission_times: Arc::new(AtomicU64::new(0)),
            tool_times: Arc::new(AtomicU64::new(0)),
            llm_times: Arc::new(AtomicU64::new(0)),
            memory_operations: Arc::new(AtomicU64::new(0)),
        }
    }

    pub fn record_mission_time(&self, duration: Duration) {
        self.mission_times
            .fetch_add(duration.as_millis() as u64, Ordering::Relaxed);
    }

    pub fn record_tool_time(&self, duration: Duration) {
        self.tool_times
            .fetch_add(duration.as_millis() as u64, Ordering::Relaxed);
    }

    pub fn record_llm_time(&self, duration: Duration) {
        self.llm_times
            .fetch_add(duration.as_millis() as u64, Ordering::Relaxed);
    }

    pub fn record_memory_operation(&self) {
        self.memory_operations.fetch_add(1, Ordering::Relaxed);
    }

    pub fn get_stats(&self) -> PerformanceStats {
        PerformanceStats {
            total_mission_time_ms: self.mission_times.load(Ordering::Relaxed),
            total_tool_time_ms: self.tool_times.load(Ordering::Relaxed),
            total_llm_time_ms: self.llm_times.load(Ordering::Relaxed),
            memory_operations_count: self.memory_operations.load(Ordering::Relaxed),
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceStats {
    pub total_mission_time_ms: u64,
    pub total_tool_time_ms: u64,
    pub total_llm_time_ms: u64,
    pub memory_operations_count: u64,
}

/// Timer for measuring operation performance
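///
/// # Example
///
/// A short sketch of the intended call pattern; marked `ignore` because the
/// `rustchain::performance` path is an assumption:
///
/// ```ignore
/// use rustchain::performance::Timer;
///
/// let timer = Timer::new("parse_mission");
/// // ... the work being measured ...
/// let elapsed = timer.finish(); // logs at DEBUG level and returns the Duration
/// ```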
pub struct Timer {
    start: Instant,
    name: String,
}

impl Timer {
    pub fn new(name: impl Into<String>) -> Self {
        Self {
            start: Instant::now(),
            name: name.into(),
        }
    }

    pub fn elapsed(&self) -> Duration {
        self.start.elapsed()
    }

    pub fn finish(self) -> Duration {
        let duration = self.elapsed();
        tracing::debug!(
            "Operation '{}' completed in {}ms",
            self.name,
            duration.as_millis()
        );
        duration
    }
}

/// Macro for easy performance timing
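///
/// # Example
///
/// A sketch of how the macro is meant to be invoked; marked `ignore` because
/// it assumes the crate is named `rustchain` and that this module is mounted
/// at `crate::performance` (which the macro body also requires):
///
/// ```ignore
/// let value = rustchain::time_operation!("add_numbers", {
///     2 + 2
/// });
/// assert_eq!(value, 4);
/// ```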
#[macro_export]
macro_rules! time_operation {
    ($name:expr, $block:block) => {{
        let timer = $crate::performance::Timer::new($name);
        let result = $block;
        timer.finish();
        result
    }};
}

/// Performance optimization configurations
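///
/// # Example
///
/// Individual knobs can be overridden on top of the defaults (values here are
/// arbitrary; marked `ignore` because the `rustchain::performance` path is an
/// assumption):
///
/// ```ignore
/// use rustchain::performance::PerformanceConfig;
///
/// let config = PerformanceConfig {
///     batch_size: 32,
///     ..Default::default()
/// };
/// assert!(config.enable_caching);
/// ```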
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PerformanceConfig {
    pub enable_async_batching: bool,
    pub batch_size: usize,
    pub timeout_ms: u64,
    pub max_concurrent_operations: usize,
    pub enable_caching: bool,
    pub cache_ttl_seconds: u64,
}

impl Default for PerformanceConfig {
    fn default() -> Self {
        Self {
            enable_async_batching: true,
            batch_size: 10,
            timeout_ms: 5000,
            max_concurrent_operations: 100,
            enable_caching: true,
            cache_ttl_seconds: 300,
        }
    }
}

/// Batch processor for high-throughput operations
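///
/// # Example
///
/// Typical flow: keep adding items and flush whenever `add` reports that the
/// batch is full, then flush once more at the end for the partial remainder.
/// Marked `ignore` because the `rustchain::performance` path is an assumption:
///
/// ```ignore
/// use rustchain::performance::{BatchProcessor, PerformanceConfig};
///
/// let mut processor = BatchProcessor::new(PerformanceConfig::default());
/// for item in 0..25 {
///     if processor.add(item) {
///         let batch = processor.flush();
///         // hand `batch` to the downstream sink here
///     }
/// }
/// let remainder = processor.flush(); // final partial batch
/// ```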
pub struct BatchProcessor<T> {
    config: PerformanceConfig,
    batch: Vec<T>,
}

impl<T> BatchProcessor<T> {
    pub fn new(config: PerformanceConfig) -> Self {
        let batch_size = config.batch_size;
        Self {
            config,
            batch: Vec::with_capacity(batch_size),
        }
    }

    /// Adds an item to the current batch; returns `true` once the batch has
    /// reached `batch_size` and should be flushed
    pub fn add(&mut self, item: T) -> bool {
        self.batch.push(item);
        self.batch.len() >= self.config.batch_size
    }

    /// Drains the accumulated items, leaving the processor empty
    pub fn flush(&mut self) -> Vec<T> {
        std::mem::take(&mut self.batch)
    }

    pub fn is_empty(&self) -> bool {
        self.batch.is_empty()
    }

    pub fn len(&self) -> usize {
        self.batch.len()
    }
}

/// Connection pool for database and HTTP connections
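///
/// # Example
///
/// A get-or-create sketch; `open_connection` is a hypothetical placeholder for
/// whatever actually establishes a connection, and the `rustchain::performance`
/// path is an assumption:
///
/// ```ignore
/// use rustchain::performance::ConnectionPool;
///
/// let mut pool: ConnectionPool<String> = ConnectionPool::new(4);
/// let conn = pool.get().unwrap_or_else(|| open_connection());
/// // ... use `conn` ...
/// pool.return_connection(conn);
/// ```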
pub struct ConnectionPool<T> {
    connections: Vec<T>,
    max_size: usize,
}

impl<T> ConnectionPool<T> {
    pub fn new(max_size: usize) -> Self {
        Self {
            connections: Vec::with_capacity(max_size),
            max_size,
        }
    }

    pub fn get(&mut self) -> Option<T> {
        self.connections.pop()
    }

    pub fn return_connection(&mut self, conn: T) {
        if self.connections.len() < self.max_size {
            self.connections.push(conn);
        }
        // If the pool is already at capacity, the returned connection is dropped
    }

    /// Number of idle connections currently held in the pool
    pub fn size(&self) -> usize {
        self.connections.len()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;

    #[test]
    fn test_performance_monitor() {
        let monitor = PerformanceMonitor::new();

        monitor.record_mission_time(Duration::from_millis(100));
        monitor.record_tool_time(Duration::from_millis(50));
        monitor.record_llm_time(Duration::from_millis(200));
        monitor.record_memory_operation();

        let stats = monitor.get_stats();
        assert_eq!(stats.total_mission_time_ms, 100);
        assert_eq!(stats.total_tool_time_ms, 50);
        assert_eq!(stats.total_llm_time_ms, 200);
        assert_eq!(stats.memory_operations_count, 1);
    }

    #[test]
    fn test_timer() {
        let timer = Timer::new("test_operation");
        thread::sleep(Duration::from_millis(10));
        let elapsed = timer.finish();
        assert!(elapsed >= Duration::from_millis(10));
    }

    #[test]
    fn test_batch_processor() {
        let config = PerformanceConfig {
            batch_size: 3,
            ..Default::default()
        };
        let mut processor = BatchProcessor::new(config);

        assert!(!processor.add("item1"));
        assert!(!processor.add("item2"));
        assert!(processor.add("item3")); // Should trigger batch full

        let batch = processor.flush();
        assert_eq!(batch.len(), 3);
        assert!(processor.is_empty());
    }

    #[test]
    fn test_connection_pool() {
        let mut pool = ConnectionPool::new(2);

        // Pool starts empty
        assert_eq!(pool.size(), 0);
        assert!(pool.get().is_none());

        // Add connections
        pool.return_connection("conn1");
        pool.return_connection("conn2");

        // Get connections
        assert!(pool.get().is_some());
        assert!(pool.get().is_some());
        assert!(pool.get().is_none()); // Empty again
    }
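
    // Exercises the `time_operation!` macro; assumes this module is mounted at
    // `crate::performance`, which the macro's `$crate::performance::Timer`
    // path already requires.
    #[test]
    fn test_time_operation_macro() {
        let result = crate::time_operation!("macro_test", {
            thread::sleep(Duration::from_millis(5));
            21 * 2
        });
        assert_eq!(result, 42);
    }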
}