leptos_helios/performance_advanced/runtime_optimizations.rs

//! Runtime Performance Optimizations
//!
//! This module provides runtime performance optimizations for leptos-helios,
//! including memory pooling, caching, and algorithmic improvements.

use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

/// High-performance memory pool for frequently allocated objects
pub struct MemoryPool<T> {
    pool: Arc<Mutex<Vec<T>>>,
    factory: Box<dyn Fn() -> T + Send + Sync>,
    max_size: usize,
    stats: Arc<Mutex<PoolStats>>,
}

#[derive(Debug, Clone)]
pub struct PoolStats {
    pub allocations: u64,
    pub deallocations: u64,
    pub pool_hits: u64,
    pub pool_misses: u64,
}

impl<T> MemoryPool<T> {
    pub fn new<F>(factory: F, max_size: usize) -> Self
    where
        F: Fn() -> T + Send + Sync + 'static,
    {
        Self {
            pool: Arc::new(Mutex::new(Vec::new())),
            factory: Box::new(factory),
            max_size,
            stats: Arc::new(Mutex::new(PoolStats {
                allocations: 0,
                deallocations: 0,
                pool_hits: 0,
                pool_misses: 0,
            })),
        }
    }

    pub fn acquire(&self) -> T {
        let mut pool = self.pool.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        if let Some(item) = pool.pop() {
            stats.pool_hits += 1;
            stats.allocations += 1;
            item
        } else {
            stats.pool_misses += 1;
            stats.allocations += 1;
            (self.factory)()
        }
    }

    pub fn release(&self, item: T) {
        let mut pool = self.pool.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        if pool.len() < self.max_size {
            pool.push(item);
        }
        stats.deallocations += 1;
    }

    pub fn get_stats(&self) -> PoolStats {
        self.stats.lock().unwrap().clone()
    }

    pub fn clear(&self) {
        let mut pool = self.pool.lock().unwrap();
        pool.clear();
    }
}
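
// Illustrative usage sketch for `MemoryPool` (the `Vec<f32>` element type and the
// sizes below are arbitrary example choices, not part of the original API).
#[cfg(test)]
mod memory_pool_example {
    use super::*;

    #[test]
    fn reuses_buffers_through_the_pool() {
        // Pool up to 8 reusable vectors, each pre-sized for 1024 floats.
        let pool: MemoryPool<Vec<f32>> = MemoryPool::new(|| Vec::with_capacity(1024), 8);

        let mut buffer = pool.acquire(); // miss: created by the factory
        buffer.push(42.0);
        buffer.clear(); // reset state before handing the buffer back
        pool.release(buffer);

        let _reused = pool.acquire(); // hit: served from the pool
        let stats = pool.get_stats();
        assert_eq!(stats.pool_misses, 1);
        assert_eq!(stats.pool_hits, 1);
        assert_eq!(stats.allocations, 2);
    }
}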

/// Time-aware cache with TTL expiration that evicts the oldest entry when full
/// (an approximation of LRU, since entries are not refreshed on access)
pub struct LruCache<K, V> {
    cache: Arc<Mutex<HashMap<K, (V, Instant)>>>,
    max_size: usize,
    ttl: Duration,
    stats: Arc<Mutex<CacheStats>>,
}

#[derive(Debug, Clone)]
pub struct CacheStats {
    pub hits: u64,
    pub misses: u64,
    pub evictions: u64,
    pub size: usize,
}

impl<K, V> LruCache<K, V>
where
    K: Clone + std::hash::Hash + Eq + Send + Sync,
    V: Clone + Send + Sync,
{
    pub fn new(max_size: usize, ttl: Duration) -> Self {
        Self {
            cache: Arc::new(Mutex::new(HashMap::new())),
            max_size,
            ttl,
            stats: Arc::new(Mutex::new(CacheStats {
                hits: 0,
                misses: 0,
                evictions: 0,
                size: 0,
            })),
        }
    }

    pub fn get(&self, key: &K) -> Option<V> {
        let mut cache = self.cache.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        if let Some((value, timestamp)) = cache.get(key) {
            if timestamp.elapsed() < self.ttl {
                stats.hits += 1;
                Some(value.clone())
            } else {
                cache.remove(key);
                stats.misses += 1;
                stats.evictions += 1;
                stats.size = cache.len();
                None
            }
        } else {
            stats.misses += 1;
            None
        }
    }

    pub fn insert(&self, key: K, value: V) {
        let mut cache = self.cache.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        // Evict the oldest entries (by insertion time) while the cache is full
        while cache.len() >= self.max_size {
            let oldest_key = cache
                .iter()
                .min_by_key(|(_, (_, timestamp))| *timestamp)
                .map(|(k, _)| k.clone());
            if let Some(oldest_key) = oldest_key {
                cache.remove(&oldest_key);
                stats.evictions += 1;
            } else {
                // Cache is empty (max_size == 0); nothing left to evict
                break;
            }
        }

        cache.insert(key, (value, Instant::now()));
        stats.size = cache.len();
    }

    pub fn get_stats(&self) -> CacheStats {
        // Lock in the same order as the other methods (cache, then stats) to avoid deadlock
        let cache = self.cache.lock().unwrap();
        let stats = self.stats.lock().unwrap();
        CacheStats {
            hits: stats.hits,
            misses: stats.misses,
            evictions: stats.evictions,
            size: cache.len(),
        }
    }

    pub fn clear(&self) {
        let mut cache = self.cache.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();
        cache.clear();
        stats.size = 0;
    }
}
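
// Illustrative usage sketch for `LruCache` (the string keys, numeric values, and
// 2-entry capacity are arbitrary example choices, not part of the original API).
#[cfg(test)]
mod lru_cache_example {
    use super::*;

    #[test]
    fn serves_hits_and_evicts_when_full() {
        let cache: LruCache<String, u64> = LruCache::new(2, Duration::from_secs(60));

        cache.insert("a".to_string(), 1);
        cache.insert("b".to_string(), 2);
        assert_eq!(cache.get(&"a".to_string()), Some(1)); // hit within TTL

        // Inserting a third entry evicts the oldest one to stay within capacity.
        cache.insert("c".to_string(), 3);
        let stats = cache.get_stats();
        assert_eq!(stats.size, 2);
        assert_eq!(stats.evictions, 1);
    }
}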

/// Hash map wrapper for fast lookups and insertions, instrumented with operation statistics
pub struct FastHashMap<K, V> {
    data: Arc<Mutex<HashMap<K, V>>>,
    stats: Arc<Mutex<HashMapStats>>,
}

#[derive(Debug, Clone)]
pub struct HashMapStats {
    pub insertions: u64,
    pub lookups: u64,
    pub removals: u64,
    pub size: usize,
}

impl<K, V> FastHashMap<K, V>
where
    K: Clone + std::hash::Hash + Eq + Send + Sync,
    V: Clone + Send + Sync,
{
    pub fn new() -> Self {
        Self {
            data: Arc::new(Mutex::new(HashMap::new())),
            stats: Arc::new(Mutex::new(HashMapStats {
                insertions: 0,
                lookups: 0,
                removals: 0,
                size: 0,
            })),
        }
    }

    pub fn insert(&self, key: K, value: V) -> Option<V> {
        let mut data = self.data.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        let result = data.insert(key, value);
        stats.insertions += 1;
        stats.size = data.len();
        result
    }

    pub fn get(&self, key: &K) -> Option<V> {
        let data = self.data.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        let result = data.get(key).cloned();
        stats.lookups += 1;
        result
    }

    pub fn remove(&self, key: &K) -> Option<V> {
        let mut data = self.data.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        let result = data.remove(key);
        stats.removals += 1;
        stats.size = data.len();
        result
    }

    pub fn get_stats(&self) -> HashMapStats {
        // Lock in the same order as the other methods (data, then stats) to avoid deadlock
        let data = self.data.lock().unwrap();
        let stats = self.stats.lock().unwrap();
        HashMapStats {
            insertions: stats.insertions,
            lookups: stats.lookups,
            removals: stats.removals,
            size: data.len(),
        }
    }

    pub fn clear(&self) {
        let mut data = self.data.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();
        data.clear();
        stats.size = 0;
    }
}
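
// Illustrative usage sketch for `FastHashMap` (the key/value types below are
// arbitrary example choices; any `Clone + Hash + Eq` key and `Clone` value work).
#[cfg(test)]
mod fast_hash_map_example {
    use super::*;

    #[test]
    fn tracks_operations_alongside_the_map() {
        let map: FastHashMap<&'static str, usize> = FastHashMap::new();

        assert_eq!(map.insert("points", 10_000), None);
        assert_eq!(map.insert("points", 12_000), Some(10_000)); // overwrite returns the old value
        assert_eq!(map.get(&"points"), Some(12_000));
        assert_eq!(map.remove(&"points"), Some(12_000));

        let stats = map.get_stats();
        assert_eq!(stats.insertions, 2);
        assert_eq!(stats.lookups, 1);
        assert_eq!(stats.removals, 1);
        assert_eq!(stats.size, 0);
    }
}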

/// Performance monitoring and profiling utilities
pub struct PerformanceProfiler {
    measurements: Arc<Mutex<HashMap<String, Vec<Duration>>>>,
    active_timers: Arc<Mutex<HashMap<String, Instant>>>,
}

impl PerformanceProfiler {
    pub fn new() -> Self {
        Self {
            measurements: Arc::new(Mutex::new(HashMap::new())),
            active_timers: Arc::new(Mutex::new(HashMap::new())),
        }
    }

    pub fn start_timer(&self, name: &str) {
        let mut timers = self.active_timers.lock().unwrap();
        timers.insert(name.to_string(), Instant::now());
    }

    pub fn end_timer(&self, name: &str) -> Option<Duration> {
        let mut timers = self.active_timers.lock().unwrap();
        let mut measurements = self.measurements.lock().unwrap();

        if let Some(start_time) = timers.remove(name) {
            let duration = start_time.elapsed();
            measurements
                .entry(name.to_string())
                .or_default()
                .push(duration);
            Some(duration)
        } else {
            None
        }
    }

    pub fn get_average_time(&self, name: &str) -> Option<Duration> {
        let measurements = self.measurements.lock().unwrap();
        if let Some(times) = measurements.get(name) {
            if !times.is_empty() {
                let total: Duration = times.iter().sum();
                Some(total / times.len() as u32)
            } else {
                None
            }
        } else {
            None
        }
    }

    pub fn get_stats(&self) -> HashMap<String, PerformanceStats> {
        let measurements = self.measurements.lock().unwrap();
        let mut stats = HashMap::new();

        for (name, times) in measurements.iter() {
            if !times.is_empty() {
                let total: Duration = times.iter().sum();
                let average = total / times.len() as u32;
                let min = *times.iter().min().unwrap();
                let max = *times.iter().max().unwrap();

                stats.insert(name.clone(), PerformanceStats {
                    count: times.len(),
                    total_time: total,
                    average_time: average,
                    min_time: min,
                    max_time: max,
                });
            }
        }

        stats
    }
}
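
// Illustrative usage sketch for `PerformanceProfiler` (the "render_frame" timer
// name and the busy-work below are arbitrary example choices).
#[cfg(test)]
mod profiler_example {
    use super::*;

    #[test]
    fn records_named_timings() {
        let profiler = PerformanceProfiler::new();

        profiler.start_timer("render_frame");
        let _work: u64 = (0..10_000u64).sum(); // stand-in for real work
        let elapsed = profiler.end_timer("render_frame");
        assert!(elapsed.is_some());

        // Ending a timer that was never started reports nothing.
        assert!(profiler.end_timer("unknown").is_none());

        let stats = profiler.get_stats();
        assert_eq!(stats["render_frame"].count, 1);
        assert!(profiler.get_average_time("render_frame").is_some());
    }
}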

#[derive(Debug, Clone)]
pub struct PerformanceStats {
    pub count: usize,
    pub total_time: Duration,
    pub average_time: Duration,
    pub min_time: Duration,
    pub max_time: Duration,
}

impl Default for PerformanceProfiler {
    fn default() -> Self {
        Self::new()
    }
}