leptos_helios/performance_advanced/memory_optimizations.rs

//! Memory Performance Optimizations
//!
//! This module provides memory optimization techniques for large-scale data visualization,
//! including memory pooling, string interning, and compact data structures.

use std::alloc::{GlobalAlloc, Layout, System};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

/// Custom allocator that wraps the system allocator and tracks allocation statistics
pub struct OptimizedAllocator {
    stats: Arc<Mutex<AllocatorStats>>,
}

#[derive(Debug, Clone)]
pub struct AllocatorStats {
    pub total_allocations: u64,
    pub total_deallocations: u64,
    pub total_bytes_allocated: u64,
    pub total_bytes_deallocated: u64,
    pub peak_memory_usage: u64,
    pub current_memory_usage: u64,
}

impl OptimizedAllocator {
    pub fn new() -> Self {
        Self {
            stats: Arc::new(Mutex::new(AllocatorStats {
                total_allocations: 0,
                total_deallocations: 0,
                total_bytes_allocated: 0,
                total_bytes_deallocated: 0,
                peak_memory_usage: 0,
                current_memory_usage: 0,
            })),
        }
    }

    pub fn get_stats(&self) -> AllocatorStats {
        self.stats.lock().unwrap().clone()
    }
}

unsafe impl GlobalAlloc for OptimizedAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let ptr = System.alloc(layout);
        if !ptr.is_null() {
            let mut stats = self.stats.lock().unwrap();
            stats.total_allocations += 1;
            stats.total_bytes_allocated += layout.size() as u64;
            stats.current_memory_usage += layout.size() as u64;
            stats.peak_memory_usage = stats.peak_memory_usage.max(stats.current_memory_usage);
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        System.dealloc(ptr, layout);
        let mut stats = self.stats.lock().unwrap();
        stats.total_deallocations += 1;
        stats.total_bytes_deallocated += layout.size() as u64;
        stats.current_memory_usage = stats.current_memory_usage.saturating_sub(layout.size() as u64);
    }
}

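// Illustrative usage sketch (not part of the original module): exercises the
// instrumented allocator directly instead of registering it as the global
// allocator; the block size and alignment below are arbitrary choices.
#[cfg(test)]
mod optimized_allocator_example {
    use super::*;

    #[test]
    fn tracks_allocations_and_deallocations() {
        let allocator = OptimizedAllocator::new();
        let layout = Layout::from_size_align(1024, 8).unwrap();

        // Allocate and immediately free one block through the wrapper.
        unsafe {
            let ptr = allocator.alloc(layout);
            assert!(!ptr.is_null());
            allocator.dealloc(ptr, layout);
        }

        let stats = allocator.get_stats();
        assert_eq!(stats.total_allocations, 1);
        assert_eq!(stats.total_deallocations, 1);
        assert_eq!(stats.current_memory_usage, 0);
    }
}
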
/// Memory pool for frequently allocated objects
pub struct ObjectPool<T> {
    pool: Arc<Mutex<Vec<T>>>,
    factory: Box<dyn Fn() -> T + Send + Sync>,
    reset_fn: Box<dyn Fn(&mut T) + Send + Sync>,
    max_size: usize,
    stats: Arc<Mutex<PoolStats>>,
}

#[derive(Debug, Clone)]
pub struct PoolStats {
    pub objects_created: u64,
    pub objects_reused: u64,
    pub objects_destroyed: u64,
    pub pool_size: usize,
}

impl<T> ObjectPool<T> {
    pub fn new<F, R>(factory: F, reset_fn: R, max_size: usize) -> Self
    where
        F: Fn() -> T + Send + Sync + 'static,
        R: Fn(&mut T) + Send + Sync + 'static,
    {
        Self {
            pool: Arc::new(Mutex::new(Vec::new())),
            factory: Box::new(factory),
            reset_fn: Box::new(reset_fn),
            max_size,
            stats: Arc::new(Mutex::new(PoolStats {
                objects_created: 0,
                objects_reused: 0,
                objects_destroyed: 0,
                pool_size: 0,
            })),
        }
    }

    pub fn acquire(&self) -> T {
        let mut pool = self.pool.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        if let Some(mut obj) = pool.pop() {
            (self.reset_fn)(&mut obj);
            stats.objects_reused += 1;
            stats.pool_size = pool.len();
            obj
        } else {
            stats.objects_created += 1;
            (self.factory)()
        }
    }

    pub fn release(&self, mut obj: T) {
        let mut pool = self.pool.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        if pool.len() < self.max_size {
            (self.reset_fn)(&mut obj);
            pool.push(obj);
            stats.pool_size = pool.len();
        } else {
            stats.objects_destroyed += 1;
        }
    }

    pub fn get_stats(&self) -> PoolStats {
        self.stats.lock().unwrap().clone()
    }

    pub fn clear(&self) {
        let mut pool = self.pool.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();
        pool.clear();
        stats.pool_size = 0;
    }
}

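// Illustrative usage sketch (not part of the original module): pools Vec<f64>
// scratch buffers, a plausible use in a renderer; the element type, capacity,
// and pool size below are assumptions chosen for the demonstration.
#[cfg(test)]
mod object_pool_example {
    use super::*;

    #[test]
    fn reuses_released_buffers() {
        let pool = ObjectPool::new(
            || Vec::<f64>::with_capacity(1024), // factory for new buffers
            |buf: &mut Vec<f64>| buf.clear(),   // reset before reuse
            8,                                  // keep at most 8 idle buffers
        );

        let mut buf = pool.acquire();
        buf.extend_from_slice(&[1.0, 2.0, 3.0]);
        pool.release(buf);

        // The second acquire should hand back the cleared buffer from the pool.
        let reused = pool.acquire();
        assert!(reused.is_empty());

        let stats = pool.get_stats();
        assert_eq!(stats.objects_created, 1);
        assert_eq!(stats.objects_reused, 1);
    }
}
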
/// String interning for memory efficiency: repeated strings are stored once and referenced by integer id
pub struct StringInterner {
    strings: Arc<Mutex<HashMap<String, usize>>>,
    reverse: Arc<Mutex<Vec<String>>>,
    stats: Arc<Mutex<InternerStats>>,
}

#[derive(Debug, Clone)]
pub struct InternerStats {
    pub total_strings: usize,
    pub unique_strings: usize,
    pub memory_saved: usize,
}

impl StringInterner {
    pub fn new() -> Self {
        Self {
            strings: Arc::new(Mutex::new(HashMap::new())),
            reverse: Arc::new(Mutex::new(Vec::new())),
            stats: Arc::new(Mutex::new(InternerStats {
                total_strings: 0,
                unique_strings: 0,
                memory_saved: 0,
            })),
        }
    }

    pub fn intern(&self, s: &str) -> usize {
        let mut strings = self.strings.lock().unwrap();
        let mut reverse = self.reverse.lock().unwrap();
        let mut stats = self.stats.lock().unwrap();

        stats.total_strings += 1;

        if let Some(&id) = strings.get(s) {
            stats.memory_saved += s.len();
            id
        } else {
            let id = reverse.len();
            strings.insert(s.to_string(), id);
            reverse.push(s.to_string());
            stats.unique_strings += 1;
            id
        }
    }

    pub fn get(&self, id: usize) -> Option<String> {
        let reverse = self.reverse.lock().unwrap();
        reverse.get(id).cloned()
    }

    pub fn get_stats(&self) -> InternerStats {
        self.stats.lock().unwrap().clone()
    }
}

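// Illustrative usage sketch (not part of the original module): interns repeated
// category labels, the kind of duplication common in chart data; the label
// strings are made up for the demonstration.
#[cfg(test)]
mod string_interner_example {
    use super::*;

    #[test]
    fn deduplicates_repeated_labels() {
        let interner = StringInterner::new();

        let north = interner.intern("region-north");
        let south = interner.intern("region-south");
        let north_again = interner.intern("region-north");

        // Repeated strings map to the same id and can be resolved back.
        assert_eq!(north, north_again);
        assert_ne!(north, south);
        assert_eq!(interner.get(north).as_deref(), Some("region-north"));

        let stats = interner.get_stats();
        assert_eq!(stats.total_strings, 3);
        assert_eq!(stats.unique_strings, 2);
    }
}
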
/// Memory-efficient circular buffer
pub struct CircularBuffer<T> {
    buffer: Vec<T>,
    head: usize,
    tail: usize,
    size: usize,
    capacity: usize,
}

impl<T: Default> CircularBuffer<T> {
    pub fn new(capacity: usize) -> Self {
        Self {
            buffer: Vec::with_capacity(capacity),
            head: 0,
            tail: 0,
            size: 0,
            capacity,
        }
    }

    pub fn push(&mut self, item: T) -> Option<T> {
        let mut evicted = None;

        if self.size == self.capacity {
            // Buffer is full: overwrite the oldest slot and advance both indices
            // so that head (oldest item) and tail (next write slot) stay in sync.
            evicted = Some(std::mem::replace(&mut self.buffer[self.head], item));
            self.head = (self.head + 1) % self.capacity;
            self.tail = self.head;
        } else {
            // Buffer has space: append until the backing Vec reaches capacity,
            // then reuse slots freed by earlier pops.
            if self.buffer.len() < self.capacity {
                self.buffer.push(item);
            } else {
                self.buffer[self.tail] = item;
            }
            self.tail = (self.tail + 1) % self.capacity;
            self.size += 1;
        }

        evicted
    }

    pub fn pop(&mut self) -> Option<T> {
        if self.size == 0 {
            None
        } else {
            let item = std::mem::take(&mut self.buffer[self.head]);
            self.head = (self.head + 1) % self.capacity;
            self.size -= 1;
            Some(item)
        }
    }

    pub fn peek(&self) -> Option<&T> {
        if self.size == 0 {
            None
        } else {
            Some(&self.buffer[self.head])
        }
    }

    pub fn len(&self) -> usize {
        self.size
    }

    pub fn is_empty(&self) -> bool {
        self.size == 0
    }

    pub fn is_full(&self) -> bool {
        self.size == self.capacity
    }

    pub fn clear(&mut self) {
        // Drop any retained items so stale values are not kept alive;
        // the backing Vec keeps its capacity, so no reallocation occurs.
        self.buffer.clear();
        self.head = 0;
        self.tail = 0;
        self.size = 0;
    }
}

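// Illustrative usage sketch (not part of the original module): keeps a rolling
// window of the three most recent samples; the capacity and values are arbitrary.
#[cfg(test)]
mod circular_buffer_example {
    use super::*;

    #[test]
    fn evicts_oldest_when_full() {
        let mut window: CircularBuffer<u32> = CircularBuffer::new(3);

        assert_eq!(window.push(1), None);
        assert_eq!(window.push(2), None);
        assert_eq!(window.push(3), None);
        assert!(window.is_full());

        // Pushing into a full buffer returns the evicted (oldest) item.
        assert_eq!(window.push(4), Some(1));
        assert_eq!(window.pop(), Some(2));
        assert_eq!(window.len(), 2);
    }
}
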
/// Memory-efficient bit set for large datasets
pub struct BitSet {
    bits: Vec<u64>,
    size: usize,
}

impl BitSet {
    pub fn new(size: usize) -> Self {
        let words = (size + 63) / 64; // Round up to nearest word
        Self {
            bits: vec![0; words],
            size,
        }
    }

    pub fn set(&mut self, index: usize) {
        if index < self.size {
            let word = index / 64;
            let bit = index % 64;
            self.bits[word] |= 1 << bit;
        }
    }

    pub fn clear(&mut self, index: usize) {
        if index < self.size {
            let word = index / 64;
            let bit = index % 64;
            self.bits[word] &= !(1 << bit);
        }
    }

    pub fn get(&self, index: usize) -> bool {
        if index < self.size {
            let word = index / 64;
            let bit = index % 64;
            (self.bits[word] & (1 << bit)) != 0
        } else {
            false
        }
    }

    pub fn count_ones(&self) -> usize {
        self.bits.iter().map(|&word| word.count_ones() as usize).sum()
    }

    pub fn clear_all(&mut self) {
        for word in &mut self.bits {
            *word = 0;
        }
    }

    pub fn size(&self) -> usize {
        self.size
    }
}

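// Illustrative usage sketch (not part of the original module): flags a handful
// of "selected" rows out of a large dataset; the size and indices are arbitrary.
#[cfg(test)]
mod bit_set_example {
    use super::*;

    #[test]
    fn sets_clears_and_counts_bits() {
        let mut selected = BitSet::new(1_000_000);

        selected.set(42);
        selected.set(999_999);
        selected.set(42); // setting the same bit twice is idempotent

        assert!(selected.get(42));
        assert!(!selected.get(43));
        assert_eq!(selected.count_ones(), 2);

        selected.clear(42);
        assert_eq!(selected.count_ones(), 1);
    }
}
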
/// Memory-efficient sparse matrix representation
pub struct SparseMatrix {
    data: HashMap<(usize, usize), f64>,
    rows: usize,
    cols: usize,
}

impl SparseMatrix {
    pub fn new(rows: usize, cols: usize) -> Self {
        Self {
            data: HashMap::new(),
            rows,
            cols,
        }
    }

    pub fn set(&mut self, row: usize, col: usize, value: f64) {
        if row < self.rows && col < self.cols {
            if value != 0.0 {
                self.data.insert((row, col), value);
            } else {
                self.data.remove(&(row, col));
            }
        }
    }

    pub fn get(&self, row: usize, col: usize) -> f64 {
        if row < self.rows && col < self.cols {
            self.data.get(&(row, col)).copied().unwrap_or(0.0)
        } else {
            0.0
        }
    }

    pub fn non_zero_count(&self) -> usize {
        self.data.len()
    }

    /// Approximate number of bytes used by the stored entries; HashMap
    /// bookkeeping overhead is not included.
    pub fn memory_usage(&self) -> usize {
        self.data.len() * (std::mem::size_of::<(usize, usize)>() + std::mem::size_of::<f64>())
    }

    pub fn density(&self) -> f64 {
        let total = self.rows * self.cols;
        if total == 0 {
            0.0
        } else {
            self.non_zero_count() as f64 / total as f64
        }
    }
}

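// Illustrative usage sketch (not part of the original module): stores a few
// non-zero cells of a large, mostly empty matrix; the dimensions and values
// are arbitrary.
#[cfg(test)]
mod sparse_matrix_example {
    use super::*;

    #[test]
    fn stores_only_non_zero_values() {
        let mut matrix = SparseMatrix::new(10_000, 10_000);

        matrix.set(3, 7, 1.5);
        matrix.set(9_999, 0, -2.0);
        matrix.set(3, 7, 0.0); // writing zero removes the entry

        assert_eq!(matrix.get(9_999, 0), -2.0);
        assert_eq!(matrix.get(3, 7), 0.0);
        assert_eq!(matrix.non_zero_count(), 1);
        assert!(matrix.density() < 1e-6);
    }
}
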
impl Default for StringInterner {
    fn default() -> Self {
        Self::new()
    }
}