ruvector_scipix/optimize/memory.rs

//! Memory optimization utilities
//!
//! Provides object pooling, memory-mapped file loading, and zero-copy operations.
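//!
//! Minimal usage sketch (the import path below is an assumption; adjust it to
//! match how this module is actually exposed by the crate):
//!
//! ```ignore
//! use ruvector_scipix::optimize::memory::GlobalPools;
//!
//! // Borrow a pooled 64KB scratch buffer; it is returned to the pool on drop.
//! let mut buf = GlobalPools::get().acquire_medium();
//! buf.clear();
//! buf.extend_from_slice(b"scratch data");
//! ```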

use std::path::Path;
use std::sync::{Arc, Mutex};
use std::collections::VecDeque;
use std::fs::File;
use memmap2::{Mmap, MmapOptions};

use crate::error::{Result, ScipixError};
use super::memory_opt_enabled;

/// Object pool for reusable buffers
pub struct BufferPool<T> {
    pool: Arc<Mutex<VecDeque<T>>>,
    factory: Arc<dyn Fn() -> T + Send + Sync>,
    max_size: usize,
}

impl<T: Send + 'static> BufferPool<T> {
    /// Create a new buffer pool
    pub fn new<F>(factory: F, initial_size: usize, max_size: usize) -> Self
    where
        F: Fn() -> T + Send + Sync + 'static,
    {
        let factory = Arc::new(factory);
        let pool = Arc::new(Mutex::new(VecDeque::with_capacity(max_size)));

        // Pre-allocate initial buffers
        if memory_opt_enabled() {
            let mut pool_lock = pool.lock().unwrap();
            for _ in 0..initial_size {
                pool_lock.push_back(factory());
            }
        }

        Self {
            pool,
            factory,
            max_size,
        }
    }

    /// Acquire a buffer from the pool, creating a new one if the pool is empty.
    ///
    /// Buffers are returned to the pool as-is on drop, so a reused buffer may
    /// still hold its previous contents.
    pub fn acquire(&self) -> PooledBuffer<T> {
        let buffer = if memory_opt_enabled() {
            self.pool.lock().unwrap().pop_front()
                .unwrap_or_else(|| (self.factory)())
        } else {
            (self.factory)()
        };

        PooledBuffer {
            buffer: Some(buffer),
            pool: self.pool.clone(),
            max_size: self.max_size,
        }
    }

    /// Get current pool size
    pub fn size(&self) -> usize {
        self.pool.lock().unwrap().len()
    }

    /// Clear the pool
    pub fn clear(&self) {
        self.pool.lock().unwrap().clear();
    }
}

/// RAII guard that returns its buffer to the pool when dropped
pub struct PooledBuffer<T> {
    buffer: Option<T>,
    pool: Arc<Mutex<VecDeque<T>>>,
    max_size: usize,
}

impl<T> PooledBuffer<T> {
    /// Get mutable reference to buffer
    pub fn get_mut(&mut self) -> &mut T {
        self.buffer.as_mut().unwrap()
    }

    /// Get immutable reference to buffer
    pub fn get(&self) -> &T {
        self.buffer.as_ref().unwrap()
    }
}

impl<T> Drop for PooledBuffer<T> {
    fn drop(&mut self) {
        if memory_opt_enabled() {
            if let Some(buffer) = self.buffer.take() {
                let mut pool = self.pool.lock().unwrap();
                // Only return the buffer while the pool is below its cap;
                // otherwise drop it so the pool cannot grow without bound.
                if pool.len() < self.max_size {
                    pool.push_back(buffer);
                }
            }
        }
    }
}

impl<T> std::ops::Deref for PooledBuffer<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.buffer.as_ref().unwrap()
    }
}

impl<T> std::ops::DerefMut for PooledBuffer<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.buffer.as_mut().unwrap()
    }
}

/// Memory-mapped model file
pub struct MmapModel {
    _mmap: Mmap,
    data: *const u8,
    len: usize,
}

// SAFETY: the mapping is read-only and stays alive as long as `_mmap`, so the
// raw pointer and length can be shared across threads.
unsafe impl Send for MmapModel {}
unsafe impl Sync for MmapModel {}

impl MmapModel {
    /// Load model from file using memory mapping
    pub fn from_file<P: AsRef<Path>>(path: P) -> Result<Self> {
        let file = File::open(path.as_ref()).map_err(ScipixError::Io)?;

        // SAFETY: the file is opened read-only; callers must not truncate or
        // modify it while the mapping is alive.
        let mmap = unsafe {
            MmapOptions::new()
                .map(&file)
                .map_err(ScipixError::Io)?
        };

        let data = mmap.as_ptr();
        let len = mmap.len();

        Ok(Self {
            _mmap: mmap,
            data,
            len,
        })
    }

    /// Get slice of model data
    pub fn as_slice(&self) -> &[u8] {
        unsafe { std::slice::from_raw_parts(self.data, self.len) }
    }

    /// Get size of mapped region
    pub fn len(&self) -> usize {
        self.len
    }

    /// Check if empty
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
}

/// Zero-copy image view
pub struct ImageView<'a> {
    data: &'a [u8],
    width: u32,
    height: u32,
    channels: u8,
}

impl<'a> ImageView<'a> {
    /// Create new image view from raw data
    pub fn new(data: &'a [u8], width: u32, height: u32, channels: u8) -> Result<Self> {
        // Compute the expected length in usize to avoid u32 overflow on large images
        let expected_len = width as usize * height as usize * channels as usize;
        if data.len() != expected_len {
            return Err(ScipixError::InvalidInput(format!(
                "Invalid data length: expected {}, got {}",
                expected_len,
                data.len()
            )));
        }

        Ok(Self {
            data,
            width,
            height,
            channels,
        })
    }

    /// Get pixel at (x, y)
    pub fn pixel(&self, x: u32, y: u32) -> &[u8] {
        let offset = (y as usize * self.width as usize + x as usize) * self.channels as usize;
        &self.data[offset..offset + self.channels as usize]
    }

    /// Get raw data slice
    pub fn data(&self) -> &[u8] {
        self.data
    }

    /// Get dimensions
    pub fn dimensions(&self) -> (u32, u32) {
        (self.width, self.height)
    }

    /// Get number of channels
    pub fn channels(&self) -> u8 {
        self.channels
    }

    /// Create subview (region of interest)
    pub fn subview(&self, x: u32, y: u32, width: u32, height: u32) -> Result<Self> {
        if x + width > self.width || y + height > self.height {
            return Err(ScipixError::InvalidInput(
                "Subview out of bounds".to_string()
            ));
        }

        // For simplicity this copies the region; a truly zero-copy subview
        // would require stride support in `ImageView`.
        let mut subview_data = Vec::new();
        for row in y..y + height {
            let start = (row as usize * self.width as usize + x as usize) * self.channels as usize;
            let end = start + width as usize * self.channels as usize;
            subview_data.extend_from_slice(&self.data[start..end]);
        }

        // The copied buffer is leaked to satisfy the borrowed return type, so
        // every call allocates memory that is never freed. Prefer an arena
        // allocator (see `Arena` below) or an owned image type in long-running code.
        let leaked = Box::leak(subview_data.into_boxed_slice());

        Ok(Self {
            data: leaked,
            width,
            height,
            channels: self.channels,
        })
    }
}

/// Bump arena allocator for temporary allocations
///
/// Alignment is applied relative to the start of the arena's backing buffer.
pub struct Arena {
    buffer: Vec<u8>,
    offset: usize,
}

impl Arena {
    /// Create new arena with capacity
    pub fn with_capacity(capacity: usize) -> Self {
        Self {
            buffer: Vec::with_capacity(capacity),
            offset: 0,
        }
    }

    /// Allocate `size` zero-initialized bytes, aligned to `align` within the buffer
    pub fn alloc(&mut self, size: usize, align: usize) -> &mut [u8] {
        // Round the offset up to the next multiple of `align`
        let padding = (align - (self.offset % align)) % align;
        self.offset += padding;

        let start = self.offset;
        let end = start + size;

        // Grow and zero-initialize the backing buffer as needed; using
        // `resize` avoids handing out uninitialized memory.
        if end > self.buffer.len() {
            self.buffer.resize(end, 0);
        }

        self.offset = end;
        &mut self.buffer[start..end]
    }

    /// Reset arena (keeps capacity)
    pub fn reset(&mut self) {
        self.offset = 0;
        self.buffer.clear();
    }

    /// Get current usage
    pub fn usage(&self) -> usize {
        self.offset
    }

    /// Get capacity
    pub fn capacity(&self) -> usize {
        self.buffer.capacity()
    }
}

/// Global buffer pools for common sizes
pub struct GlobalPools {
    small: BufferPool<Vec<u8>>,   // 1KB buffers
    medium: BufferPool<Vec<u8>>,  // 64KB buffers
    large: BufferPool<Vec<u8>>,   // 1MB buffers
}

impl GlobalPools {
    fn new() -> Self {
        Self {
            small: BufferPool::new(|| Vec::with_capacity(1024), 10, 100),
            medium: BufferPool::new(|| Vec::with_capacity(64 * 1024), 5, 50),
            large: BufferPool::new(|| Vec::with_capacity(1024 * 1024), 2, 20),
        }
    }

    /// Get the global pools instance
    pub fn get() -> &'static Self {
        static POOLS: std::sync::OnceLock<GlobalPools> = std::sync::OnceLock::new();
        POOLS.get_or_init(GlobalPools::new)
    }

    /// Acquire small buffer (1KB)
    pub fn acquire_small(&self) -> PooledBuffer<Vec<u8>> {
        self.small.acquire()
    }

    /// Acquire medium buffer (64KB)
    pub fn acquire_medium(&self) -> PooledBuffer<Vec<u8>> {
        self.medium.acquire()
    }

    /// Acquire large buffer (1MB)
    pub fn acquire_large(&self) -> PooledBuffer<Vec<u8>> {
        self.large.acquire()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;
    use tempfile::NamedTempFile;

    #[test]
    fn test_buffer_pool() {
        let pool = BufferPool::new(|| Vec::with_capacity(1024), 2, 10);

        assert_eq!(pool.size(), 2);

        let mut buf1 = pool.acquire();
        assert_eq!(buf1.capacity(), 1024);
        buf1.extend_from_slice(b"test");

        drop(buf1);
        // Acquiring removed one buffer; dropping returns it, restoring the pool to 2.
        assert_eq!(pool.size(), 2);
    }
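
    // Additional check (a sketch assuming memory optimization is enabled, as
    // the test above already does): once the pool holds `max_size` buffers,
    // further returned buffers are dropped instead of being pooled.
    #[test]
    fn test_buffer_pool_respects_max_size() {
        let pool = BufferPool::new(|| Vec::<u8>::with_capacity(16), 0, 1);
        assert_eq!(pool.size(), 0);

        let buf_a = pool.acquire();
        let buf_b = pool.acquire();
        drop(buf_a); // pool was below max_size, so this buffer is returned
        drop(buf_b); // pool is at max_size, so this buffer is discarded
        assert_eq!(pool.size(), 1);
    }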

    #[test]
    fn test_mmap_model() {
        let mut temp = NamedTempFile::new().unwrap();
        temp.write_all(b"test model data").unwrap();
        temp.flush().unwrap();

        let mmap = MmapModel::from_file(temp.path()).unwrap();
        assert_eq!(mmap.as_slice(), b"test model data");
        assert_eq!(mmap.len(), 15);
    }
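
    // Additional check: `MmapModel` is marked `Send + Sync` above, so a mapped
    // file can be shared across threads behind an `Arc`.
    #[test]
    fn test_mmap_model_shared_across_threads() {
        let mut temp = NamedTempFile::new().unwrap();
        temp.write_all(b"shared bytes").unwrap();
        temp.flush().unwrap();

        let mmap = std::sync::Arc::new(MmapModel::from_file(temp.path()).unwrap());
        let cloned = std::sync::Arc::clone(&mmap);
        let handle = std::thread::spawn(move || cloned.as_slice().to_vec());
        assert_eq!(handle.join().unwrap(), mmap.as_slice());
    }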

    #[test]
    fn test_image_view() {
        let data = vec![
            255, 0, 0, 255,     // Red pixel
            0, 255, 0, 255,     // Green pixel
            0, 0, 255, 255,     // Blue pixel
            255, 255, 255, 255, // White pixel
        ];

        let view = ImageView::new(&data, 2, 2, 4).unwrap();
        assert_eq!(view.dimensions(), (2, 2));
        assert_eq!(view.pixel(0, 0), &[255, 0, 0, 255]);
        assert_eq!(view.pixel(1, 1), &[255, 255, 255, 255]);
    }
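
    // Additional check: `subview` copies the selected region (as noted in its
    // implementation), so the crop should contain exactly the requested pixels
    // and out-of-bounds requests should be rejected.
    #[test]
    fn test_image_subview() {
        let data = vec![
            255, 0, 0, 255,     // (0, 0) red
            0, 255, 0, 255,     // (1, 0) green
            0, 0, 255, 255,     // (0, 1) blue
            255, 255, 255, 255, // (1, 1) white
        ];

        let view = ImageView::new(&data, 2, 2, 4).unwrap();
        let crop = view.subview(1, 1, 1, 1).unwrap();
        assert_eq!(crop.dimensions(), (1, 1));
        assert_eq!(crop.pixel(0, 0), &[255, 255, 255, 255]);

        assert!(view.subview(1, 1, 2, 2).is_err());
    }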

    #[test]
    fn test_arena() {
        let mut arena = Arena::with_capacity(1024);

        let slice1 = arena.alloc(100, 8);
        assert_eq!(slice1.len(), 100);

        let slice2 = arena.alloc(200, 8);
        assert_eq!(slice2.len(), 200);

        assert!(arena.usage() >= 300);

        arena.reset();
        assert_eq!(arena.usage(), 0);
    }
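
    // Additional check: allocations are padded so that each block starts at a
    // multiple of `align`, measured from the start of the arena buffer.
    #[test]
    fn test_arena_alignment() {
        let mut arena = Arena::with_capacity(64);

        // 3 bytes, then an 8-byte-aligned block: 5 bytes of padding expected.
        let _ = arena.alloc(3, 1);
        let _ = arena.alloc(8, 8);
        assert_eq!(arena.usage(), 16);
    }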

    #[test]
    fn test_global_pools() {
        let pools = GlobalPools::get();

        let small = pools.acquire_small();
        assert!(small.capacity() >= 1024);

        let medium = pools.acquire_medium();
        assert!(medium.capacity() >= 64 * 1024);

        let large = pools.acquire_large();
        assert!(large.capacity() >= 1024 * 1024);
    }
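
    // Additional check (a sketch assuming memory optimization is enabled, as
    // in `test_buffer_pool`): buffers go back to the pool as-is, so a reused
    // buffer may still hold stale contents and should be cleared by the caller
    // when that matters.
    #[test]
    fn test_pooled_buffer_reuse_keeps_contents() {
        let pool = BufferPool::new(Vec::<u8>::new, 0, 4);

        let mut buf = pool.acquire();
        buf.extend_from_slice(b"stale");
        drop(buf);

        let reused = pool.acquire();
        assert_eq!(reused.as_slice(), b"stale");
    }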
}