ipfrs_tensorlogic/allocation_optimizer.rs

//! Allocation Optimization Utilities
//!
//! This module provides utilities for reducing heap allocations in conversion code,
//! including buffer pooling and stack-based allocation helpers.
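//!
//! # Examples
//!
//! A minimal usage sketch; the crate/module path
//! `ipfrs_tensorlogic::allocation_optimizer` is assumed here and may need
//! adjusting to the actual crate layout:
//!
//! ```
//! use ipfrs_tensorlogic::allocation_optimizer::{AdaptiveBuffer, BufferPool};
//!
//! // Reuse heap buffers across conversions instead of reallocating.
//! let pool = BufferPool::new(4096, 8);
//! let mut scratch = pool.acquire();
//! scratch.as_mut().extend_from_slice(b"encoded payload");
//! drop(scratch); // returned to the pool for the next conversion
//!
//! // Keep small conversion outputs off the heap entirely.
//! let mut out = AdaptiveBuffer::new(32);
//! out.write(b"header").unwrap();
//! assert_eq!(out.as_slice(), b"header");
//! ```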

use parking_lot::Mutex;
use std::collections::VecDeque;
use std::sync::Arc;
use thiserror::Error;

#[derive(Error, Debug)]
pub enum AllocationError {
    #[error("Buffer pool exhausted")]
    PoolExhausted,

    #[error("Invalid buffer size: {0}")]
    InvalidSize(usize),

    #[error("Buffer too small: required {required}, available {available}")]
    BufferTooSmall { required: usize, available: usize },
}

/// Buffer pool for reusable byte buffers
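///
/// # Examples
///
/// A minimal sketch of the acquire/drop cycle (module path assumed):
///
/// ```
/// use ipfrs_tensorlogic::allocation_optimizer::BufferPool;
///
/// let pool = BufferPool::new(1024, 4);
/// let mut buf = pool.acquire();
/// buf.as_mut().extend_from_slice(&[1, 2, 3]);
/// assert_eq!(&buf[..], &[1, 2, 3]);
/// drop(buf);                  // buffer returns to the pool
/// assert_eq!(pool.size(), 1); // one idle buffer ready for reuse
/// ```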
pub struct BufferPool {
    buffers: Arc<Mutex<VecDeque<Vec<u8>>>>,
    buffer_size: usize,
    max_buffers: usize,
}

impl BufferPool {
    /// Create a new buffer pool
    pub fn new(buffer_size: usize, max_buffers: usize) -> Self {
        Self {
            buffers: Arc::new(Mutex::new(VecDeque::new())),
            buffer_size,
            max_buffers,
        }
    }

    /// Acquire a buffer from the pool, reusing an idle buffer when one is
    /// available and allocating a fresh one otherwise
    pub fn acquire(&self) -> PooledBuffer {
        let mut buffers = self.buffers.lock();

        let buffer = if let Some(mut buf) = buffers.pop_front() {
            // Recycle an idle buffer: clear its contents but keep its
            // capacity, topping it up to the configured size if needed.
            buf.clear();
            buf.reserve(self.buffer_size);
            buf
        } else {
            Vec::with_capacity(self.buffer_size)
        };

        PooledBuffer {
            buffer,
            pool: Arc::clone(&self.buffers),
            max_buffers: self.max_buffers,
        }
    }

    /// Get the number of idle buffers currently held in the pool
    pub fn size(&self) -> usize {
        self.buffers.lock().len()
    }

    /// Get the per-buffer size this pool was configured with
    pub fn buffer_size(&self) -> usize {
        self.buffer_size
    }
}

/// RAII guard for a pooled buffer.
///
/// On drop, the buffer is returned to the pool if the pool has room;
/// otherwise it is simply freed.
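///
/// # Examples
///
/// A sketch of deref-based access (module path assumed):
///
/// ```
/// use ipfrs_tensorlogic::allocation_optimizer::BufferPool;
///
/// let pool = BufferPool::new(64, 2);
/// let mut buf = pool.acquire();
/// buf.as_mut().extend_from_slice(&[10, 20]);
/// // `PooledBuffer` derefs to `[u8]`, so slice APIs work directly.
/// assert_eq!(buf[0], 10);
/// assert!(!buf.is_empty());
/// ```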
pub struct PooledBuffer {
    buffer: Vec<u8>,
    pool: Arc<Mutex<VecDeque<Vec<u8>>>>,
    max_buffers: usize,
}

impl PooledBuffer {
    /// Get mutable reference to the buffer
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut Vec<u8> {
        &mut self.buffer
    }

    /// Get reference to the buffer
    #[allow(clippy::should_implement_trait)]
    pub fn as_ref(&self) -> &[u8] {
        &self.buffer
    }

    /// Get the buffer length
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }
}

impl Drop for PooledBuffer {
    fn drop(&mut self) {
        let mut pool = self.pool.lock();
        if pool.len() < self.max_buffers {
            // Return the buffer to the pool for reuse; if the pool is
            // already at capacity, the buffer is dropped and freed instead.
            let buffer = std::mem::take(&mut self.buffer);
            pool.push_back(buffer);
        }
    }
}

impl std::ops::Deref for PooledBuffer {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}

impl std::ops::DerefMut for PooledBuffer {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}

/// Buffer pool for reusable typed buffers
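///
/// # Examples
///
/// A sketch with `f32` elements (module path assumed):
///
/// ```
/// use ipfrs_tensorlogic::allocation_optimizer::TypedBufferPool;
///
/// let pool = TypedBufferPool::<f32>::new(128, 4);
/// let mut buf = pool.acquire();
/// buf.push(1.0);
/// buf.extend([2.0, 3.0]);
/// assert_eq!(&buf[..], &[1.0, 2.0, 3.0]);
/// drop(buf);                  // capacity is recycled, not freed
/// assert_eq!(pool.size(), 1);
/// ```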
pub struct TypedBufferPool<T> {
    buffers: Arc<Mutex<VecDeque<Vec<T>>>>,
    buffer_capacity: usize,
    max_buffers: usize,
}

impl<T> TypedBufferPool<T> {
    /// Create a new typed buffer pool
    pub fn new(buffer_capacity: usize, max_buffers: usize) -> Self {
        Self {
            buffers: Arc::new(Mutex::new(VecDeque::new())),
            buffer_capacity,
            max_buffers,
        }
    }

    /// Acquire a buffer from the pool, reusing an idle buffer when one is
    /// available and allocating a fresh one otherwise
    pub fn acquire(&self) -> TypedPooledBuffer<T> {
        let mut buffers = self.buffers.lock();

        let buffer = if let Some(mut buf) = buffers.pop_front() {
            // Recycle an idle buffer: clear its contents but keep its
            // capacity, topping it up to the configured size if needed.
            buf.clear();
            buf.reserve(self.buffer_capacity);
            buf
        } else {
            Vec::with_capacity(self.buffer_capacity)
        };

        TypedPooledBuffer {
            buffer,
            pool: Arc::clone(&self.buffers),
            max_buffers: self.max_buffers,
        }
    }

    /// Get the number of idle buffers currently held in the pool
    pub fn size(&self) -> usize {
        self.buffers.lock().len()
    }
}

/// RAII guard for a typed pooled buffer.
///
/// On drop, the buffer is returned to the pool if the pool has room;
/// otherwise it is simply freed.
pub struct TypedPooledBuffer<T> {
    buffer: Vec<T>,
    pool: Arc<Mutex<VecDeque<Vec<T>>>>,
    max_buffers: usize,
}

impl<T> TypedPooledBuffer<T> {
    /// Get mutable reference to the buffer
    #[allow(clippy::should_implement_trait)]
    pub fn as_mut(&mut self) -> &mut Vec<T> {
        &mut self.buffer
    }

    /// Get reference to the buffer
    #[allow(clippy::should_implement_trait)]
    pub fn as_ref(&self) -> &[T] {
        &self.buffer
    }

    /// Get the buffer length
    pub fn len(&self) -> usize {
        self.buffer.len()
    }

    /// Check if buffer is empty
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    /// Push an element to the buffer
    pub fn push(&mut self, value: T) {
        self.buffer.push(value);
    }

    /// Extend buffer with iterator
    pub fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
        self.buffer.extend(iter);
    }
}

impl<T> Drop for TypedPooledBuffer<T> {
    fn drop(&mut self) {
        let mut pool = self.pool.lock();
        if pool.len() < self.max_buffers {
            // Return the buffer to the pool for reuse; if the pool is
            // already at capacity, the buffer is dropped and freed instead.
            let buffer = std::mem::take(&mut self.buffer);
            pool.push_back(buffer);
        }
    }
}

impl<T> std::ops::Deref for TypedPooledBuffer<T> {
    type Target = [T];

    fn deref(&self) -> &Self::Target {
        &self.buffer
    }
}

impl<T> std::ops::DerefMut for TypedPooledBuffer<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.buffer
    }
}

/// Zero-copy conversion utilities
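///
/// # Examples
///
/// A round-trip sketch between `f32` and bytes (module path assumed):
///
/// ```
/// use ipfrs_tensorlogic::allocation_optimizer::ZeroCopyConverter;
///
/// let floats = [1.0f32, 2.0, 3.0];
/// let bytes = ZeroCopyConverter::slice_to_bytes(&floats);
/// assert_eq!(bytes.len(), 12); // 3 floats * 4 bytes, no copy made
///
/// let back: &[f32] = ZeroCopyConverter::bytes_to_slice(bytes);
/// assert_eq!(back, &floats[..]);
/// ```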
pub struct ZeroCopyConverter;

impl ZeroCopyConverter {
    /// Convert a slice to a different element type without copying.
    ///
    /// Panics if the target type's size or alignment is incompatible with
    /// the input (see `bytemuck::cast_slice`).
    #[inline]
    pub fn cast_slice<T, U>(slice: &[T]) -> &[U]
    where
        T: bytemuck::Pod,
        U: bytemuck::Pod,
    {
        bytemuck::cast_slice(slice)
    }

    /// Convert a mutable slice to a different element type without copying.
    ///
    /// Panics if the target type's size or alignment is incompatible with
    /// the input (see `bytemuck::cast_slice_mut`).
    #[inline]
    pub fn cast_slice_mut<T, U>(slice: &mut [T]) -> &mut [U]
    where
        T: bytemuck::Pod,
        U: bytemuck::Pod,
    {
        bytemuck::cast_slice_mut(slice)
    }

    /// Convert bytes to a typed slice.
    ///
    /// Panics if `bytes` is not aligned for `T` or its length is not a
    /// multiple of `size_of::<T>()`.
    #[inline]
    pub fn bytes_to_slice<T: bytemuck::Pod>(bytes: &[u8]) -> &[T] {
        bytemuck::cast_slice(bytes)
    }

    /// Convert a typed slice to bytes. This cast cannot fail, since any
    /// `Pod` value can be viewed as plain bytes.
    #[inline]
    pub fn slice_to_bytes<T: bytemuck::Pod>(slice: &[T]) -> &[u8] {
        bytemuck::cast_slice(slice)
    }
}

/// Stack-based buffer for small allocations
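///
/// # Examples
///
/// A bounded-write sketch (module path assumed):
///
/// ```
/// use ipfrs_tensorlogic::allocation_optimizer::StackBuffer;
///
/// let mut buf = StackBuffer::<8>::new();
/// buf.write(&[1, 2, 3]).unwrap();
/// assert_eq!(buf.as_slice(), &[1, 2, 3]);
/// assert_eq!(buf.remaining(), 5);
///
/// // Writes beyond the fixed capacity fail instead of allocating.
/// assert!(buf.write(&[0; 6]).is_err());
/// ```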
pub struct StackBuffer<const N: usize> {
    data: [u8; N],
    len: usize,
}

impl<const N: usize> StackBuffer<N> {
    /// Create a new stack buffer
    #[inline]
    pub const fn new() -> Self {
        Self {
            data: [0u8; N],
            len: 0,
        }
    }

    /// Get capacity
    #[inline]
    pub const fn capacity(&self) -> usize {
        N
    }

    /// Get length
    #[inline]
    pub const fn len(&self) -> usize {
        self.len
    }

    /// Check if empty
    #[inline]
    pub const fn is_empty(&self) -> bool {
        self.len == 0
    }

    /// Get remaining capacity
    #[inline]
    pub const fn remaining(&self) -> usize {
        N - self.len
    }

    /// Write bytes to buffer
    #[inline]
    pub fn write(&mut self, bytes: &[u8]) -> Result<(), AllocationError> {
        if bytes.len() > self.remaining() {
            return Err(AllocationError::BufferTooSmall {
                required: bytes.len(),
                available: self.remaining(),
            });
        }

        self.data[self.len..self.len + bytes.len()].copy_from_slice(bytes);
        self.len += bytes.len();
        Ok(())
    }

    /// Get slice of written data
    #[inline]
    pub fn as_slice(&self) -> &[u8] {
        &self.data[..self.len]
    }

    /// Clear buffer
    #[inline]
    pub fn clear(&mut self) {
        self.len = 0;
    }
}

impl<const N: usize> Default for StackBuffer<N> {
    fn default() -> Self {
        Self::new()
    }
}

/// Conversion buffer that uses the stack for small sizes and the heap for large ones
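///
/// # Examples
///
/// A sketch of the stack-to-heap upgrade (module path assumed):
///
/// ```
/// use ipfrs_tensorlogic::allocation_optimizer::AdaptiveBuffer;
///
/// // Small hint: data lands in the inline 256-byte buffer.
/// let mut buf = AdaptiveBuffer::new(16);
/// buf.write(&[1, 2, 3]).unwrap();
/// assert!(matches!(buf, AdaptiveBuffer::Stack(_)));
///
/// // Writing past 256 bytes transparently spills to the heap.
/// buf.write(&[0u8; 300]).unwrap();
/// assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
/// assert_eq!(buf.len(), 303);
/// ```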
// The stack variant is intentionally large and kept unboxed: boxing it would
// reintroduce a heap allocation and defeat the purpose of the fast path.
#[allow(clippy::large_enum_variant)]
pub enum AdaptiveBuffer {
    /// Inline fixed-capacity storage for small conversions
    Stack(StackBuffer<256>),
    /// Heap-backed storage for large conversions
    Heap(Vec<u8>),
}

impl AdaptiveBuffer {
    /// Create a new adaptive buffer
    #[inline]
    pub fn new(size_hint: usize) -> Self {
        if size_hint <= 256 {
            Self::Stack(StackBuffer::new())
        } else {
            Self::Heap(Vec::with_capacity(size_hint))
        }
    }

    /// Write bytes to buffer, transparently spilling to the heap if the
    /// inline stack buffer runs out of space
    pub fn write(&mut self, bytes: &[u8]) -> Result<(), AllocationError> {
        match self {
            Self::Stack(buf) => {
                if buf.remaining() >= bytes.len() {
                    buf.write(bytes)
                } else {
                    // Upgrade to heap, preserving the bytes already written
                    let mut heap = Vec::with_capacity(buf.len() + bytes.len());
                    heap.extend_from_slice(buf.as_slice());
                    heap.extend_from_slice(bytes);
                    *self = Self::Heap(heap);
                    Ok(())
                }
            }
            Self::Heap(vec) => {
                vec.extend_from_slice(bytes);
                Ok(())
            }
        }
    }

    /// Get slice of written data
    pub fn as_slice(&self) -> &[u8] {
        match self {
            Self::Stack(buf) => buf.as_slice(),
            Self::Heap(vec) => vec.as_slice(),
        }
    }

    /// Get length
    pub fn len(&self) -> usize {
        match self {
            Self::Stack(buf) => buf.len(),
            Self::Heap(vec) => vec.len(),
        }
    }

    /// Check if empty
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_buffer_pool() {
        let pool = BufferPool::new(1024, 4);

        let mut buffer1 = pool.acquire();
        buffer1.as_mut().extend_from_slice(&[1, 2, 3]);
        assert_eq!(buffer1.len(), 3);

        drop(buffer1);

        // Buffer should be returned to pool
        assert_eq!(pool.size(), 1);

        let buffer2 = pool.acquire();
        assert_eq!(buffer2.len(), 0); // Should be cleared
    }

    #[test]
    fn test_typed_buffer_pool() {
        let pool = TypedBufferPool::<f32>::new(100, 4);

        let mut buffer1 = pool.acquire();
        buffer1.push(1.0);
        buffer1.push(2.0);
        assert_eq!(buffer1.len(), 2);

        drop(buffer1);

        // Buffer should be returned to pool
        assert_eq!(pool.size(), 1);

        let buffer2 = pool.acquire();
        assert_eq!(buffer2.len(), 0); // Should be cleared
    }

    #[test]
    fn test_zero_copy_converter() {
        let floats: Vec<f32> = vec![1.0, 2.0, 3.0, 4.0];
        let bytes = ZeroCopyConverter::slice_to_bytes(&floats);
        assert_eq!(bytes.len(), 16); // 4 floats * 4 bytes

        let floats_back: &[f32] = ZeroCopyConverter::bytes_to_slice(bytes);
        assert_eq!(floats_back, &floats);
    }

    #[test]
    fn test_stack_buffer() {
        let mut buf = StackBuffer::<64>::new();
        assert_eq!(buf.capacity(), 64);
        assert_eq!(buf.len(), 0);

        buf.write(&[1, 2, 3]).unwrap();
        assert_eq!(buf.len(), 3);
        assert_eq!(buf.as_slice(), &[1, 2, 3]);

        buf.clear();
        assert_eq!(buf.len(), 0);
    }

    #[test]
    fn test_stack_buffer_overflow() {
        let mut buf = StackBuffer::<4>::new();
        assert!(buf.write(&[1, 2, 3, 4]).is_ok());
        assert!(buf.write(&[5]).is_err()); // Should fail
    }

    #[test]
    fn test_adaptive_buffer_small() {
        let mut buf = AdaptiveBuffer::new(10);
        buf.write(&[1, 2, 3]).unwrap();

        assert!(matches!(buf, AdaptiveBuffer::Stack(_)));
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
    }

    #[test]
    fn test_adaptive_buffer_large() {
        let mut buf = AdaptiveBuffer::new(512);
        buf.write(&[1, 2, 3]).unwrap();

        assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
        assert_eq!(buf.as_slice(), &[1, 2, 3]);
    }

    #[test]
    fn test_adaptive_buffer_upgrade() {
        let mut buf = AdaptiveBuffer::new(10);
        buf.write(&[1; 100]).unwrap(); // Fits in the 256-byte stack buffer
        buf.write(&[2; 200]).unwrap(); // Exceeds remaining stack capacity

        // Should have upgraded to heap
        assert!(matches!(buf, AdaptiveBuffer::Heap(_)));
        assert_eq!(buf.len(), 300);
    }

    #[test]
    fn test_pooled_buffer_deref() {
        let pool = BufferPool::new(1024, 4);
        let mut buffer = pool.acquire();

        buffer.as_mut().extend_from_slice(&[1, 2, 3, 4]);

        // Test deref
        assert_eq!(buffer[0], 1);
        assert_eq!(buffer[3], 4);
    }
}