// copybook_codec_memory/lib.rs
1#![cfg_attr(not(test), deny(clippy::unwrap_used, clippy::expect_used))]
2// SPDX-License-Identifier: AGPL-3.0-or-later
3//! Memory management utilities for streaming record processing
4//!
5//! This module provides bounded memory usage patterns, reusable scratch buffers,
6//! and ordered parallel processing with sequence tracking.
7//!
8//! # Overview
9//!
10//! The memory module implements performance-critical memory management patterns
11//! for high-throughput COBOL data processing. It provides three key capabilities:
12//!
13//! 1. **Scratch buffer reuse** ([`ScratchBuffers`]) - Eliminate allocations in hot paths
14//! 2. **Deterministic parallel processing** ([`WorkerPool`], [`SequenceRing`]) - Maintain record order
15//! 3. **Bounded memory usage** ([`StreamingProcessor`]) - Process multi-GB files with <256 MiB RAM
16//!
17//! # Performance Impact
18//!
19//! These utilities enable copybook-rs to achieve:
20//! - **205 MiB/s** throughput on DISPLAY-heavy workloads (baseline: 2025-09-30)
21//! - **58 MiB/s** throughput on COMP-3-heavy workloads
22//! - **<256 MiB** steady-state memory for multi-GB file processing
23//! - **Deterministic output** with parallel processing (1-8+ worker threads)
24//!
25//! # Examples
26//!
27//! ## Basic Scratch Buffer Usage
28//!
29//! ```rust
30//! use copybook_codec_memory::ScratchBuffers;
31//!
32//! let mut scratch = ScratchBuffers::new();
33//!
34//! // Use buffers for processing
35//! scratch.digit_buffer.push(5);
36//! scratch.byte_buffer.extend_from_slice(b"data");
37//! scratch.string_buffer.push_str("text");
38//!
39//! // Clear for reuse (no deallocation)
40//! scratch.clear();
41//! ```
42//!
43//! ## Parallel Processing with Deterministic Output
44//!
45//! ```rust
46//! use copybook_codec_memory::{WorkerPool, ScratchBuffers};
47//!
48//! // Create worker pool with 4 threads
49//! let mut pool = WorkerPool::new(
50//! 4, // num_workers
51//! 100, // channel_capacity
52//! 50, // max_window_size
53//! |input: Vec<u8>, _scratch: &mut ScratchBuffers| -> String {
54//! // Process input (runs in parallel)
55//! String::from_utf8_lossy(&input).to_string()
56//! },
57//! );
58//!
59//! // Collect chunks so we know the count
60//! let chunks: Vec<Vec<u8>> = get_data_chunks();
61//! let num_chunks = chunks.len();
62//!
63//! // Submit work
64//! for chunk in chunks {
65//! pool.submit(chunk).unwrap();
66//! }
67//!
68//! // Receive exactly num_chunks results in order (non-blocking pattern)
69//! for _ in 0..num_chunks {
70//! let result = pool.recv_ordered().unwrap().unwrap();
71//! println!("{}", result);
72//! }
73//!
74//! pool.shutdown().unwrap();
75//! # fn get_data_chunks() -> Vec<Vec<u8>> { vec![vec![1, 2, 3]] }
76//! ```
77//!
78//! ## Memory-Bounded Streaming
79//!
80//! ```rust
81//! use copybook_codec_memory::StreamingProcessor;
82//! # let records: Vec<Vec<u8>> = vec![vec![1, 2, 3]];
83//!
84//! let mut processor = StreamingProcessor::with_default_limit(); // 256 MiB
85//!
86//! for record in records {
87//! // Check memory pressure before processing
88//! if processor.is_memory_pressure() {
89//! // Flush buffers or throttle input
90//! }
91//!
92//! // Track memory usage
93//! processor.update_memory_usage(record.len() as isize);
94//! processor.record_processed(record.len());
95//! }
96//!
97//! // Get statistics
98//! let stats = processor.stats();
99//! println!("Processed {} records", stats.records_processed);
100//! ```
101
// Private implementation modules; their public items are re-exported below
// so callers never depend on the internal file layout.
mod scratch;
mod streaming;
mod worker_pool;

// Unit tests for this crate; compiled only under `cargo test`.
#[cfg(test)]
mod tests;

// Public API facade. `SequenceRing` and friends live in the separate
// `copybook_sequence_ring` crate but are re-exported here so downstream
// code can import everything from `copybook_codec_memory` directly.
pub use copybook_sequence_ring::{SequenceRing, SequenceRingStats, SequencedRecord};
pub use scratch::{DigitBuffer, ScratchBuffers};
pub use streaming::{StreamingProcessor, StreamingProcessorStats};
pub use worker_pool::{WorkerPool, WorkerPoolStats};