// scirs2_signal / memory_optimized.rs
1// Memory-optimized algorithms for large signal processing
2//
3// This module provides memory-efficient implementations of signal processing
4// algorithms designed to work with very large signals that might not fit
5// entirely in memory, or where memory usage needs to be carefully controlled.
6
7use crate::error::{SignalError, SignalResult};
8
9use scirs2_core::numeric::{Complex, Complex64};
10use scirs2_core::parallel_ops::*;
11use std::f64::consts::PI;
12use std::fs;
13use std::fs::File;
14use std::io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write};
15use std::time::Instant;
16
#[allow(unused_imports)]
// NOTE(review): the attribute above is attached to the struct below, where it
// has no effect on the file-level `use` items — presumably it was intended for
// the import block at the top of the file; confirm and relocate or remove.
/// Configuration for memory-optimized operations
///
/// All sizes are in the units noted per field; samples are `f64` (8 bytes)
/// unless a routine states otherwise.
#[derive(Debug, Clone)]
pub struct MemoryConfig {
    /// Maximum memory usage in bytes
    pub max_memory_bytes: usize,
    /// Chunk size for processing (samples)
    pub chunk_size: usize,
    /// Overlap between chunks (samples)
    pub overlap_size: usize,
    /// Use memory mapping for large files
    /// (NOTE(review): not consulted by any routine visible in this module)
    pub use_mmap: bool,
    /// Temporary directory for scratch files (`None` = use system temp)
    pub temp_dir: Option<String>,
    /// Enable compression for temporary files
    /// (NOTE(review): not consulted by any routine visible in this module)
    pub compress_temp: bool,
    /// Cache size for frequently accessed data (bytes)
    pub cache_size: usize,
}
36
37impl Default for MemoryConfig {
38    fn default() -> Self {
39        #[cfg(target_pointer_width = "32")]
40        let max_memory_bytes = 256 * 1024 * 1024; // 256MB for 32-bit
41        #[cfg(target_pointer_width = "64")]
42        let max_memory_bytes = 1024 * 1024 * 1024; // 1GB for 64-bit
43
44        Self {
45            max_memory_bytes,
46            chunk_size: 65536,  // 64K samples
47            overlap_size: 1024, // 1K overlap
48            use_mmap: true,
49            temp_dir: None, // Use system temp
50            compress_temp: false,
51            cache_size: 128 * 1024 * 1024, // 128MB cache
52        }
53    }
54}
55
/// Result of memory-optimized operation
///
/// Bundles the (possibly disk-backed) output data with the memory and timing
/// statistics gathered while producing it.
#[derive(Debug)]
pub struct MemoryOptimizedResult<T> {
    /// Result data (may be on disk)
    pub data: MemoryOptimizedData<T>,
    /// Memory usage statistics
    pub memory_stats: MemoryStats,
    /// Processing time statistics
    pub timing_stats: TimingStats,
}
66
/// Memory usage statistics
///
/// Values are best-effort estimates computed by the producing routine, not
/// allocator-level measurements; `avg_memory` in particular is typically a
/// rough estimate (e.g. half of peak).
#[derive(Debug, Clone)]
pub struct MemoryStats {
    /// Peak memory usage (bytes)
    pub peak_memory: usize,
    /// Average memory usage (bytes)
    pub avg_memory: usize,
    /// Number of cache hits
    pub cache_hits: usize,
    /// Number of cache misses
    pub cache_misses: usize,
    /// Number of disk I/O operations (chunk reads/writes, not syscalls)
    pub disk_operations: usize,
}
81
/// Timing statistics
///
/// All times are wall-clock durations measured with `Instant` and truncated
/// to whole milliseconds; `memory_mgmt_time_ms` is derived as the remainder
/// after subtracting I/O and compute time from the total.
#[derive(Debug, Clone)]
pub struct TimingStats {
    /// Total processing time (ms)
    pub total_time_ms: u128,
    /// Time spent on I/O (ms)
    pub io_time_ms: u128,
    /// Time spent on computation (ms)
    pub compute_time_ms: u128,
    /// Time spent on memory management (ms)
    pub memory_mgmt_time_ms: u128,
}
94
/// Memory-optimized data storage
///
/// Describes where the result of a memory-optimized operation lives: fully
/// in memory, fully on disk, or split between the two.
#[derive(Debug)]
pub enum MemoryOptimizedData<T> {
    /// Data in memory
    InMemory(Vec<T>),
    /// Data on disk with file path
    OnDisk {
        /// Path to the file holding the serialized samples
        file_path: String,
        /// Total number of elements stored
        length: usize,
        /// Chunk size (elements) the file was written with
        chunk_size: usize,
    },
    /// Hybrid storage (frequently accessed in memory, rest on disk)
    Hybrid {
        /// Per-chunk cache; `None` presumably means the chunk is only on
        /// disk — TODO confirm, no routine in this module constructs Hybrid
        memory_chunks: Vec<Option<Vec<T>>>,
        /// Backing file for chunks not held in memory
        disk_file: String,
        /// Elements per chunk
        chunk_size: usize,
        /// Total number of elements across memory and disk
        total_length: usize,
    },
}
114
/// Memory-optimized FIR filtering for very large signals
///
/// Processes signals that may not fit in memory by using chunked processing
/// with overlap handling intended to maintain filter continuity. The input
/// file is interpreted as raw little-endian `f64` samples; filtered samples
/// are written to `output_file` in the same format.
///
/// # Arguments
/// * `input_file` - path to the raw `f64` input sample file
/// * `output_file` - path receiving the filtered `f64` samples
/// * `coefficients` - FIR filter taps (NOTE(review): an empty slice panics
///   below via `filter_length - 1` underflow — consider validating up front)
/// * `config` - memory budget and chunking parameters
///
/// # Errors
/// Returns `SignalError::ComputationError` on any file-system, read, or
/// write failure.
#[allow(dead_code)]
pub fn memory_optimized_fir_filter(
    input_file: &str,
    output_file: &str,
    coefficients: &[f64],
    config: &MemoryConfig,
) -> SignalResult<MemoryOptimizedResult<f64>> {
    let start_time = Instant::now();
    let mut memory_stats = MemoryStats {
        peak_memory: 0,
        avg_memory: 0,
        cache_hits: 0,
        cache_misses: 0,
        disk_operations: 0,
    };

    // Open input file and determine size
    let input_file_handle = File::open(input_file)
        .map_err(|e| SignalError::ComputationError(format!("Cannot open input file: {}", e)))?;

    let file_size = input_file_handle
        .metadata()
        .map_err(|e| SignalError::ComputationError(format!("Cannot get _file size: {}", e)))?
        .len() as usize;

    // The file is a flat array of little-endian f64 samples.
    let samples_count = file_size / std::mem::size_of::<f64>();
    let filter_length = coefficients.len();

    // Create output file
    let output_file_handle = File::create(output_file)
        .map_err(|e| SignalError::ComputationError(format!("Cannot create output file: {}", e)))?;

    let mut input_reader = BufReader::new(input_file_handle);
    let mut output_writer = BufWriter::new(output_file_handle);

    // Calculate optimal chunk size based on memory constraints
    let sample_size = std::mem::size_of::<f64>();
    let max_samples_in_memory = config.max_memory_bytes / sample_size / 4; // Reserve space for intermediate buffers
    let chunk_size = config.chunk_size.min(max_samples_in_memory);

    // Initialize filter state carried across chunk boundaries (tail of the
    // previous chunk, filter_length - 1 samples).
    let mut filter_memory = vec![0.0; filter_length.saturating_sub(1)];

    let mut input_buffer = vec![0.0; chunk_size + config.overlap_size];
    let mut output_buffer = vec![0.0; chunk_size];

    let mut total_processed = 0;
    let mut peak_memory = 0;
    let mut io_time = 0;
    let mut compute_time = 0;

    // Process signal in chunks.
    // NOTE(review): each iteration reads `chunk_size + overlap_size` samples
    // from the *sequential* reader but advances `total_processed` by only
    // `effective_length` (<= chunk_size); the reader is never rewound, so
    // subsequent chunks skip `overlap_size` samples of input, and the final
    // `read_exact` can hit EOF and fail because `samples_to_read` is derived
    // from `total_processed` rather than bytes actually remaining. Confirm
    // against the intended overlap scheme (seek-back or carry-over buffer).
    while total_processed < samples_count {
        let io_start = Instant::now();

        // Read chunk with overlap
        let samples_to_read =
            (chunk_size + config.overlap_size).min(samples_count - total_processed);

        // Read binary data (assuming f64 samples)
        let mut raw_buffer = vec![0u8; samples_to_read * sample_size];
        input_reader
            .read_exact(&mut raw_buffer)
            .map_err(|e| SignalError::ComputationError(format!("Read error: {}", e)))?;

        // Convert bytes to f64 samples (little-endian)
        for (i, chunk) in raw_buffer.chunks_exact(sample_size).enumerate() {
            if i < input_buffer.len() {
                input_buffer[i] = f64::from_le_bytes(chunk.try_into().map_err(|_| {
                    SignalError::ComputationError("Invalid data format".to_string())
                })?);
            }
        }

        memory_stats.disk_operations += 1;
        io_time += io_start.elapsed().as_millis();

        let compute_start = Instant::now();

        // Apply filter to chunk
        let effective_length = samples_to_read.min(chunk_size);

        // Direct-form FIR convolution; taps reaching before the chunk start
        // are served from `filter_memory` (tail of the previous chunk).
        for i in 0..effective_length {
            let mut output_sample = 0.0;

            for (j, &coeff) in coefficients.iter().enumerate() {
                let input_value = if i >= j {
                    // Current chunk
                    let input_idx = i - j;
                    if input_idx < input_buffer.len() {
                        input_buffer[input_idx]
                    } else {
                        0.0
                    }
                } else {
                    // Use previous chunk data (filter memory) or zero
                    if j - i - 1 < filter_memory.len() {
                        filter_memory[filter_memory.len() - (j - i)]
                    } else {
                        0.0
                    }
                };

                output_sample += input_value * coeff;
            }

            output_buffer[i] = output_sample;
        }

        // Update filter memory for next chunk.
        // NOTE(review): `effective_length - (filter_length - 1) + i` underflows
        // (debug panic) whenever `effective_length < filter_length - 1`, i.e.
        // a short final chunk with a long filter — guard this subtraction.
        let memory_start = filter_memory.len().saturating_sub(filter_length - 1);
        for i in 0..(filter_length - 1).min(effective_length) {
            if memory_start + i < filter_memory.len() {
                filter_memory[memory_start + i] =
                    input_buffer[effective_length - (filter_length - 1) + i];
            }
        }

        compute_time += compute_start.elapsed().as_millis();

        let io_start = Instant::now();

        // Write output chunk as little-endian f64
        for &sample in &output_buffer[..effective_length] {
            let sample: f64 = sample;
            let bytes = sample.to_le_bytes();
            output_writer
                .write_all(&bytes)
                .map_err(|e| SignalError::ComputationError(format!("Write error: {}", e)))?;
        }

        memory_stats.disk_operations += 1;
        io_time += io_start.elapsed().as_millis();

        // Update memory usage tracking (sum of the live buffers)
        let current_memory = input_buffer.len() * sample_size
            + output_buffer.len() * sample_size
            + filter_memory.len() * sample_size;
        peak_memory = peak_memory.max(current_memory);

        total_processed += effective_length;

        // Periodically clear + resize buffers; with constant sizes this keeps
        // the same capacity, so it mainly re-zeroes contents.
        if total_processed % (chunk_size * 10) == 0 {
            input_buffer.clear();
            input_buffer.resize(chunk_size + config.overlap_size, 0.0);
            output_buffer.clear();
            output_buffer.resize(chunk_size, 0.0);
        }
    }

    // Flush output
    output_writer
        .flush()
        .map_err(|e| SignalError::ComputationError(format!("Flush error: {}", e)))?;

    memory_stats.peak_memory = peak_memory;
    memory_stats.avg_memory = peak_memory / 2; // Rough estimate

    let total_time = start_time.elapsed().as_millis();
    let timing_stats = TimingStats {
        total_time_ms: total_time,
        io_time_ms: io_time,
        compute_time_ms: compute_time,
        memory_mgmt_time_ms: total_time - io_time - compute_time,
    };

    Ok(MemoryOptimizedResult {
        data: MemoryOptimizedData::OnDisk {
            file_path: output_file.to_string(),
            length: samples_count,
            chunk_size,
        },
        memory_stats,
        timing_stats,
    })
}
297
298/// Memory-optimized FFT for very large signals
299///
300/// Computes FFT of signals larger than available memory using disk-based
301/// radix-2 algorithms with minimal memory footprint.
302#[allow(dead_code)]
303pub fn memory_optimized_fft(
304    input_file: &str,
305    output_file: &str,
306    config: &MemoryConfig,
307) -> SignalResult<MemoryOptimizedResult<scirs2_core::numeric::Complex64>> {
308    let _start_time = Instant::now();
309
310    // Open input _file and validate size
311    let input_file_handle = File::open(input_file)
312        .map_err(|e| SignalError::ComputationError(format!("Cannot open input file: {}", e)))?;
313
314    let file_size = input_file_handle
315        .metadata()
316        .map_err(|e| SignalError::ComputationError(format!("Cannot get _file size: {}", e)))?
317        .len() as usize;
318
319    let complex_size = std::mem::size_of::<Complex64>();
320    let n = file_size / complex_size;
321
322    // Validate that n is a power of 2
323    if !n.is_power_of_two() {
324        return Err(SignalError::ValueError(
325            "FFT size must be a power of 2 for memory-optimized implementation".to_string(),
326        ));
327    }
328
329    let log2n = n.trailing_zeros() as usize;
330
331    // Calculate memory requirements
332    let sample_size = complex_size;
333    let max_samples_in_memory = config.max_memory_bytes / sample_size / 3; // Triple buffering
334
335    if n <= max_samples_in_memory {
336        // Can fit in memory - use standard FFT
337        return memory_fft_in_core(input_file, output_file, n, config);
338    }
339
340    // Use out-of-core FFT algorithm
341    memory_fft_out_of_core(input_file, output_file, n, log2n, config)
342}
343
/// In-core FFT for moderately large signals
///
/// Loads the entire signal (interleaved little-endian (re, im) `f64` pairs)
/// into memory, computes the FFT via `scirs2_fft`, and writes the spectrum
/// back in the same interleaved format.
///
/// # Errors
/// Returns `SignalError::ComputationError` on I/O failures or if the
/// underlying FFT fails.
#[allow(dead_code)]
fn memory_fft_in_core(
    input_file: &str,
    output_file: &str,
    n: usize,
    _config: &MemoryConfig,
) -> SignalResult<MemoryOptimizedResult<scirs2_core::numeric::Complex64>> {
    let start_time = Instant::now();
    let io_start = Instant::now();

    // Read entire signal into memory
    let input_file_handle = File::open(input_file)
        .map_err(|e| SignalError::ComputationError(format!("Cannot open input file: {}", e)))?;

    let mut input_reader = BufReader::new(input_file_handle);
    let mut data = vec![Complex64::new(0.0, 0.0); n];

    // Read complex data: each sample is 16 bytes on disk — real part then
    // imaginary part, both little-endian f64. BufReader amortizes the small
    // per-sample reads.
    for i in 0..n {
        let mut real_bytes = [0u8; 8];
        let mut imag_bytes = [0u8; 8];

        input_reader
            .read_exact(&mut real_bytes)
            .map_err(|e| SignalError::ComputationError(format!("Read error: {}", e)))?;
        input_reader
            .read_exact(&mut imag_bytes)
            .map_err(|e| SignalError::ComputationError(format!("Read error: {}", e)))?;

        let real = f64::from_le_bytes(real_bytes);
        let imag = f64::from_le_bytes(imag_bytes);
        data[i] = Complex64::new(real, imag);
    }

    let io_time = io_start.elapsed().as_millis();
    let compute_start = Instant::now();

    // Compute FFT using scirs2_fft and copy the spectrum back into `data`
    let fft_result = scirs2_fft::fft(&data, Some(data.len()))
        .map_err(|e| SignalError::ComputationError(format!("FFT failed: {}", e)))?;
    for (i, c) in fft_result.iter().enumerate() {
        data[i] = *c;
    }

    let compute_time = compute_start.elapsed().as_millis();
    let io_start = Instant::now();

    // Write result in the same interleaved (re, im) layout as the input
    let output_file_handle = File::create(output_file)
        .map_err(|e| SignalError::ComputationError(format!("Cannot create output file: {}", e)))?;

    let mut output_writer = BufWriter::new(output_file_handle);

    for sample in &data {
        output_writer
            .write_all(&sample.re.to_le_bytes())
            .map_err(|e| SignalError::ComputationError(format!("Write error: {}", e)))?;
        output_writer
            .write_all(&sample.im.to_le_bytes())
            .map_err(|e| SignalError::ComputationError(format!("Write error: {}", e)))?;
    }

    output_writer
        .flush()
        .map_err(|e| SignalError::ComputationError(format!("Flush error: {}", e)))?;

    let io_time_total = io_time + io_start.elapsed().as_millis();
    let total_time = start_time.elapsed().as_millis();

    let memory_stats = MemoryStats {
        peak_memory: n * std::mem::size_of::<Complex64>(),
        avg_memory: n * std::mem::size_of::<Complex64>() / 2,
        cache_hits: 0,
        cache_misses: 0,
        disk_operations: 2, // Read and write
    };

    let timing_stats = TimingStats {
        total_time_ms: total_time,
        io_time_ms: io_time_total,
        compute_time_ms: compute_time,
        memory_mgmt_time_ms: total_time - io_time_total - compute_time,
    };

    Ok(MemoryOptimizedResult {
        data: MemoryOptimizedData::OnDisk {
            file_path: output_file.to_string(),
            length: n,
            chunk_size: n,
        },
        memory_stats,
        timing_stats,
    })
}
439
/// Out-of-core FFT for very large signals
///
/// Splits the `log2n` radix-2 stages into disk-backed stages (processed one
/// at a time via `process_fft_stage_disk`) followed by in-memory stages
/// (`process_fft_stages_memory`), chaining intermediate results through
/// temporary files.
///
/// NOTE(review): several aspects need verification against the intended
/// algorithm design:
/// - No bit-reversal permutation is performed anywhere in the staged path,
///   which a radix-2 Cooley-Tukey FFT normally requires; combined with
///   `process_fft_stages_memory` delegating to a *full* in-core FFT, the
///   staged result is unlikely to equal the true DFT.
/// - If `in_memory_stages == 0`, the final stage output stays in a temp
///   file and `output_file` is never written.
/// - The `/tmp` fallback is non-portable (fails on Windows); prefer
///   `std::env::temp_dir()`.
/// - `log2_memory_limit` is computed via `f32::log2`, which is imprecise
///   for large sample counts.
#[allow(dead_code)]
fn memory_fft_out_of_core(
    input_file: &str,
    output_file: &str,
    n: usize,
    log2n: usize,
    config: &MemoryConfig,
) -> SignalResult<MemoryOptimizedResult<scirs2_core::numeric::Complex64>> {
    let start_time = Instant::now();
    let mut memory_stats = MemoryStats {
        peak_memory: 0,
        avg_memory: 0,
        cache_hits: 0,
        cache_misses: 0,
        disk_operations: 0,
    };

    // Calculate stage parameters
    let complex_size = std::mem::size_of::<Complex64>();
    let max_samples_in_memory = config.max_memory_bytes / complex_size / 4;

    // Determine how many stages we can do in memory vs. on disk
    let log2_memory_limit = (max_samples_in_memory as f32).log2().floor() as usize;
    let in_memory_stages = log2_memory_limit.min(log2n);
    let disk_stages = log2n - in_memory_stages;

    // Create temporary files for intermediate results
    let temp_dir = config.temp_dir.as_deref().unwrap_or("/tmp");
    let temp_file = format!("{}/fft_temp_{}.dat", temp_dir, std::process::id());

    let mut current_input = input_file.to_string();
    let mut current_output = if disk_stages > 0 {
        temp_file.clone()
    } else {
        output_file.to_string()
    };

    let mut total_io_time = 0;
    let mut total_compute_time = 0;

    // Perform out-of-core stages first (if any), ping-ponging through
    // per-stage temp files.
    for stage in 0..disk_stages {
        let _stage_start = Instant::now();

        // Process this stage with disk I/O
        let stage_result =
            process_fft_stage_disk(&current_input, &current_output, n, stage, config)?;

        memory_stats.disk_operations += stage_result.memory_stats.disk_operations;
        total_io_time += stage_result.timing_stats.io_time_ms;
        total_compute_time += stage_result.timing_stats.compute_time_ms;

        // Swap files for next stage
        if stage < disk_stages - 1 {
            current_input = current_output.clone();
            current_output = format!("{}_stage_{}", temp_file, stage + 1);
        }
    }

    // Perform in-memory stages
    if in_memory_stages > 0 {
        let final_input = if disk_stages > 0 {
            &current_output
        } else {
            input_file
        };
        let stage_result = process_fft_stages_memory(
            final_input,
            output_file,
            n,
            disk_stages,
            in_memory_stages,
            config,
        )?;

        memory_stats.disk_operations += stage_result.memory_stats.disk_operations;
        total_io_time += stage_result.timing_stats.io_time_ms;
        total_compute_time += stage_result.timing_stats.compute_time_ms;
        memory_stats.peak_memory = stage_result.memory_stats.peak_memory;
    }

    // Clean up temporary files (best-effort; errors deliberately ignored)
    let _ = std::fs::remove_file(&temp_file);
    for stage in 0..disk_stages.saturating_sub(1) {
        let _ = std::fs::remove_file(format!("{}_stage_{}", temp_file, stage + 1));
    }

    let total_time = start_time.elapsed().as_millis();
    let timing_stats = TimingStats {
        total_time_ms: total_time,
        io_time_ms: total_io_time,
        compute_time_ms: total_compute_time,
        memory_mgmt_time_ms: total_time - total_io_time - total_compute_time,
    };

    Ok(MemoryOptimizedResult {
        data: MemoryOptimizedData::OnDisk {
            file_path: output_file.to_string(),
            length: n,
            chunk_size: config.chunk_size,
        },
        memory_stats,
        timing_stats,
    })
}
546
/// Process a single FFT stage with disk I/O
///
/// Applies one radix-2 butterfly stage (`stage_size = 2^(stage+1)`) to the
/// whole signal, streaming groups of butterflies through memory in chunks
/// sized to the configured budget. Input and output use the interleaved
/// little-endian (re, im) `f64` file layout.
///
/// NOTE(review): the butterflies assume natural-order decimation-in-time
/// grouping with no bit-reversal permutation of the data — confirm this
/// matches the overall staged-FFT design in `memory_fft_out_of_core`.
#[allow(dead_code)]
fn process_fft_stage_disk(
    input_file: &str,
    output_file: &str,
    n: usize,
    stage: usize,
    config: &MemoryConfig,
) -> SignalResult<MemoryOptimizedResult<scirs2_core::numeric::Complex64>> {
    let start_time = std::time::Instant::now();
    let mut disk_ops = 0;

    // Open files
    let input_handle = File::open(input_file)
        .map_err(|e| SignalError::ComputationError(format!("Cannot open input: {}", e)))?;
    let output_handle = File::create(output_file)
        .map_err(|e| SignalError::ComputationError(format!("Cannot create output: {}", e)))?;

    let mut input_reader = BufReader::new(input_handle);
    let mut output_writer = BufWriter::new(output_handle);

    // Calculate stage parameters
    let stage_size = 1 << (stage + 1);
    let half_stage = stage_size / 2;
    let num_groups = n / stage_size;

    let complex_size = std::mem::size_of::<Complex64>();
    // .max(1) guards against a zero step when one group exceeds the budget.
    let max_groups_in_memory = config.max_memory_bytes / (stage_size * complex_size);
    let groups_per_chunk = max_groups_in_memory.max(1);

    let mut io_time = 0;
    let mut compute_time = 0;

    // Process groups in chunks
    for chunk_start in (0..num_groups).step_by(groups_per_chunk) {
        let io_start = std::time::Instant::now();

        let chunk_groups = groups_per_chunk.min(num_groups - chunk_start);
        let chunk_samples = chunk_groups * stage_size;

        // Read chunk
        let mut data = vec![Complex64::new(0.0, 0.0); chunk_samples];

        // Seek to the correct position (chunks are contiguous, so this is
        // effectively sequential; seeking on BufReader discards its buffer)
        let byte_offset = chunk_start * stage_size * complex_size;
        input_reader
            .seek(SeekFrom::Start(byte_offset as u64))
            .map_err(|e| SignalError::ComputationError(format!("Seek error: {}", e)))?;

        for i in 0..chunk_samples {
            let mut real_bytes = [0u8; 8];
            let mut imag_bytes = [0u8; 8];

            input_reader
                .read_exact(&mut real_bytes)
                .map_err(|e| SignalError::ComputationError(format!("Read error: {}", e)))?;
            input_reader
                .read_exact(&mut imag_bytes)
                .map_err(|e| SignalError::ComputationError(format!("Read error: {}", e)))?;

            data[i] = Complex64::new(
                f64::from_le_bytes(real_bytes),
                f64::from_le_bytes(imag_bytes),
            );
        }

        disk_ops += 1;
        io_time += io_start.elapsed().as_millis();

        let compute_start = std::time::Instant::now();

        // Process butterfly operations for this chunk: for each group, pair
        // element i with i + half_stage and apply twiddle e^(-2*pi*i*k/stage_size).
        for group in 0..chunk_groups {
            let group_offset = group * stage_size;

            for i in 0..half_stage {
                let j = i + half_stage;
                let twiddle_angle = -2.0 * PI * (i as f64) / (stage_size as f64);
                let twiddle = Complex64::new(twiddle_angle.cos(), twiddle_angle.sin());

                let idx1 = group_offset + i;
                let idx2 = group_offset + j;

                let t = data[idx2] * twiddle;
                let u = data[idx1];

                data[idx1] = u + t;
                data[idx2] = u - t;
            }
        }

        compute_time += compute_start.elapsed().as_millis();

        let io_start = std::time::Instant::now();

        // Write chunk back (Seek on BufWriter flushes its buffer first)
        output_writer
            .seek(SeekFrom::Start(byte_offset as u64))
            .map_err(|e| SignalError::ComputationError(format!("Output seek error: {}", e)))?;

        for sample in &data {
            output_writer
                .write_all(&sample.re.to_le_bytes())
                .map_err(|e| SignalError::ComputationError(format!("Write error: {}", e)))?;
            output_writer
                .write_all(&sample.im.to_le_bytes())
                .map_err(|e| SignalError::ComputationError(format!("Write error: {}", e)))?;
        }

        disk_ops += 1;
        io_time += io_start.elapsed().as_millis();
    }

    output_writer
        .flush()
        .map_err(|e| SignalError::ComputationError(format!("Flush error: {}", e)))?;

    let total_time = start_time.elapsed().as_millis();

    let memory_stats = MemoryStats {
        peak_memory: groups_per_chunk * stage_size * complex_size,
        avg_memory: groups_per_chunk * stage_size * complex_size / 2,
        cache_hits: 0,
        cache_misses: 0,
        disk_operations: disk_ops,
    };

    let timing_stats = TimingStats {
        total_time_ms: total_time,
        io_time_ms: io_time,
        compute_time_ms: compute_time,
        memory_mgmt_time_ms: total_time - io_time - compute_time,
    };

    Ok(MemoryOptimizedResult {
        data: MemoryOptimizedData::OnDisk {
            file_path: output_file.to_string(),
            length: n,
            chunk_size: config.chunk_size,
        },
        memory_stats,
        timing_stats,
    })
}
691
/// Process remaining FFT stages in memory
///
/// NOTE(review): `_start_stage` and `_stages` are ignored and the call is
/// delegated to `memory_fft_in_core`, which recomputes a *full* FFT of its
/// input. When disk stages have already transformed the data, this
/// re-applies all butterflies and likely does not produce the intended
/// spectrum — confirm against the staged-algorithm design.
#[allow(dead_code)]
fn process_fft_stages_memory(
    input_file: &str,
    output_file: &str,
    n: usize,
    _start_stage: usize,
    _stages: usize,
    config: &MemoryConfig,
) -> SignalResult<MemoryOptimizedResult<scirs2_core::numeric::Complex64>> {
    // For simplicity, delegate to in-core implementation.
    // In a full implementation, this would do only the remaining radix-2
    // stages starting at `_start_stage`.
    memory_fft_in_core(input_file, output_file, n, config)
}
706
707/// Memory-optimized spectrogram computation
708///
709/// Computes spectrograms of very large signals using sliding window approach
710/// with minimal memory footprint.
711#[allow(dead_code)]
712pub fn memory_optimized_spectrogram(
713    input_file: &str,
714    output_file: &str,
715    window_size: usize,
716    hop_size: usize,
717    _config: &MemoryConfig,
718) -> SignalResult<MemoryOptimizedResult<f64>> {
719    let start_time = Instant::now();
720
721    // Validate parameters
722    if window_size == 0 || hop_size == 0 {
723        return Err(SignalError::ValueError(
724            "Window _size and hop _size must be positive".to_string(),
725        ));
726    }
727
728    if hop_size > window_size {
729        return Err(SignalError::ValueError(
730            "Hop _size should not exceed window _size".to_string(),
731        ));
732    }
733
734    // Open files
735    let input_handle = File::open(input_file)
736        .map_err(|e| SignalError::ComputationError(format!("Cannot open input: {}", e)))?;
737    let output_handle = File::create(output_file)
738        .map_err(|e| SignalError::ComputationError(format!("Cannot create output: {}", e)))?;
739
740    let file_size = input_handle
741        .metadata()
742        .map_err(|e| SignalError::ComputationError(format!("Cannot get _file size: {}", e)))?
743        .len() as usize;
744
745    let n_samples = file_size / std::mem::size_of::<f64>();
746    let n_frames = (n_samples - window_size) / hop_size + 1;
747    let n_freqs = window_size / 2 + 1;
748
749    let mut input_reader = BufReader::new(input_handle);
750    let mut output_writer = BufWriter::new(output_handle);
751
752    // Create window function (Hann window)
753    let window: Vec<f64> = (0..window_size)
754        .map(|i| {
755            0.5 * (1.0
756                - ((2.0 * std::f64::consts::PI * i as f64) / (window_size as f64 - 1.0)).cos())
757        })
758        .collect();
759
760    let mut buffer = vec![0.0; window_size];
761    let mut fft_buffer = vec![Complex64::new(0.0, 0.0); window_size];
762    let mut magnitude_buffer = vec![0.0; n_freqs];
763
764    let mut total_io_time = 0;
765    let mut total_compute_time = 0;
766    let mut disk_ops = 0;
767
768    // Process each frame
769    for frame in 0..n_frames {
770        let io_start = Instant::now();
771
772        // Seek to frame position
773        let byte_offset = frame * hop_size * std::mem::size_of::<f64>();
774        input_reader
775            .seek(SeekFrom::Start(byte_offset as u64))
776            .map_err(|e| SignalError::ComputationError(format!("Seek error: {}", e)))?;
777
778        // Read frame data
779        for i in 0..window_size {
780            let mut bytes = [0u8; 8];
781            if input_reader.read_exact(&mut bytes).is_ok() {
782                buffer[i] = f64::from_le_bytes(bytes);
783            } else {
784                buffer[i] = 0.0; // Zero-pad if we reach end of _file
785            }
786        }
787
788        disk_ops += 1;
789        total_io_time += io_start.elapsed().as_millis();
790
791        let compute_start = Instant::now();
792
793        // Apply window and prepare FFT buffer
794        for i in 0..window_size {
795            fft_buffer[i] = Complex64::new(buffer[i] * window[i], 0.0);
796        }
797
798        // Compute FFT using scirs2_fft
799        let fft_result = scirs2_fft::fft(&fft_buffer, Some(fft_buffer.len()))
800            .map_err(|e| SignalError::ComputationError(format!("FFT failed: {}", e)))?;
801        for (i, c) in fft_result.iter().enumerate() {
802            fft_buffer[i] = *c;
803        }
804
805        // Compute magnitude spectrum (one-sided)
806        for i in 0..n_freqs {
807            magnitude_buffer[i] = fft_buffer[i].norm_sqr();
808            if i > 0 && i < window_size / 2 {
809                magnitude_buffer[i] *= 2.0; // Account for negative frequencies
810            }
811        }
812
813        total_compute_time += compute_start.elapsed().as_millis();
814
815        let io_start = Instant::now();
816
817        // Write magnitude spectrum
818        for &mag in &magnitude_buffer {
819            output_writer
820                .write_all(&mag.to_le_bytes())
821                .map_err(|e| SignalError::ComputationError(format!("Write error: {}", e)))?;
822        }
823
824        disk_ops += 1;
825        total_io_time += io_start.elapsed().as_millis();
826    }
827
828    output_writer
829        .flush()
830        .map_err(|e| SignalError::ComputationError(format!("Flush error: {}", e)))?;
831
832    let total_time = start_time.elapsed().as_millis();
833
834    let memory_stats = MemoryStats {
835        peak_memory: (window_size * 2 + n_freqs) * std::mem::size_of::<f64>(),
836        avg_memory: (window_size * 2 + n_freqs) * std::mem::size_of::<f64>() / 2,
837        cache_hits: 0,
838        cache_misses: 0,
839        disk_operations: disk_ops,
840    };
841
842    let timing_stats = TimingStats {
843        total_time_ms: total_time,
844        io_time_ms: total_io_time,
845        compute_time_ms: total_compute_time,
846        memory_mgmt_time_ms: total_time - total_io_time - total_compute_time,
847    };
848
849    Ok(MemoryOptimizedResult {
850        data: MemoryOptimizedData::OnDisk {
851            file_path: output_file.to_string(),
852            length: n_frames * n_freqs,
853            chunk_size: n_freqs,
854        },
855        memory_stats,
856        timing_stats,
857    })
858}
859
#[cfg(test)]
mod tests {
    use super::*;

    /// The default configuration must advertise non-zero working sizes.
    #[test]
    fn test_memory_config_defaults() {
        let cfg = MemoryConfig::default();
        assert!(cfg.max_memory_bytes > 0);
        assert!(cfg.chunk_size > 0);
        assert!(cfg.overlap_size > 0);
    }

    /// Construct both storage variants and check they round-trip their payloads.
    #[test]
    fn test_memory_optimized_data_variants() {
        let resident = MemoryOptimizedData::InMemory(vec![1.0, 2.0, 3.0]);
        let spilled: MemoryOptimizedData<f64> = MemoryOptimizedData::OnDisk {
            file_path: std::env::temp_dir()
                .join("test.dat")
                .to_string_lossy()
                .into_owned(),
            length: 1000,
            chunk_size: 256,
        };

        if let MemoryOptimizedData::InMemory(ref samples) = resident {
            assert_eq!(samples.len(), 3);
        } else {
            panic!("Expected InMemory variant");
        }

        if let MemoryOptimizedData::OnDisk { length, .. } = spilled {
            assert_eq!(length, 1000);
        } else {
            panic!("Expected OnDisk variant");
        }
    }

    /// Write a small sine signal as raw LE f64 samples and verify the file size.
    #[test]
    fn test_create_test_signal_file() -> SignalResult<()> {
        const N_SAMPLES: usize = 1000;
        let path = std::env::temp_dir().join("test_signal.dat");
        let path_str = path.to_str().expect("Operation failed");

        // Create test signal file (8 bytes per sample, little-endian)
        let mut file = File::create(path_str).expect("Operation failed");
        for sample in (0..N_SAMPLES).map(|i| (i as f64 * 0.1).sin()) {
            file.write_all(&sample.to_le_bytes())
                .expect("Operation failed");
        }
        file.flush().expect("Operation failed");

        // Verify file was created with the expected byte count
        let metadata = fs::metadata(path_str).expect("Operation failed");
        assert_eq!(metadata.len(), (N_SAMPLES * 8) as u64);

        // Clean up
        fs::remove_file(path_str).expect("Operation failed");

        Ok(())
    }
}