memscope_rs/export/binary/
selective_json_exporter.rs

1//! Selective JSON exporter that integrates all optimization components
2//!
3//! This module provides a unified interface for exporting binary allocation data
4//! to JSON format with selective field processing, streaming output, and
5//! comprehensive performance optimizations.
6
7use crate::export::binary::batch_processor::{BatchProcessor, BatchProcessorConfig};
8use crate::export::binary::cache::{IndexCache, IndexCacheConfig};
9use crate::export::binary::error::BinaryExportError;
10use crate::export::binary::field_parser::{FieldParser, PartialAllocationInfo};
11use crate::export::binary::filter_engine::FilterEngine;
12
13use crate::export::binary::selective_reader::{
14    AllocationField, AllocationFilter, SelectiveReadOptionsBuilder,
15};
16use crate::export::binary::streaming_json_writer::{
17    SelectiveSerializationOptions, StreamingJsonStats, StreamingJsonWriter,
18    StreamingJsonWriterConfig,
19};
20use std::collections::HashSet;
21use std::fs::File;
22use std::io::BufWriter;
23use std::path::Path;
24use std::time::Instant;
25
/// Configuration for selective JSON export operations
///
/// Aggregates the configurations of all underlying components plus a few
/// exporter-level switches. Use [`SelectiveJsonExportConfigBuilder`] for
/// fluent construction, or rely on `Default`.
#[derive(Debug, Clone)]
pub struct SelectiveJsonExportConfig {
    /// Configuration for streaming JSON writer
    pub json_writer_config: StreamingJsonWriterConfig,

    /// Configuration for batch processor
    pub batch_processor_config: BatchProcessorConfig,

    /// Configuration for index cache
    pub index_cache_config: IndexCacheConfig,

    /// Configuration for selective serialization
    pub serialization_options: SelectiveSerializationOptions,

    /// Enable parallel processing for multiple files
    // NOTE(review): the exporter currently processes files sequentially even
    // when this is true; the flag only selects the chunked code path in
    // `export_multiple_json_types` — confirm intended semantics.
    pub enable_parallel_processing: bool,

    /// Maximum number of concurrent export operations
    pub max_concurrent_exports: usize,

    /// Enable comprehensive error recovery
    // NOTE(review): not read by any code visible in this module — TODO confirm usage.
    pub enable_error_recovery: bool,

    /// Enable detailed performance monitoring
    // NOTE(review): not read by any code visible in this module — TODO confirm usage.
    pub enable_performance_monitoring: bool,
}
53
54impl Default for SelectiveJsonExportConfig {
55    fn default() -> Self {
56        Self {
57            json_writer_config: StreamingJsonWriterConfig::default(),
58            batch_processor_config: BatchProcessorConfig::default(),
59            index_cache_config: IndexCacheConfig::default(),
60            serialization_options: SelectiveSerializationOptions::default(),
61            enable_parallel_processing: true,
62            max_concurrent_exports: 4,
63            enable_error_recovery: true,
64            enable_performance_monitoring: true,
65        }
66    }
67}
68
/// Statistics for selective JSON export operations
///
/// All totals are cumulative over the lifetime of a `SelectiveJsonExporter`
/// (until `reset_stats` is called); a snapshot is returned from every export
/// method.
#[derive(Debug, Clone, Default)]
pub struct SelectiveJsonExportStats {
    /// Statistics from streaming JSON writer
    // Holds the stats of the most recent export only; it is overwritten,
    // not accumulated, by each export call.
    pub json_writer_stats: StreamingJsonStats,

    /// Total export time in microseconds
    pub total_export_time_us: u64,

    /// Number of files processed
    pub files_processed: u32,

    /// Total allocations exported
    pub total_allocations_exported: u64,

    /// Total bytes written across all files
    pub total_bytes_written: u64,

    /// Number of index cache hits
    // Mirrors the index cache's own cumulative counter (assigned, not added).
    pub index_cache_hits: u64,

    /// Number of index cache misses
    // Mirrors the index cache's own cumulative counter (assigned, not added).
    pub index_cache_misses: u64,

    /// Number of errors encountered and recovered
    // NOTE(review): never incremented by code visible in this module.
    pub errors_recovered: u32,

    /// Average export throughput (allocations per second)
    pub avg_export_throughput: f64,

    /// Memory efficiency (bytes per allocation)
    pub memory_efficiency: f64,
}
102
103impl SelectiveJsonExportStats {
104    /// Calculate overall cache hit rate
105    pub fn cache_hit_rate(&self) -> f64 {
106        let total_requests = self.index_cache_hits + self.index_cache_misses;
107        if total_requests == 0 {
108            0.0
109        } else {
110            (self.index_cache_hits as f64 / total_requests as f64) * 100.0
111        }
112    }
113
114    /// Calculate export efficiency (files per second)
115    pub fn export_efficiency(&self) -> f64 {
116        if self.total_export_time_us == 0 {
117            0.0
118        } else {
119            (self.files_processed as f64 * 1_000_000.0) / self.total_export_time_us as f64
120        }
121    }
122
123    /// Calculate compression ratio compared to full export
124    pub fn compression_ratio(&self) -> f64 {
125        if self.total_allocations_exported == 0 {
126            0.0
127        } else {
128            // Estimate full export size (approximate)
129            let estimated_full_size = self.total_allocations_exported * 500; // ~500 bytes per full allocation
130            if estimated_full_size == 0 {
131                0.0
132            } else {
133                (self.total_bytes_written as f64 / estimated_full_size as f64) * 100.0
134            }
135        }
136    }
137}
138
/// Selective JSON exporter with integrated optimization components
///
/// Owns the index cache, batch processor, filter engine and field parser,
/// and accumulates export statistics across calls. Construct with
/// [`Self::new`] or [`Self::with_config`].
pub struct SelectiveJsonExporter {
    /// Configuration
    config: SelectiveJsonExportConfig,

    /// Index cache for performance optimization
    index_cache: IndexCache,

    /// Batch processor for efficient record processing
    batch_processor: BatchProcessor,

    /// Filter engine for intelligent filtering
    // NOTE(review): built against a placeholder (empty) index in
    // `with_config` and never rebound to a real file index in this module —
    // confirm pre-filtering behaves as intended.
    filter_engine: FilterEngine,

    /// Field parser for selective field parsing
    #[allow(dead_code)]
    field_parser: FieldParser,

    /// Export statistics
    stats: SelectiveJsonExportStats,
}
160
161impl SelectiveJsonExporter {
162    /// Create a new selective JSON exporter with default configuration
163    pub fn new() -> Result<Self, BinaryExportError> {
164        Self::with_config(SelectiveJsonExportConfig::default())
165    }
166
    /// Create a new selective JSON exporter with custom configuration
    ///
    /// # Errors
    /// Fails if the index cache cannot be created from
    /// `config.index_cache_config`.
    pub fn with_config(config: SelectiveJsonExportConfig) -> Result<Self, BinaryExportError> {
        let index_cache = IndexCache::new(config.index_cache_config.clone())?;
        let batch_processor = BatchProcessor::with_config(config.batch_processor_config.clone());
        // Create a placeholder filter engine - we'll update it when we have an index
        // NOTE(review): no code in this module ever replaces this placeholder
        // with a real index; `filter_candidates` therefore runs against an
        // empty BinaryIndex — confirm this is intended.
        let dummy_index = std::sync::Arc::new(crate::export::binary::index::BinaryIndex::new(
            std::path::PathBuf::new(),
            0,
            0,
            crate::export::binary::format::FileHeader::new_legacy(0),
        ));
        let filter_engine = FilterEngine::new(dummy_index);
        let field_parser = FieldParser::new();

        Ok(Self {
            config,
            index_cache,
            batch_processor,
            filter_engine,
            field_parser,
            stats: SelectiveJsonExportStats::default(),
        })
    }
190
191    /// Export a single binary file to JSON with selective fields
192    pub fn export_to_json_selective<P: AsRef<Path>, Q: AsRef<Path>>(
193        &mut self,
194        binary_path: P,
195        json_path: Q,
196        requested_fields: &HashSet<AllocationField>,
197        filters: &[AllocationFilter],
198    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
199        let export_start = Instant::now();
200
201        // Build or retrieve index
202        let index = self.get_or_build_index(&binary_path)?;
203
204        // Create selective read options
205        let _read_options = SelectiveReadOptionsBuilder::new()
206            .with_fields(requested_fields.clone())
207            .filters(filters.to_vec())
208            .build()?;
209
210        // Open binary file for reading
211        let mut binary_file = File::open(&binary_path)?;
212
213        // Create JSON writer
214        let json_file = File::create(&json_path)?;
215        let buffered_writer = BufWriter::new(json_file);
216        let mut json_writer = StreamingJsonWriter::with_config(
217            buffered_writer,
218            self.config.json_writer_config.clone(),
219        )?;
220
221        // Start JSON document
222        json_writer.write_header(index.record_count() as u64)?;
223
224        // Process records in batches
225        let mut processed_count = 0;
226        let batch_size = self.config.batch_processor_config.batch_size;
227
228        for batch_start in (0..index.record_count() as usize).step_by(batch_size) {
229            let batch_end = (batch_start + batch_size).min(index.record_count() as usize);
230            let batch_offsets: Vec<u64> = (batch_start..batch_end)
231                .filter_map(|i| index.get_record_offset(i))
232                .collect();
233
234            if batch_offsets.is_empty() {
235                continue;
236            }
237
238            // Apply pre-filtering using index
239            let filtered_indices = self.filter_engine.filter_candidates(filters)?;
240            let filtered_offsets: Vec<u64> = batch_offsets
241                .into_iter()
242                .enumerate()
243                .filter(|(i, _)| filtered_indices.contains(&(batch_start + i)))
244                .map(|(_, offset)| offset)
245                .collect();
246
247            if filtered_offsets.is_empty() {
248                continue;
249            }
250
251            // Read and parse records
252            let records = self.batch_processor.process_batch(
253                &mut binary_file,
254                &filtered_offsets,
255                requested_fields,
256            )?;
257
258            // Convert PartialAllocationInfo to AllocationInfo for filtering
259            let full_records: Vec<crate::core::types::AllocationInfo> = records
260                .records
261                .iter()
262                .map(|partial| partial.clone().to_full_allocation())
263                .collect();
264
265            // Apply precise filtering
266            let filtered_full_records = self
267                .filter_engine
268                .apply_precise_filters(full_records, filters)?;
269
270            // Convert back to PartialAllocationInfo for JSON writing
271            let filtered_records: Vec<PartialAllocationInfo> = filtered_full_records
272                .iter()
273                .map(|full| PartialAllocationInfo {
274                    ptr: Some(full.ptr),
275                    size: Some(full.size),
276                    var_name: Some(full.var_name.clone()),
277                    type_name: Some(full.type_name.clone()),
278                    scope_name: Some(full.scope_name.clone()),
279                    timestamp_alloc: Some(full.timestamp_alloc),
280                    timestamp_dealloc: Some(full.timestamp_dealloc),
281                    thread_id: Some(full.thread_id.clone()),
282                    borrow_count: Some(full.borrow_count),
283                    stack_trace: Some(full.stack_trace.clone()),
284                    is_leaked: Some(full.is_leaked),
285                    lifetime_ms: Some(full.lifetime_ms),
286                    // improve.md extensions
287                    borrow_info: full.borrow_info.clone(),
288                    clone_info: full.clone_info.clone(),
289                    ownership_history_available: Some(full.ownership_history_available),
290                })
291                .collect();
292
293            // Write records to JSON
294            json_writer.write_allocation_batch_with_options(
295                &filtered_records,
296                requested_fields,
297                &self.config.serialization_options,
298            )?;
299
300            processed_count += filtered_records.len();
301        }
302
303        // Finalize JSON document
304        let json_stats = json_writer.finalize()?;
305
306        // Update export statistics
307        self.stats.json_writer_stats = json_stats;
308        self.stats.total_export_time_us += export_start.elapsed().as_micros() as u64;
309        self.stats.files_processed += 1;
310        self.stats.total_allocations_exported += processed_count as u64;
311        self.stats.total_bytes_written += self.stats.json_writer_stats.bytes_written;
312
313        // Calculate derived statistics
314        self.update_derived_stats();
315
316        Ok(self.stats.clone())
317    }
318
    /// Export multiple binary files to JSON in parallel
    ///
    /// Takes `(binary_path, json_path)` pairs and returns one statistics
    /// snapshot per exported file, in input order.
    ///
    /// NOTE(review): despite the name and the flag, both branches below run
    /// strictly sequentially — the "parallel" path merely walks the input in
    /// chunks of `max_concurrent_exports`. Confirm whether real concurrency
    /// is planned.
    pub fn export_multiple_json_types<P: AsRef<Path>>(
        &mut self,
        binary_files: &[(P, P)], // (binary_path, json_path) pairs
        requested_fields: &HashSet<AllocationField>,
        filters: &[AllocationFilter],
    ) -> Result<Vec<SelectiveJsonExportStats>, BinaryExportError> {
        if !self.config.enable_parallel_processing || binary_files.len() <= 1 {
            // Sequential processing
            let mut results = Vec::new();
            for (binary_path, json_path) in binary_files {
                let stats = self.export_to_json_selective(
                    binary_path,
                    json_path,
                    requested_fields,
                    filters,
                )?;
                results.push(stats);
            }
            return Ok(results);
        }

        // Parallel processing (simplified implementation)
        // In a real implementation, we would use proper parallel processing
        let mut results = Vec::new();
        let chunk_size = self.config.max_concurrent_exports;

        for chunk in binary_files.chunks(chunk_size) {
            for (binary_path, json_path) in chunk {
                let stats = self.export_to_json_selective(
                    binary_path,
                    json_path,
                    requested_fields,
                    filters,
                )?;
                results.push(stats);
            }
        }

        Ok(results)
    }
360
361    /// Export to memory_analysis.json format (compatible with existing format)
362    pub fn export_memory_analysis_json<P: AsRef<Path>, Q: AsRef<Path>>(
363        &mut self,
364        binary_path: P,
365        json_path: Q,
366    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
367        // Use the memory_analysis_fields() method which includes improve.md extensions
368        let fields = AllocationField::memory_analysis_fields();
369
370        self.export_to_json_selective(binary_path, json_path, &fields, &[])
371    }
372
    /// Export to lifetime.json format (compatible with existing format)
    ///
    /// Streams every record into a top-level `lifecycle_events` array,
    /// tagging each as an "allocation" event. Unlike
    /// `export_to_json_selective`, no filtering is applied and a fixed
    /// field set is used.
    pub fn export_lifetime_json<P: AsRef<Path>, Q: AsRef<Path>>(
        &mut self,
        binary_path: P,
        json_path: Q,
    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
        let export_start = Instant::now();

        // Build or retrieve index
        let index = self.get_or_build_index(&binary_path)?;

        // Open binary file for reading
        let mut binary_file = File::open(&binary_path)?;

        // Create JSON writer with lifecycle_events array
        let json_file = File::create(&json_path)?;
        let buffered_writer = BufWriter::new(json_file);
        let mut json_writer = StreamingJsonWriter::with_config(
            buffered_writer,
            self.config.json_writer_config.clone(),
        )?;

        // Start JSON document with lifecycle_events array
        json_writer
            .write_header_with_array_name(index.record_count() as u64, "lifecycle_events")?;

        // Fixed field set for the lifetime format; the collection target
        // type (a set of AllocationField) is inferred from `process_batch`.
        let fields = [
            AllocationField::Ptr,
            AllocationField::ScopeName,
            AllocationField::Size,
            AllocationField::TimestampAlloc,
            AllocationField::TypeName,
            AllocationField::VarName,
        ]
        .into_iter()
        .collect();

        let mut processed_count = 0;
        let batch_size = self.config.batch_processor_config.batch_size;

        for batch_start in (0..index.record_count() as usize).step_by(batch_size) {
            let batch_end = (batch_start + batch_size).min(index.record_count() as usize);
            let batch_offsets: Vec<u64> = (batch_start..batch_end)
                .filter_map(|i| index.get_record_offset(i))
                .collect();

            if batch_offsets.is_empty() {
                continue;
            }

            // Read and parse records
            let records =
                self.batch_processor
                    .process_batch(&mut binary_file, &batch_offsets, &fields)?;

            // Write records as lifecycle events
            for record in &records.records {
                json_writer.write_lifecycle_event(record, "allocation")?;
            }

            processed_count += records.records.len();
        }

        // Finalize JSON document
        let json_stats = json_writer.finalize()?;

        // Update export statistics (cumulative across exporter lifetime)
        self.stats.json_writer_stats = json_stats;
        self.stats.total_export_time_us += export_start.elapsed().as_micros() as u64;
        self.stats.files_processed += 1;
        self.stats.total_allocations_exported += processed_count as u64;
        self.stats.total_bytes_written += self.stats.json_writer_stats.bytes_written;

        self.update_derived_stats();

        Ok(self.stats.clone())
    }
451
452    /// Export to performance.json format (compatible with existing format)
453    pub fn export_performance_json<P: AsRef<Path>, Q: AsRef<Path>>(
454        &mut self,
455        binary_path: P,
456        json_path: Q,
457    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
458        let fields = [
459            AllocationField::BorrowCount,
460            AllocationField::Ptr,
461            AllocationField::Size,
462            AllocationField::ThreadId,
463            AllocationField::TimestampAlloc,
464            AllocationField::TypeName,
465            AllocationField::VarName,
466        ]
467        .into_iter()
468        .collect();
469
470        self.export_to_json_selective(binary_path, json_path, &fields, &[])
471    }
472
    /// Export to unsafe_ffi.json format (compatible with existing format)
    ///
    /// Writes a hand-built JSON skeleton via raw writes: `boundary_events`
    /// is always emitted empty, and `enhanced_ffi_data` is populated from
    /// the binary records.
    ///
    /// NOTE(review): this assumes `finalize()` does not emit additional
    /// closing tokens after the raw `}` written below — confirm against the
    /// StreamingJsonWriter implementation.
    pub fn export_unsafe_ffi_json<P: AsRef<Path>, Q: AsRef<Path>>(
        &mut self,
        binary_path: P,
        json_path: Q,
    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
        let export_start = Instant::now();

        // Build or retrieve index
        let index = self.get_or_build_index(&binary_path)?;

        // Open binary file for reading
        let mut binary_file = File::open(&binary_path)?;

        // Create JSON writer
        let json_file = File::create(&json_path)?;
        let buffered_writer = BufWriter::new(json_file);
        let mut json_writer = StreamingJsonWriter::with_config(
            buffered_writer,
            self.config.json_writer_config.clone(),
        )?;

        // Start JSON document with specific structure for unsafe_ffi
        json_writer.write_raw("{\n")?;
        json_writer.write_raw("  \"boundary_events\": [],\n")?;
        json_writer.write_raw("  \"enhanced_ffi_data\": [\n")?;

        // Fixed field set for the unsafe_ffi format; collection target type
        // is inferred from `process_batch`.
        let fields = [
            AllocationField::Ptr,
            AllocationField::Size,
            AllocationField::StackTrace,
            AllocationField::ThreadId,
            AllocationField::TimestampAlloc,
            AllocationField::TypeName,
            AllocationField::VarName,
        ]
        .into_iter()
        .collect();

        let mut processed_count = 0;
        let batch_size = self.config.batch_processor_config.batch_size;

        for batch_start in (0..index.record_count() as usize).step_by(batch_size) {
            let batch_end = (batch_start + batch_size).min(index.record_count() as usize);
            let batch_offsets: Vec<u64> = (batch_start..batch_end)
                .filter_map(|i| index.get_record_offset(i))
                .collect();

            if batch_offsets.is_empty() {
                continue;
            }

            // Read and parse records
            let records =
                self.batch_processor
                    .process_batch(&mut binary_file, &batch_offsets, &fields)?;

            // Write records with unsafe_ffi format
            for record in &records.records {
                json_writer.write_unsafe_ffi_allocation(record)?;
            }

            processed_count += records.records.len();
        }

        // Close the enhanced_ffi_data array and root object
        json_writer.write_raw("\n  ]\n")?;
        json_writer.write_raw("}\n")?;

        // Finalize JSON document
        let json_stats = json_writer.finalize()?;

        // Update export statistics (cumulative across exporter lifetime)
        self.stats.json_writer_stats = json_stats;
        self.stats.total_export_time_us += export_start.elapsed().as_micros() as u64;
        self.stats.files_processed += 1;
        self.stats.total_allocations_exported += processed_count as u64;
        self.stats.total_bytes_written += self.stats.json_writer_stats.bytes_written;

        self.update_derived_stats();

        Ok(self.stats.clone())
    }
557
    /// Export to complex_types.json format (compatible with existing format)
    ///
    /// Writes a hand-built `categorized_types` skeleton via raw writes; all
    /// records are emitted into the `primitive` category only.
    ///
    /// NOTE(review): as with `export_unsafe_ffi_json`, this assumes
    /// `finalize()` does not emit extra closing tokens after the raw `}`
    /// written below — confirm against the writer implementation.
    pub fn export_complex_types_json<P: AsRef<Path>, Q: AsRef<Path>>(
        &mut self,
        binary_path: P,
        json_path: Q,
    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
        let export_start = Instant::now();

        // Build or retrieve index
        let index = self.get_or_build_index(&binary_path)?;

        // Open binary file for reading
        let mut binary_file = File::open(&binary_path)?;

        // Create JSON writer
        let json_file = File::create(&json_path)?;
        let buffered_writer = BufWriter::new(json_file);
        let mut json_writer = StreamingJsonWriter::with_config(
            buffered_writer,
            self.config.json_writer_config.clone(),
        )?;

        // Start JSON document with categorized_types structure
        json_writer.write_raw("{\n")?;
        json_writer.write_raw("  \"categorized_types\": {\n")?;
        json_writer.write_raw("    \"primitive\": [\n")?;

        // Fixed field set for the complex_types format; collection target
        // type is inferred from `process_batch`.
        let fields = [
            AllocationField::Ptr,
            AllocationField::Size,
            AllocationField::TypeName,
            AllocationField::VarName,
        ]
        .into_iter()
        .collect();

        let mut processed_count = 0;
        let batch_size = self.config.batch_processor_config.batch_size;

        for batch_start in (0..index.record_count() as usize).step_by(batch_size) {
            let batch_end = (batch_start + batch_size).min(index.record_count() as usize);
            let batch_offsets: Vec<u64> = (batch_start..batch_end)
                .filter_map(|i| index.get_record_offset(i))
                .collect();

            if batch_offsets.is_empty() {
                continue;
            }

            // Read and parse records
            let records =
                self.batch_processor
                    .process_batch(&mut binary_file, &batch_offsets, &fields)?;

            // Write records with complex_types format
            for record in &records.records {
                json_writer.write_complex_types_allocation(record)?;
            }

            processed_count += records.records.len();
        }

        // Close the structure
        json_writer.write_raw("\n    ]\n")?;
        json_writer.write_raw("  }\n")?;
        json_writer.write_raw("}\n")?;

        // Finalize JSON document
        let json_stats = json_writer.finalize()?;

        // Update export statistics (cumulative across exporter lifetime)
        self.stats.json_writer_stats = json_stats;
        self.stats.total_export_time_us += export_start.elapsed().as_micros() as u64;
        self.stats.files_processed += 1;
        self.stats.total_allocations_exported += processed_count as u64;
        self.stats.total_bytes_written += self.stats.json_writer_stats.bytes_written;

        self.update_derived_stats();

        Ok(self.stats.clone())
    }
640
641    /// Export all 5 JSON types in the standard format (compatible with existing output)
642    pub fn export_all_standard_json_types<P: AsRef<Path>, Q: AsRef<Path>>(
643        &mut self,
644        binary_path: P,
645        output_dir: Q,
646        base_name: &str,
647    ) -> Result<Vec<SelectiveJsonExportStats>, BinaryExportError> {
648        let output_dir = output_dir.as_ref();
649        let mut results = Vec::new();
650
651        // Export memory_analysis.json
652        let memory_path = output_dir.join(format!("{base_name}_memory_analysis.json"));
653        results.push(self.export_memory_analysis_json(&binary_path, &memory_path)?);
654
655        // Export lifetime.json
656        let lifetime_path = output_dir.join(format!("{base_name}_lifetime.json"));
657        results.push(self.export_lifetime_json(&binary_path, &lifetime_path)?);
658
659        // Export performance.json
660        let performance_path = output_dir.join(format!("{base_name}_performance.json"));
661        results.push(self.export_performance_json(&binary_path, &performance_path)?);
662
663        // Export unsafe_ffi.json
664        let unsafe_ffi_path = output_dir.join(format!("{base_name}_unsafe_ffi.json"));
665        results.push(self.export_unsafe_ffi_json(&binary_path, &unsafe_ffi_path)?);
666
667        // Export complex_types.json
668        let complex_types_path = output_dir.join(format!("{base_name}_complex_types.json"));
669        results.push(self.export_complex_types_json(&binary_path, &complex_types_path)?);
670
671        Ok(results)
672    }
673
674    /// Export with automatic field selection based on file analysis
675    pub fn export_with_auto_field_selection<P: AsRef<Path>>(
676        &mut self,
677        binary_path: P,
678        json_path: P,
679        optimization_level: OptimizationLevel,
680    ) -> Result<SelectiveJsonExportStats, BinaryExportError> {
681        // Analyze file to determine optimal field selection
682        let index = self.get_or_build_index(&binary_path)?;
683        let auto_fields = self.analyze_optimal_fields(&index, optimization_level)?;
684        let auto_filters = self.analyze_optimal_filters(&index, optimization_level)?;
685
686        self.export_to_json_selective(&binary_path, &json_path, &auto_fields, &auto_filters)
687    }
688
    /// Get current export statistics
    ///
    /// Returns the cumulative statistics for every export performed by this
    /// exporter since creation (or since the last [`Self::reset_stats`]).
    pub fn get_stats(&self) -> &SelectiveJsonExportStats {
        &self.stats
    }
693
694    /// Reset export statistics
695    pub fn reset_stats(&mut self) {
696        self.stats = SelectiveJsonExportStats::default();
697    }
698
    /// Clear all caches
    ///
    /// Clears both the index cache and the batch processor cache. A failure
    /// while clearing the index cache is deliberately ignored (best-effort
    /// cleanup; there is no useful recovery for the caller).
    pub fn clear_caches(&mut self) {
        let _ = self.index_cache.clear();
        self.batch_processor.clear_cache();
    }
704
705    // Private helper methods
706
    /// Get or build index for the given binary file
    ///
    /// Delegates to the index cache, which either returns a cached index or
    /// builds a fresh one with a new `BinaryIndexBuilder`. Afterwards the
    /// exporter's hit/miss counters are overwritten with the cache's own
    /// cumulative totals (assignment, not increment, so they always mirror
    /// the cache exactly).
    fn get_or_build_index<P: AsRef<Path>>(
        &mut self,
        binary_path: P,
    ) -> Result<crate::export::binary::index::BinaryIndex, BinaryExportError> {
        let path = binary_path.as_ref();

        // Use the cache's get_or_build_index method
        let index_builder = crate::export::binary::index_builder::BinaryIndexBuilder::new();
        let index = self.index_cache.get_or_build_index(path, &index_builder)?;

        // Update statistics based on cache behavior
        let cache_stats = self.index_cache.get_stats();
        self.stats.index_cache_hits = cache_stats.cache_hits;
        self.stats.index_cache_misses = cache_stats.cache_misses;

        Ok(index)
    }
725
726    /// Analyze optimal fields based on file characteristics
727    fn analyze_optimal_fields(
728        &self,
729        index: &crate::export::binary::index::BinaryIndex,
730        optimization_level: OptimizationLevel,
731    ) -> Result<HashSet<AllocationField>, BinaryExportError> {
732        let mut fields = HashSet::new();
733
734        // Always include basic fields
735        fields.insert(AllocationField::Ptr);
736        fields.insert(AllocationField::Size);
737        fields.insert(AllocationField::TimestampAlloc);
738
739        match optimization_level {
740            OptimizationLevel::Minimal => {
741                // Only basic fields
742            }
743            OptimizationLevel::Balanced => {
744                // Add commonly useful fields
745                fields.insert(AllocationField::VarName);
746                fields.insert(AllocationField::TypeName);
747                fields.insert(AllocationField::ThreadId);
748                fields.insert(AllocationField::IsLeaked);
749            }
750            OptimizationLevel::Comprehensive => {
751                // Add all available fields
752                fields.extend(AllocationField::all_fields());
753            }
754        }
755
756        // Remove fields that are not present in the file
757        let available_fields = self.analyze_available_fields(index)?;
758        fields.retain(|field| available_fields.contains(field));
759
760        Ok(fields)
761    }
762
763    /// Analyze optimal filters based on file characteristics
764    fn analyze_optimal_filters(
765        &self,
766        _index: &crate::export::binary::index::BinaryIndex,
767        optimization_level: OptimizationLevel,
768    ) -> Result<Vec<AllocationFilter>, BinaryExportError> {
769        let mut filters = Vec::new();
770
771        match optimization_level {
772            OptimizationLevel::Minimal => {
773                // No filters for maximum compatibility
774            }
775            OptimizationLevel::Balanced => {
776                // Filter out very small allocations
777                filters.push(AllocationFilter::SizeRange(32, usize::MAX));
778            }
779            OptimizationLevel::Comprehensive => {
780                // More aggressive filtering
781                filters.push(AllocationFilter::SizeRange(16, usize::MAX));
782            }
783        }
784
785        Ok(filters)
786    }
787
    /// Analyze which fields are available in the file
    ///
    /// Currently a stub: every known field is reported as available
    /// regardless of the index contents, so the `retain` step in
    /// `analyze_optimal_fields` is presently a no-op.
    fn analyze_available_fields(
        &self,
        _index: &crate::export::binary::index::BinaryIndex,
    ) -> Result<HashSet<AllocationField>, BinaryExportError> {
        // For now, assume all fields are available
        // In a real implementation, we would analyze the file format
        Ok(AllocationField::all_fields())
    }
797
798    /// Update derived statistics
799    fn update_derived_stats(&mut self) {
800        if self.stats.total_export_time_us > 0 {
801            self.stats.avg_export_throughput = (self.stats.total_allocations_exported as f64
802                * 1_000_000.0)
803                / self.stats.total_export_time_us as f64;
804        }
805
806        if self.stats.total_allocations_exported > 0 {
807            self.stats.memory_efficiency = self.stats.total_bytes_written as f64
808                / self.stats.total_allocations_exported as f64;
809        }
810    }
811}
812
impl Default for SelectiveJsonExporter {
    /// Equivalent to [`SelectiveJsonExporter::new`].
    ///
    /// Panics if construction fails (e.g. index cache initialization),
    /// because `Default` cannot return an error.
    fn default() -> Self {
        Self::new().expect("Failed to create default SelectiveJsonExporter")
    }
}
818
/// Optimization levels for automatic field selection.
///
/// Consumed by `analyze_optimal_fields` / `analyze_optimal_filters` to
/// trade export completeness against speed and output size.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizationLevel {
    /// Minimal fields for maximum performance
    Minimal,
    /// Balanced selection of useful fields
    Balanced,
    /// Comprehensive field selection
    Comprehensive,
}
829
830/// Builder for selective JSON export configuration
831pub struct SelectiveJsonExportConfigBuilder {
832    config: SelectiveJsonExportConfig,
833}
834
835impl SelectiveJsonExportConfigBuilder {
836    /// Create a new configuration builder
837    pub fn new() -> Self {
838        Self {
839            config: SelectiveJsonExportConfig::default(),
840        }
841    }
842
843    /// Set JSON writer configuration
844    pub fn json_writer_config(mut self, config: StreamingJsonWriterConfig) -> Self {
845        self.config.json_writer_config = config;
846        self
847    }
848
849    /// Set batch processor configuration
850    pub fn batch_processor_config(mut self, config: BatchProcessorConfig) -> Self {
851        self.config.batch_processor_config = config;
852        self
853    }
854
855    /// Set index cache configuration
856    pub fn index_cache_config(mut self, config: IndexCacheConfig) -> Self {
857        self.config.index_cache_config = config;
858        self
859    }
860
861    /// Set serialization options
862    pub fn serialization_options(mut self, options: SelectiveSerializationOptions) -> Self {
863        self.config.serialization_options = options;
864        self
865    }
866
867    /// Enable or disable parallel processing
868    pub fn parallel_processing(mut self, enabled: bool) -> Self {
869        self.config.enable_parallel_processing = enabled;
870        self
871    }
872
873    /// Set maximum concurrent exports
874    pub fn max_concurrent_exports(mut self, max: usize) -> Self {
875        self.config.max_concurrent_exports = max;
876        self
877    }
878
879    /// Enable or disable error recovery
880    pub fn error_recovery(mut self, enabled: bool) -> Self {
881        self.config.enable_error_recovery = enabled;
882        self
883    }
884
885    /// Enable or disable performance monitoring
886    pub fn performance_monitoring(mut self, enabled: bool) -> Self {
887        self.config.enable_performance_monitoring = enabled;
888        self
889    }
890
891    /// Build the configuration
892    pub fn build(self) -> SelectiveJsonExportConfig {
893        self.config
894    }
895}
896
impl Default for SelectiveJsonExportConfigBuilder {
    /// Equivalent to [`SelectiveJsonExportConfigBuilder::new`]; delegates
    /// so both entry points stay in sync.
    fn default() -> Self {
        Self::new()
    }
}
902
#[cfg(test)]
mod tests {
    use super::*;
    use std::collections::HashSet;
    use tempfile::TempDir;

    /// Build an exporter backed by a throwaway cache directory, with
    /// parallelism disabled and monitoring enabled so tests run
    /// deterministically.
    ///
    /// NOTE(review): the `TempDir` guard is dropped when this function
    /// returns, which deletes the cache directory while the exporter
    /// still holds its path — confirm the cache layer tolerates a
    /// missing directory.
    fn create_test_exporter() -> SelectiveJsonExporter {
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let cache_config = IndexCacheConfig {
            cache_directory: temp_dir.path().to_path_buf(),
            max_entries: 100,
            max_age_seconds: 3600,
            enable_compression: false,
        };

        let config = SelectiveJsonExportConfig {
            index_cache_config: cache_config,
            enable_parallel_processing: false, // Disable for testing
            max_concurrent_exports: 1,
            enable_error_recovery: true,
            enable_performance_monitoring: true,
            ..Default::default()
        };

        SelectiveJsonExporter::with_config(config).expect("Failed to create test exporter")
    }

    #[test]
    fn test_selective_json_exporter_creation() {
        // Use a temporary directory for testing to avoid permission issues
        let temp_dir = tempfile::TempDir::new().expect("Failed to get test value");
        let cache_config = IndexCacheConfig {
            cache_directory: temp_dir.path().to_path_buf(),
            max_entries: 1000,
            max_age_seconds: 3600,
            enable_compression: false,
        };

        let config = SelectiveJsonExportConfig {
            index_cache_config: cache_config,
            ..Default::default()
        };

        let exporter = SelectiveJsonExporter::with_config(config);
        assert!(
            exporter.is_ok(),
            "Failed to create SelectiveJsonExporter: {:?}",
            exporter.err()
        );
    }

    #[test]
    fn test_selective_json_exporter_new() {
        // Test the new() method which uses default config
        let result = SelectiveJsonExporter::new();
        // This might fail due to default cache directory permissions, but we test the attempt
        match result {
            Ok(_exporter) => {
                // Success case
            }
            Err(_) => {
                // Expected failure due to default cache directory
            }
        }
    }

    #[test]
    fn test_selective_json_exporter_default() {
        // Test the Default trait implementation.
        // Default::default() panics on construction failure, so the panic is
        // caught here rather than allowed to fail the test.
        let result = std::panic::catch_unwind(|| {
            let _exporter = SelectiveJsonExporter::default();
        });
        // This might panic due to default cache directory permissions, which is expected
        match result {
            Ok(_) => {
                // Success case
            }
            Err(_) => {
                // Expected panic due to default cache directory
            }
        }
    }

    #[test]
    fn test_config_builder() {
        let config = SelectiveJsonExportConfigBuilder::new()
            .parallel_processing(false)
            .max_concurrent_exports(2)
            .error_recovery(false)
            .build();

        assert!(!config.enable_parallel_processing);
        assert_eq!(config.max_concurrent_exports, 2);
        assert!(!config.enable_error_recovery);
    }

    /// Exercises every builder setter in one chain and verifies the
    /// resulting config reflects each value.
    #[test]
    fn test_config_builder_all_methods() {
        let json_writer_config = StreamingJsonWriterConfig::default();
        let batch_processor_config = BatchProcessorConfig::default();
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let index_cache_config = IndexCacheConfig {
            cache_directory: temp_dir.path().to_path_buf(),
            max_entries: 500,
            max_age_seconds: 1800,
            enable_compression: true,
        };
        let serialization_options = SelectiveSerializationOptions::default();

        let config = SelectiveJsonExportConfigBuilder::new()
            .json_writer_config(json_writer_config.clone())
            .batch_processor_config(batch_processor_config.clone())
            .index_cache_config(index_cache_config.clone())
            .serialization_options(serialization_options.clone())
            .parallel_processing(true)
            .max_concurrent_exports(8)
            .error_recovery(true)
            .performance_monitoring(false)
            .build();

        assert_eq!(
            config.json_writer_config.buffer_size,
            json_writer_config.buffer_size
        );
        assert_eq!(
            config.batch_processor_config.batch_size,
            batch_processor_config.batch_size
        );
        assert_eq!(
            config.index_cache_config.max_entries,
            index_cache_config.max_entries
        );
        assert!(config.enable_parallel_processing);
        assert_eq!(config.max_concurrent_exports, 8);
        assert!(config.enable_error_recovery);
        assert!(!config.enable_performance_monitoring);
    }

    #[test]
    fn test_config_builder_default() {
        // Builder's Default impl must agree with new().
        let builder1 = SelectiveJsonExportConfigBuilder::new();
        let builder2 = SelectiveJsonExportConfigBuilder::default();

        let config1 = builder1.build();
        let config2 = builder2.build();

        assert_eq!(
            config1.enable_parallel_processing,
            config2.enable_parallel_processing
        );
        assert_eq!(
            config1.max_concurrent_exports,
            config2.max_concurrent_exports
        );
        assert_eq!(config1.enable_error_recovery, config2.enable_error_recovery);
    }

    #[test]
    fn test_selective_json_export_config_default() {
        let config = SelectiveJsonExportConfig::default();

        assert!(config.enable_parallel_processing);
        assert_eq!(config.max_concurrent_exports, 4);
        assert!(config.enable_error_recovery);
        assert!(config.enable_performance_monitoring);
    }

    #[test]
    fn test_optimization_levels() {
        // Sanity-checks the derived PartialEq/Eq implementations.
        assert_eq!(OptimizationLevel::Minimal, OptimizationLevel::Minimal);
        assert_eq!(OptimizationLevel::Balanced, OptimizationLevel::Balanced);
        assert_eq!(
            OptimizationLevel::Comprehensive,
            OptimizationLevel::Comprehensive
        );

        assert_ne!(OptimizationLevel::Minimal, OptimizationLevel::Balanced);
        assert_ne!(
            OptimizationLevel::Balanced,
            OptimizationLevel::Comprehensive
        );
        assert_ne!(OptimizationLevel::Minimal, OptimizationLevel::Comprehensive);
    }

    #[test]
    fn test_export_stats_default() {
        let stats = SelectiveJsonExportStats::default();

        assert_eq!(stats.total_export_time_us, 0);
        assert_eq!(stats.files_processed, 0);
        assert_eq!(stats.total_allocations_exported, 0);
        assert_eq!(stats.total_bytes_written, 0);
        assert_eq!(stats.index_cache_hits, 0);
        assert_eq!(stats.index_cache_misses, 0);
        assert_eq!(stats.errors_recovered, 0);
        assert_eq!(stats.avg_export_throughput, 0.0);
        assert_eq!(stats.memory_efficiency, 0.0);
    }

    #[test]
    fn test_export_stats_calculations() {
        let stats = SelectiveJsonExportStats {
            index_cache_hits: 8,
            index_cache_misses: 2,
            total_export_time_us: 1_000_000, // 1 second
            files_processed: 5,
            total_allocations_exported: 1000,
            total_bytes_written: 50000,
            ..Default::default()
        };

        assert_eq!(stats.cache_hit_rate(), 80.0);
        assert_eq!(stats.export_efficiency(), 5.0); // 5 files per second
        assert!(stats.compression_ratio() > 0.0);
    }

    #[test]
    fn test_export_stats_edge_cases() {
        // Test with zero values
        let stats = SelectiveJsonExportStats::default();

        assert_eq!(stats.cache_hit_rate(), 0.0);
        assert_eq!(stats.export_efficiency(), 0.0);
        assert_eq!(stats.compression_ratio(), 0.0);

        // Test with only cache misses
        let stats = SelectiveJsonExportStats {
            index_cache_hits: 0,
            index_cache_misses: 10,
            total_export_time_us: 1_000_000,
            files_processed: 1,
            total_allocations_exported: 100,
            total_bytes_written: 5000,
            ..Default::default()
        };

        assert_eq!(stats.cache_hit_rate(), 0.0);
        assert_eq!(stats.export_efficiency(), 1.0);
        assert!(stats.compression_ratio() > 0.0);

        // Test with only cache hits
        let stats = SelectiveJsonExportStats {
            index_cache_hits: 10,
            index_cache_misses: 0,
            total_export_time_us: 1_000_000,
            files_processed: 1,
            total_allocations_exported: 100,
            total_bytes_written: 5000,
            ..Default::default()
        };

        assert_eq!(stats.cache_hit_rate(), 100.0);
    }

    #[test]
    fn test_field_analysis() {
        let mut exporter = create_test_exporter();

        // Test basic functionality without actual file processing
        let stats = exporter.get_stats();
        assert_eq!(stats.files_processed, 0);
        assert_eq!(stats.total_allocations_exported, 0);

        exporter.reset_stats();
        assert_eq!(exporter.get_stats().files_processed, 0);
    }

    #[test]
    fn test_cache_operations() {
        let mut exporter = create_test_exporter();

        // Test cache clearing
        exporter.clear_caches();

        // Verify stats are still accessible
        let stats = exporter.get_stats();
        assert_eq!(stats.index_cache_hits, 0);
        assert_eq!(stats.index_cache_misses, 0);
    }

    #[test]
    fn test_stats_reset() {
        let mut exporter = create_test_exporter();

        // Manually set some stats (direct field access is possible because
        // this module lives in the same crate as the exporter).
        exporter.stats.files_processed = 5;
        exporter.stats.total_allocations_exported = 1000;
        exporter.stats.total_bytes_written = 50000;

        // Verify stats are set
        assert_eq!(exporter.get_stats().files_processed, 5);
        assert_eq!(exporter.get_stats().total_allocations_exported, 1000);

        // Reset stats
        exporter.reset_stats();

        // Verify stats are reset
        assert_eq!(exporter.get_stats().files_processed, 0);
        assert_eq!(exporter.get_stats().total_allocations_exported, 0);
        assert_eq!(exporter.get_stats().total_bytes_written, 0);
    }

    #[test]
    fn test_export_multiple_json_types_empty() {
        // An empty input list should succeed and produce no results.
        let mut exporter = create_test_exporter();
        let binary_files: Vec<(&str, &str)> = vec![];
        let fields = HashSet::new();
        let filters = vec![];

        let result = exporter.export_multiple_json_types(&binary_files, &fields, &filters);
        assert!(result.is_ok());

        let results = result.unwrap();
        assert_eq!(results.len(), 0);
    }

    #[test]
    fn test_export_multiple_json_types_single_file() {
        let mut exporter = create_test_exporter();

        // Create temporary files for testing
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let binary_path = temp_dir.path().join("test.bin");
        let json_path = temp_dir.path().join("test.json");

        // Create a dummy binary file
        std::fs::write(&binary_path, b"dummy binary data").expect("Failed to write test file");

        let binary_files = vec![(&binary_path, &json_path)];
        let fields = HashSet::new();
        let filters = vec![];

        // This will likely fail due to invalid binary format, but we test the code path
        let result = exporter.export_multiple_json_types(&binary_files, &fields, &filters);
        // We expect this to fail with invalid binary format
        assert!(result.is_err());
    }

    #[test]
    fn test_analyze_optimal_fields() {
        let exporter = create_test_exporter();

        // Create a dummy index for testing (the index contents are ignored
        // by the current field-analysis implementation).
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let dummy_path = temp_dir.path().join("dummy.bin");
        let header = crate::export::binary::format::FileHeader::new_legacy(0);
        let index = crate::export::binary::index::BinaryIndex::new(dummy_path, 0, 0, header);

        // Test minimal optimization
        let fields = exporter.analyze_optimal_fields(&index, OptimizationLevel::Minimal);
        assert!(fields.is_ok());
        let fields = fields.unwrap();
        assert!(fields.contains(&AllocationField::Ptr));
        assert!(fields.contains(&AllocationField::Size));
        assert!(fields.contains(&AllocationField::TimestampAlloc));

        // Test balanced optimization
        let fields = exporter.analyze_optimal_fields(&index, OptimizationLevel::Balanced);
        assert!(fields.is_ok());
        let fields = fields.unwrap();
        assert!(fields.contains(&AllocationField::Ptr));
        assert!(fields.contains(&AllocationField::VarName));
        assert!(fields.contains(&AllocationField::TypeName));

        // Test comprehensive optimization
        let fields = exporter.analyze_optimal_fields(&index, OptimizationLevel::Comprehensive);
        assert!(fields.is_ok());
        let fields = fields.unwrap();
        assert!(!fields.is_empty());
    }

    /// Pins the exact filter set produced per optimization level:
    /// none for Minimal, SizeRange(32, MAX) for Balanced,
    /// SizeRange(16, MAX) for Comprehensive.
    #[test]
    fn test_analyze_optimal_filters() {
        let exporter = create_test_exporter();

        // Create a dummy index for testing
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let dummy_path = temp_dir.path().join("dummy.bin");
        let header = crate::export::binary::format::FileHeader::new_legacy(0);
        let index = crate::export::binary::index::BinaryIndex::new(dummy_path, 0, 0, header);

        // Test minimal optimization
        let filters = exporter.analyze_optimal_filters(&index, OptimizationLevel::Minimal);
        assert!(filters.is_ok());
        let filters = filters.unwrap();
        assert_eq!(filters.len(), 0);

        // Test balanced optimization
        let filters = exporter.analyze_optimal_filters(&index, OptimizationLevel::Balanced);
        assert!(filters.is_ok());
        let filters = filters.unwrap();
        assert_eq!(filters.len(), 1);
        if let AllocationFilter::SizeRange(min, max) = &filters[0] {
            assert_eq!(*min, 32);
            assert_eq!(*max, usize::MAX);
        } else {
            panic!("Expected SizeRange filter");
        }

        // Test comprehensive optimization
        let filters = exporter.analyze_optimal_filters(&index, OptimizationLevel::Comprehensive);
        assert!(filters.is_ok());
        let filters = filters.unwrap();
        assert_eq!(filters.len(), 1);
        if let AllocationFilter::SizeRange(min, max) = &filters[0] {
            assert_eq!(*min, 16);
            assert_eq!(*max, usize::MAX);
        } else {
            panic!("Expected SizeRange filter");
        }
    }

    #[test]
    fn test_analyze_available_fields() {
        let exporter = create_test_exporter();

        // Create a dummy index for testing
        let temp_dir = TempDir::new().expect("Failed to create temp dir");
        let dummy_path = temp_dir.path().join("dummy.bin");
        let header = crate::export::binary::format::FileHeader::new_legacy(0);
        let index = crate::export::binary::index::BinaryIndex::new(dummy_path, 0, 0, header);

        let available_fields = exporter.analyze_available_fields(&index);
        assert!(available_fields.is_ok());

        let fields = available_fields.unwrap();
        assert!(!fields.is_empty());
        // Should contain all fields for now
        assert!(fields.contains(&AllocationField::Ptr));
        assert!(fields.contains(&AllocationField::Size));
    }

    /// Verifies both the happy path of `update_derived_stats` and its
    /// guard behavior: derived values are left unchanged when the
    /// corresponding divisor (time or allocation count) is zero.
    #[test]
    fn test_update_derived_stats() {
        let mut exporter = create_test_exporter();

        // Set some base stats
        exporter.stats.total_export_time_us = 1_000_000; // 1 second
        exporter.stats.total_allocations_exported = 1000;
        exporter.stats.total_bytes_written = 50000;

        // Update derived stats
        exporter.update_derived_stats();

        // Check that derived stats are calculated
        assert!(exporter.stats.avg_export_throughput > 0.0);
        assert!(exporter.stats.memory_efficiency > 0.0);

        // Test with zero time - the method only updates if time > 0
        let original_throughput = exporter.stats.avg_export_throughput;
        exporter.stats.total_export_time_us = 0;
        exporter.update_derived_stats();
        // The throughput should remain unchanged when time is 0
        assert_eq!(exporter.stats.avg_export_throughput, original_throughput);

        // Test with zero allocations - the method only updates if allocations > 0
        let original_efficiency = exporter.stats.memory_efficiency;
        exporter.stats.total_export_time_us = 1_000_000;
        exporter.stats.total_allocations_exported = 0;
        exporter.update_derived_stats();
        // The efficiency should remain unchanged when allocations is 0
        assert_eq!(exporter.stats.memory_efficiency, original_efficiency);
    }

    #[test]
    fn test_compression_ratio_calculation() {
        let stats = SelectiveJsonExportStats {
            total_allocations_exported: 100,
            total_bytes_written: 25000, // 250 bytes per allocation
            ..Default::default()
        };

        let ratio = stats.compression_ratio();
        // Expected: (25000 / (100 * 500)) * 100 = 50%
        assert_eq!(ratio, 50.0);

        // Test with zero allocations
        let stats = SelectiveJsonExportStats {
            total_allocations_exported: 0,
            total_bytes_written: 1000,
            ..Default::default()
        };

        let ratio = stats.compression_ratio();
        assert_eq!(ratio, 0.0);
    }

    #[test]
    fn test_export_efficiency_calculation() {
        let stats = SelectiveJsonExportStats {
            total_export_time_us: 2_000_000, // 2 seconds
            files_processed: 10,
            ..Default::default()
        };

        let efficiency = stats.export_efficiency();
        // Expected: (10 * 1_000_000) / 2_000_000 = 5.0 files per second
        assert_eq!(efficiency, 5.0);

        // Test with zero time
        let stats = SelectiveJsonExportStats {
            total_export_time_us: 0,
            files_processed: 10,
            ..Default::default()
        };

        let efficiency = stats.export_efficiency();
        assert_eq!(efficiency, 0.0);
    }

    #[test]
    fn test_cache_hit_rate_calculation() {
        // Test normal case
        let stats = SelectiveJsonExportStats {
            index_cache_hits: 75,
            index_cache_misses: 25,
            ..Default::default()
        };

        let hit_rate = stats.cache_hit_rate();
        assert_eq!(hit_rate, 75.0);

        // Test with no requests
        let stats = SelectiveJsonExportStats {
            index_cache_hits: 0,
            index_cache_misses: 0,
            ..Default::default()
        };

        let hit_rate = stats.cache_hit_rate();
        assert_eq!(hit_rate, 0.0);

        // Test with perfect hit rate
        let stats = SelectiveJsonExportStats {
            index_cache_hits: 100,
            index_cache_misses: 0,
            ..Default::default()
        };

        let hit_rate = stats.cache_hit_rate();
        assert_eq!(hit_rate, 100.0);
    }

    #[test]
    fn test_debug_implementations() {
        // Smoke-tests the Debug derives on the public types.
        let config = SelectiveJsonExportConfig::default();
        let debug_str = format!("{:?}", config);
        assert!(debug_str.contains("SelectiveJsonExportConfig"));

        let stats = SelectiveJsonExportStats::default();
        let debug_str = format!("{:?}", stats);
        assert!(debug_str.contains("SelectiveJsonExportStats"));

        let optimization_level = OptimizationLevel::Balanced;
        let debug_str = format!("{:?}", optimization_level);
        assert!(debug_str.contains("Balanced"));
    }

    #[test]
    fn test_clone_implementations() {
        // Smoke-tests the Clone/Copy derives on the public types.
        let config = SelectiveJsonExportConfig::default();
        let cloned_config = config.clone();
        assert_eq!(
            config.enable_parallel_processing,
            cloned_config.enable_parallel_processing
        );
        assert_eq!(
            config.max_concurrent_exports,
            cloned_config.max_concurrent_exports
        );

        let stats = SelectiveJsonExportStats::default();
        let cloned_stats = stats.clone();
        assert_eq!(stats.files_processed, cloned_stats.files_processed);
        assert_eq!(
            stats.total_allocations_exported,
            cloned_stats.total_allocations_exported
        );

        let optimization_level = OptimizationLevel::Comprehensive;
        let cloned_level = optimization_level; // Copy, not Clone
        assert_eq!(optimization_level, cloned_level);
    }
}