memscope_rs/export/optimized_json_export.rs

//! Optimized JSON export implementation with performance improvements
//!
//! This module provides highly optimized JSON export functionality that addresses
//! the main performance bottlenecks identified in the current implementation.

use crate::analysis::security_violation_analyzer::{
    AnalysisConfig, SecurityViolationAnalyzer, ViolationSeverity,
};
use crate::analysis::unsafe_ffi_tracker::{get_global_unsafe_ffi_tracker, SafetyViolation};
use crate::core::tracker::MemoryTracker;
use crate::core::types::{AllocationInfo, TrackingResult};
use crate::export::adaptive_performance::AdaptivePerformanceOptimizer;
// use crate::export::fast_export_coordinator::FastExportCoordinator;
use crate::export::schema_validator::SchemaValidator;
use rayon::prelude::*;

use std::{
    collections::HashMap,
    fs::File,
    io::{BufWriter, Write},
    path::Path,
    sync::LazyLock,
};

/// JSON file types
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum JsonFileType {
    /// memory_analysis.json
    MemoryAnalysis,
    /// lifetime.json
    Lifetime,
    /// unsafe_ffi.json
    UnsafeFfi,
    /// performance.json
    Performance,
    /// complex_types.json
    ComplexTypes,
    /// security_violations.json
    SecurityViolations,
    // AsyncAnalysis,    // async analysis
    // ThreadSafety,     // thread safety analysis
    // MemoryLeaks,      // memory leak analysis
    // TypeInference,    // type inference analysis
}

impl JsonFileType {
    /// Get the standard four file types
    pub fn standard_four() -> Vec<JsonFileType> {
        vec![
            JsonFileType::MemoryAnalysis,
            JsonFileType::Lifetime,
            JsonFileType::UnsafeFfi,
            JsonFileType::Performance,
        ]
    }

    /// Get the standard five file types (the standard four plus complex types)
    pub fn standard_five() -> Vec<JsonFileType> {
        vec![
            JsonFileType::MemoryAnalysis,
            JsonFileType::Lifetime,
            JsonFileType::UnsafeFfi,
            JsonFileType::Performance,
            JsonFileType::ComplexTypes,
        ]
    }

    /// Get the file name suffix for this file type
    pub fn file_suffix(&self) -> &'static str {
        match self {
            JsonFileType::MemoryAnalysis => "memory_analysis",
            JsonFileType::Lifetime => "lifetime",
            JsonFileType::UnsafeFfi => "unsafe_ffi",
            JsonFileType::Performance => "performance",
            JsonFileType::ComplexTypes => "complex_types",
            JsonFileType::SecurityViolations => "security_violations",
        }
    }
}
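
// An illustrative sanity test for the helpers above (a hedged sketch added
// for documentation purposes, not part of the original test suite).
#[cfg(test)]
mod json_file_type_tests {
    use super::*;

    // `standard_five` is `standard_four` plus the complex-types report, and
    // `file_suffix` supplies the piece between the base name and ".json".
    #[test]
    fn standard_five_extends_standard_four() {
        let four = JsonFileType::standard_four();
        let five = JsonFileType::standard_five();
        assert_eq!(&five[..4], &four[..]);
        assert_eq!(five[4].file_suffix(), "complex_types");
    }
}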

/// Global adaptive performance optimizer instance
static ADAPTIVE_OPTIMIZER: LazyLock<std::sync::Mutex<AdaptivePerformanceOptimizer>> =
    LazyLock::new(|| std::sync::Mutex::new(AdaptivePerformanceOptimizer::default()));

/// Global security violation analyzer instance
static SECURITY_ANALYZER: LazyLock<std::sync::Mutex<SecurityViolationAnalyzer>> =
    LazyLock::new(|| std::sync::Mutex::new(SecurityViolationAnalyzer::default()));

/// Optimized export options with intelligent defaults
#[derive(Debug, Clone)]
pub struct OptimizedExportOptions {
    /// Use parallel processing for large datasets (default: auto-detect)
    pub parallel_processing: bool,
    /// Buffer size for file I/O (default: 256KB for better performance)
    pub buffer_size: usize,
    /// Use compact JSON format for large files (default: auto-detect)
    pub use_compact_format: Option<bool>,
    /// Enable type inference caching (default: true)
    pub enable_type_cache: bool,
    /// Batch size for processing allocations (default: 1000)
    pub batch_size: usize,
    /// Enable streaming JSON writer for large files (default: auto-detect)
    pub use_streaming_writer: bool,
    /// Enable schema validation (default: true)
    pub enable_schema_validation: bool,
    /// Optimization level (default: High)
    pub optimization_level: OptimizationLevel,
    /// Enable enhanced FFI analysis (default: true)
    pub enable_enhanced_ffi_analysis: bool,
    /// Enable boundary event processing (default: true)
    pub enable_boundary_event_processing: bool,
    /// Enable memory passport tracking (default: true)
    pub enable_memory_passport_tracking: bool,
    /// Enable adaptive performance optimization (default: true)
    pub enable_adaptive_optimization: bool,
    /// Maximum cache size for type information (default: 1000)
    pub max_cache_size: usize,
    /// Target processing time per batch in milliseconds (default: 10ms)
    pub target_batch_time_ms: u64,
    /// Enable comprehensive security violation analysis (default: true)
    pub enable_security_analysis: bool,
    /// Include low severity violations in security reports (default: true)
    pub include_low_severity_violations: bool,
    /// Generate data integrity hashes for security reports (default: true)
    pub generate_integrity_hashes: bool,
    /// Enable fast export mode using the new coordinator (default: false)
    pub enable_fast_export_mode: bool,
    /// Allocation count above which fast export auto-enables (default: Some(5000))
    pub auto_fast_export_threshold: Option<usize>,
    /// Thread count for parallel processing (default: auto-detect)
    pub thread_count: Option<usize>,
}

/// Optimization levels for export processing
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizationLevel {
    /// Basic optimization - fastest export
    Low,
    /// Balanced optimization - good performance with enhanced features
    Medium,
    /// Full optimization - all features enabled, may be slower
    High,
    /// Maximum optimization - experimental features enabled
    Maximum,
}

impl Default for OptimizedExportOptions {
    fn default() -> Self {
        Self {
            parallel_processing: true,
            buffer_size: 256 * 1024,  // 256KB buffer
            use_compact_format: None, // Auto-detect based on file size
            enable_type_cache: true,
            batch_size: 1000,
            use_streaming_writer: true,
            enable_schema_validation: true,
            optimization_level: OptimizationLevel::High,
            enable_enhanced_ffi_analysis: true,
            enable_boundary_event_processing: true,
            enable_memory_passport_tracking: true,
            enable_adaptive_optimization: true,
            max_cache_size: 1000,
            target_batch_time_ms: 10,
            enable_security_analysis: true,
            include_low_severity_violations: true,
            generate_integrity_hashes: true,
            enable_fast_export_mode: false,
            auto_fast_export_threshold: Some(5000),
            thread_count: None, // Auto-detect
        }
    }
}

impl OptimizedExportOptions {
    /// Create new options with specified optimization level
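    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only) of combining a preset level with
    /// the builder methods defined below:
    ///
    /// ```rust,ignore
    /// let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::Medium)
    ///     .buffer_size(512 * 1024)
    ///     .thread_count(Some(4));
    /// ```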
    pub fn with_optimization_level(level: OptimizationLevel) -> Self {
        let mut options = OptimizedExportOptions {
            optimization_level: level,
            ..Default::default()
        };
        match level {
            OptimizationLevel::Low => {
                options.parallel_processing = false;
                options.use_streaming_writer = false;
                options.enable_schema_validation = false;
                options.enable_enhanced_ffi_analysis = false;
                options.enable_boundary_event_processing = false;
                options.enable_memory_passport_tracking = false;
                options.enable_adaptive_optimization = false;
                options.enable_security_analysis = false;
            }
            OptimizationLevel::Medium => {
                options.parallel_processing = true;
                options.use_streaming_writer = false;
                options.enable_schema_validation = true;
                options.enable_enhanced_ffi_analysis = true;
                options.enable_boundary_event_processing = false;
                options.enable_memory_passport_tracking = false;
            }
            OptimizationLevel::High => {
                // Use default settings (all features enabled)
            }
            OptimizationLevel::Maximum => {
                options.buffer_size = 512 * 1024; // 512KB buffer
                options.batch_size = 2000;
                // All features enabled with maximum settings
            }
        }

        options
    }

    /// Enable or disable parallel processing
    pub fn parallel_processing(mut self, enabled: bool) -> Self {
        self.parallel_processing = enabled;
        self
    }

    /// Set buffer size for I/O operations
    pub fn buffer_size(mut self, size: usize) -> Self {
        self.buffer_size = size;
        self
    }

    /// Set batch size for processing
    pub fn batch_size(mut self, size: usize) -> Self {
        self.batch_size = size;
        self
    }

    /// Enable or disable streaming writer
    pub fn streaming_writer(mut self, enabled: bool) -> Self {
        self.use_streaming_writer = enabled;
        self
    }

    /// Enable or disable schema validation
    pub fn schema_validation(mut self, enabled: bool) -> Self {
        self.enable_schema_validation = enabled;
        self
    }

    /// Enable or disable adaptive optimization
    pub fn adaptive_optimization(mut self, enabled: bool) -> Self {
        self.enable_adaptive_optimization = enabled;
        self
    }

    /// Set maximum cache size
    pub fn max_cache_size(mut self, size: usize) -> Self {
        self.max_cache_size = size;
        self
    }

    /// Enable or disable security violation analysis
    pub fn security_analysis(mut self, enabled: bool) -> Self {
        self.enable_security_analysis = enabled;
        self
    }

    /// Include low severity violations in reports
    pub fn include_low_severity(mut self, include: bool) -> Self {
        self.include_low_severity_violations = include;
        self
    }

    /// Enable or disable integrity hash generation
    pub fn integrity_hashes(mut self, enabled: bool) -> Self {
        self.generate_integrity_hashes = enabled;
        self
    }

    /// Enable or disable fast export mode
    pub fn fast_export_mode(mut self, enabled: bool) -> Self {
        self.enable_fast_export_mode = enabled;
        self
    }

    /// Set auto fast export threshold (None to disable auto mode)
    pub fn auto_fast_export_threshold(mut self, threshold: Option<usize>) -> Self {
        self.auto_fast_export_threshold = threshold;
        self
    }

    /// Set thread count for parallel processing (None for auto-detect)
    pub fn thread_count(mut self, count: Option<usize>) -> Self {
        self.thread_count = count;
        self
    }
}

/// Simple streaming JSON writer for memory-efficient large file export
struct StreamingJsonWriter<W: Write> {
    writer: W,
}

impl<W: Write> StreamingJsonWriter<W> {
    /// Create new streaming writer
    fn new(writer: W) -> Self {
        Self { writer }
    }

    /// Write complete JSON data using streaming approach
    ///
    /// Note: serialization failures currently panic via `expect` instead of
    /// being propagated through the returned `TrackingResult`.
    fn write_complete_json(&mut self, data: &serde_json::Value) -> TrackingResult<()> {
        // Use serde_json's streaming capabilities for memory efficiency
        serde_json::to_writer(&mut self.writer, data)
            .expect("Failed to write JSON data to streaming writer");
        Ok(())
    }

    /// Write pretty-formatted JSON data using streaming approach
    fn write_pretty_json(&mut self, data: &serde_json::Value) -> TrackingResult<()> {
        // Use serde_json's pretty printing with streaming
        serde_json::to_writer_pretty(&mut self.writer, data)
            .expect("Failed to write pretty JSON data to streaming writer");
        Ok(())
    }

    /// Finalize the writer and ensure all data is flushed
    fn finalize(&mut self) -> TrackingResult<()> {
        self.writer
            .flush()
            .expect("Failed to flush streaming writer");
        Ok(())
    }
}
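
// Illustrative test of the writer above (a hedged sketch, not original code):
// a `Vec<u8>` implements `Write`, so it can stand in for a file.
#[cfg(test)]
mod streaming_writer_tests {
    use super::*;

    #[test]
    fn writes_compact_json_and_flushes() {
        let mut writer = StreamingJsonWriter::new(Vec::<u8>::new());
        // Errors are unreachable here: serialization failures panic inside the writer.
        let _ = writer.write_complete_json(&serde_json::json!({"k": 1}));
        let _ = writer.finalize();
        assert_eq!(String::from_utf8(writer.writer).unwrap(), r#"{"k":1}"#);
    }
}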

/// Type inference cache for performance optimization
static TYPE_CACHE: LazyLock<std::sync::Mutex<HashMap<String, String>>> =
    LazyLock::new(|| std::sync::Mutex::new(HashMap::new()));

/// Get cached type information or compute and cache it
fn get_or_compute_type_info(type_name: &str, size: usize) -> String {
    if let Ok(mut cache) = TYPE_CACHE.lock() {
        let key = format!("{type_name}:{size}");
        if let Some(cached) = cache.get(&key) {
            return cached.clone();
        }
        let type_info = compute_enhanced_type_info(type_name, size);
        cache.insert(key, type_info.clone());
        type_info
    } else {
        compute_enhanced_type_info(type_name, size)
    }
}

/// Compute enhanced type information
fn compute_enhanced_type_info(type_name: &str, size: usize) -> String {
    if type_name.contains("Vec<") {
        "Vec<T>".to_string()
    } else if type_name.contains("HashMap") {
        "HashMap<K,V>".to_string()
    } else if type_name.contains("String") {
        "String".to_string()
    } else {
        match size {
            1..=8 => "Primitive".to_string(),
            9..=32 => "SmallStruct".to_string(),
            33..=128 => "MediumStruct".to_string(),
            129..=1024 => "LargeStruct".to_string(),
            _ => "Buffer".to_string(),
        }
    }
}
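
// Spot-checks of the classification heuristic above (a hedged sketch added
// for illustration): name patterns are matched first, then size buckets apply.
#[cfg(test)]
mod type_info_tests {
    use super::*;

    #[test]
    fn classifies_common_shapes() {
        assert_eq!(compute_enhanced_type_info("alloc::vec::Vec<u8>", 24), "Vec<T>");
        assert_eq!(compute_enhanced_type_info("MyStruct", 16), "SmallStruct");
        assert_eq!(compute_enhanced_type_info("[u8; 4096]", 4096), "Buffer");
    }
}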

/// Clear the type cache (useful for testing)
pub fn clear_type_cache() {
    if let Ok(mut cache) = TYPE_CACHE.lock() {
        cache.clear();
    }
}

/// Analyze boundary events for an allocation
fn analyze_boundary_events(alloc: &AllocationInfo) -> Option<serde_json::Value> {
    // Get boundary events from the unsafe FFI tracker
    let tracker = get_global_unsafe_ffi_tracker();
    if let Ok(allocations) = tracker.get_enhanced_allocations() {
        for enhanced_alloc in allocations {
            if enhanced_alloc.base.ptr == alloc.ptr
                && !enhanced_alloc.cross_boundary_events.is_empty()
            {
                let events: Vec<serde_json::Value> = enhanced_alloc
                    .cross_boundary_events
                    .iter()
                    .map(|event| {
                        serde_json::json!({
                            "event_type": format!("{:?}", event.event_type),
                            "from_context": event.from_context,
                            "to_context": event.to_context,
                            "timestamp": event.timestamp
                        })
                    })
                    .collect();

                return Some(serde_json::json!({
                    "has_boundary_events": true,
                    "event_count": events.len(),
                    "events": events
                }));
            }
        }
    }

    None
}

/// Get memory passport information for a pointer
fn get_memory_passport_info(ptr: usize) -> Option<serde_json::Value> {
    let tracker = get_global_unsafe_ffi_tracker();
    if let Ok(passports) = tracker.get_memory_passports() {
        if let Some(passport) = passports.get(&ptr) {
            return Some(serde_json::json!({
                "passport_id": passport.passport_id,
                "origin_context": passport.origin.context,
                "current_owner": passport.current_owner.owner_context,
                "validity_status": format!("{:?}", passport.validity_status),
                "security_clearance": format!("{:?}", passport.security_clearance),
                "journey_length": passport.journey.len(),
                "last_stamp": passport.journey.last().map(|stamp| serde_json::json!({
                    "operation": stamp.operation,
                    "location": stamp.location,
                    "timestamp": stamp.timestamp
                }))
            }));
        }
    }

    None
}

/// Optimized file writing with streaming support and schema validation
fn write_json_optimized<P: AsRef<Path>>(
    path: P,
    data: &serde_json::Value,
    options: &OptimizedExportOptions,
) -> TrackingResult<()> {
    let path = path.as_ref();

    // Validate schema if enabled; fast export mode skips validation for better performance
    if options.enable_schema_validation && !options.enable_fast_export_mode {
        let validator = SchemaValidator::new();
        if let Ok(validation_result) = validator.validate_unsafe_ffi_analysis(data) {
            if !validation_result.is_valid {
                eprintln!("⚠️ Schema validation warnings:");
                for error in validation_result.errors {
                    eprintln!("  - {}: {}", error.code, error.message);
                }
                for warning in validation_result.warnings {
                    eprintln!("  - {}: {}", warning.warning_code, warning.message);
                }
            }
        }
    }

    // Determine format based on data size
    let estimated_size = estimate_json_size(data);
    let use_compact = options
        .use_compact_format
        .unwrap_or(estimated_size > 1_000_000); // Use compact for files > 1MB

    // Use streaming writer for large files when explicitly enabled
    if options.use_streaming_writer && estimated_size > 500_000 {
        tracing::info!(
            "Using streaming writer for large file (size: {} bytes)",
            estimated_size
        );
        let file = File::create(path)?;
        let buffered_file = BufWriter::with_capacity(options.buffer_size * 2, file);
        let mut streaming_writer = StreamingJsonWriter::new(buffered_file);

        if use_compact {
            streaming_writer.write_complete_json(data)?;
        } else {
            streaming_writer.write_pretty_json(data)?;
        }

        streaming_writer.finalize()?;
    } else {
        // Use traditional buffered writer for smaller files
        let file = File::create(path)?;
        let mut writer = BufWriter::with_capacity(options.buffer_size, file);

        if use_compact {
            serde_json::to_writer(&mut writer, data)?;
        } else {
            serde_json::to_writer_pretty(&mut writer, data)?;
        }

        writer.flush()?;
    }

    Ok(())
}

/// Estimate JSON size for format decision
fn estimate_json_size(data: &serde_json::Value) -> usize {
    // Quick estimation based on structure
    match data {
        serde_json::Value::Object(obj) => {
            obj.len() * 50 + obj.values().map(estimate_json_size).sum::<usize>()
        }
        serde_json::Value::Array(arr) => {
            arr.len() * 20 + arr.iter().map(estimate_json_size).sum::<usize>()
        }
        serde_json::Value::String(s) => s.len() + 10,
        _ => 20,
    }
}
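
// A worked example of the heuristic above (a hedged sketch added for
// illustration): {"a": "hi", "b": [1, 2]} costs 2 keys * 50, plus the string
// (2 + 10), plus the array (2 * 20 + two numbers at 20 each) = 192 bytes.
#[cfg(test)]
mod estimate_json_size_tests {
    use super::*;

    #[test]
    fn estimates_small_object() {
        let data = serde_json::json!({"a": "hi", "b": [1, 2]});
        assert_eq!(estimate_json_size(&data), 192);
    }
}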

/// Convert legacy ExportOptions to OptimizedExportOptions for backward compatibility
#[allow(dead_code)]
fn convert_legacy_options_to_optimized(
    legacy: crate::core::tracker::ExportOptions,
) -> OptimizedExportOptions {
    let mut optimized = OptimizedExportOptions {
        buffer_size: legacy.buffer_size,
        use_compact_format: Some(!legacy.verbose_logging), // Verbose = pretty format
        ..Default::default()
    };

    // Determine optimization level based on legacy settings
    if legacy.include_system_allocations {
        // System allocations = comprehensive analysis = Maximum optimization
        optimized.optimization_level = OptimizationLevel::Maximum;
        optimized.enable_enhanced_ffi_analysis = true;
        optimized.enable_boundary_event_processing = true;
        optimized.enable_memory_passport_tracking = true;
        optimized.enable_security_analysis = true;
    } else {
        // User-focused mode = High optimization (default)
        optimized.optimization_level = OptimizationLevel::High;
    }

    // Enable compression if requested in legacy options
    if legacy.compress_output {
        optimized.use_compact_format = Some(true);
        optimized.buffer_size = optimized.buffer_size.max(512 * 1024); // Larger buffer for compression
    }

    // Adjust parallel processing based on expected load
    optimized.parallel_processing =
        legacy.include_system_allocations || legacy.buffer_size > 128 * 1024;

    println!("🔄 Converted legacy ExportOptions to OptimizedExportOptions:");
    println!(
        "   - Optimization level: {:?}",
        optimized.optimization_level
    );
    println!("   - Buffer size: {} KB", optimized.buffer_size / 1024);
    println!(
        "   - Parallel processing: {}",
        optimized.parallel_processing
    );
    println!(
        "   - Enhanced features: {}",
        optimized.enable_enhanced_ffi_analysis
    );

    optimized
}

/// Ultra-fast export implementation (legacy methods for backward compatibility)
impl MemoryTracker {
    /// Optimized export to standard 4 JSON files (replaces export_separated_json_simple)
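    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative only), assuming a tracker instance is at hand:
    ///
    /// ```rust,ignore
    /// tracker.export_optimized_json_files("output/snapshot")?;
    /// // Writes snapshot_memory_analysis.json, snapshot_lifetime.json,
    /// // snapshot_unsafe_ffi.json and snapshot_performance.json next to the base path.
    /// ```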
    pub fn export_optimized_json_files<P: AsRef<Path>>(&self, base_path: P) -> TrackingResult<()> {
        let options = OptimizedExportOptions::default();
        self.export_optimized_json_files_with_options(base_path, options)
    }

    /// Export to 5 JSON files including complex types analysis
    pub fn export_optimized_json_files_with_complex_types<P: AsRef<Path>>(
        &self,
        base_path: P,
    ) -> TrackingResult<()> {
        let options = OptimizedExportOptions::default();
        self.export_extensible_json_files_with_options(
            base_path,
            &JsonFileType::standard_five(),
            options,
        )
    }

    /// Optimized export to standard 4 JSON files with custom options
    pub fn export_optimized_json_files_with_options<P: AsRef<Path>>(
        &self,
        base_path: P,
        options: OptimizedExportOptions,
    ) -> TrackingResult<()> {
        let start_time = std::time::Instant::now();
        println!("🚀 Starting optimized 4-file JSON export...");

        let base_path = base_path.as_ref();
        let base_name = base_path
            .file_stem()
            .and_then(|s| s.to_str())
            .unwrap_or("export");
        let parent_dir = base_path.parent().unwrap_or(Path::new("."));

        // Get data once for all files
        let allocations = self.get_active_allocations()?;
        let stats = self.get_stats()?;

        println!(
            "📊 Processing {} allocations across 4 standard files...",
            allocations.len()
        );

        // 1. Memory Analysis JSON (standard file 1)
        let memory_path = parent_dir.join(format!("{base_name}_memory_analysis.json"));
        let memory_data = create_optimized_memory_analysis(&allocations, &stats, &options)?;
        write_json_optimized(&memory_path, &memory_data, &options)?;

        // 2. Lifetime Analysis JSON (standard file 2)
        let lifetime_path = parent_dir.join(format!("{base_name}_lifetime.json"));
        let lifetime_data = create_optimized_lifetime_analysis(&allocations, &options)?;
        write_json_optimized(&lifetime_path, &lifetime_data, &options)?;

        // 3. Unsafe FFI Analysis JSON (standard file 3)
        let unsafe_path = parent_dir.join(format!("{base_name}_unsafe_ffi.json"));
        let unsafe_data = create_optimized_unsafe_ffi_analysis(&allocations, &options)?;
        write_json_optimized(&unsafe_path, &unsafe_data, &options)?;

        // 4. Performance Analysis JSON (standard file 4)
        let perf_path = parent_dir.join(format!("{base_name}_performance.json"));
        let perf_data =
            create_optimized_performance_analysis(&allocations, &stats, start_time, &options)?;
        write_json_optimized(&perf_path, &perf_data, &options)?;

        let total_duration = start_time.elapsed();
        println!("✅ Optimized 4-file export completed in {total_duration:?}");
        println!("📁 Generated standard files:");
        println!("   1. {base_name}_memory_analysis.json");
        println!("   2. {base_name}_lifetime.json");
        println!("   3. {base_name}_unsafe_ffi.json");
        println!("   4. {base_name}_performance.json");

        // Show optimization effects
        if options.parallel_processing {
            println!("💡 Applied parallel processing optimization");
        }
        if options.enable_type_cache {
            println!("💡 Applied type inference caching");
        }
        println!(
            "💡 Applied optimized buffering ({} KB)",
            options.buffer_size / 1024
        );

        Ok(())
    }

    /// A generic export method reserved for future expansion; additional JSON files (e.g., a fifth or sixth) can be added easily
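    ///
    /// # Example
    ///
    /// A hedged sketch (illustrative only) requesting two specific reports:
    ///
    /// ```rust,ignore
    /// tracker.export_extensible_json_files(
    ///     "output/snapshot",
    ///     &[JsonFileType::ComplexTypes, JsonFileType::SecurityViolations],
    /// )?;
    /// ```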
    pub fn export_extensible_json_files<P: AsRef<Path>>(
        &self,
        base_path: P,
        file_types: &[JsonFileType],
    ) -> TrackingResult<()> {
        let options = OptimizedExportOptions::default();
        self.export_extensible_json_files_with_options(base_path, file_types, options)
    }

    /// A generic export method reserved for future expansion; additional JSON files (e.g., a fifth or sixth) can be added easily
    pub fn export_extensible_json_files_with_options<P: AsRef<Path>>(
        &self,
        base_path: P,
        file_types: &[JsonFileType],
        options: OptimizedExportOptions,
    ) -> TrackingResult<()> {
        let start_time = std::time::Instant::now();
        println!(
            "🚀 Starting extensible JSON export for {} files...",
            file_types.len()
        );

        let base_path = base_path.as_ref();
        let base_name = base_path
            .file_stem()
            .and_then(|s| s.to_str())
            .unwrap_or("export");
        let parent_dir = base_path.parent().unwrap_or(Path::new("."));

        // Get data once for all files
        let allocations = self.get_active_allocations()?;
        let stats = self.get_stats()?;

        println!("📊 Processing {} allocations...", allocations.len());

        // Generate files
        for file_type in file_types {
            let (filename, data) = match file_type {
                JsonFileType::MemoryAnalysis => {
                    let filename = format!("{base_name}_memory_analysis.json");
                    let data = create_optimized_memory_analysis(&allocations, &stats, &options)?;
                    (filename, data)
                }
                JsonFileType::Lifetime => {
                    let filename = format!("{base_name}_lifetime.json");
                    let data = create_optimized_lifetime_analysis(&allocations, &options)?;
                    (filename, data)
                }
                JsonFileType::UnsafeFfi => {
                    let filename = format!("{base_name}_unsafe_ffi.json");
                    let data = create_optimized_unsafe_ffi_analysis(&allocations, &options)?;
                    (filename, data)
                }
                JsonFileType::Performance => {
                    let filename = format!("{base_name}_performance.json");
                    let data = create_optimized_performance_analysis(
                        &allocations,
                        &stats,
                        start_time,
                        &options,
                    )?;
                    (filename, data)
                }
                JsonFileType::ComplexTypes => {
                    let filename = format!("{base_name}_complex_types.json");
                    let data = create_optimized_complex_types_analysis(&allocations, &options)?;
                    (filename, data)
                }
                JsonFileType::SecurityViolations => {
                    let filename = format!("{base_name}_security_violations.json");
                    let data = create_security_violation_analysis(&allocations, &options)?;
                    (filename, data)
                } // JsonFileType::AsyncAnalysis => { ... }
                  // JsonFileType::ThreadSafety => { ... }
            };

            let file_path = parent_dir.join(filename);
            write_json_optimized(&file_path, &data, &options)?;
            println!(
                "   ✅ Generated: {}",
                file_path.file_name().unwrap().to_string_lossy()
            );
        }

        let total_duration = start_time.elapsed();
        println!("✅ Extensible export completed in {total_duration:?}");

        Ok(())
    }
}

/// Create optimized memory analysis
fn create_optimized_memory_analysis(
    allocations: &[AllocationInfo],
    stats: &crate::core::types::MemoryStats,
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    let processed_allocations = process_allocations_optimized(allocations, options)?;

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "memory_analysis_optimized",
            "optimization_level": "high",
            "total_allocations": allocations.len(),
            "export_version": "2.0",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "memory_stats": {
            "total_allocated": stats.total_allocated,
            "active_memory": stats.active_memory,
            "peak_memory": stats.peak_memory,
            "total_allocations": stats.total_allocations
        },
        "allocations": processed_allocations
    }))
}

/// Create optimized lifetime analysis
fn create_optimized_lifetime_analysis(
    allocations: &[AllocationInfo],
    _options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    // Lifetime analysis: group analysis by scope
    let mut scope_analysis: HashMap<String, (usize, usize, Vec<usize>)> = HashMap::new();

    for alloc in allocations {
        let scope = alloc.scope_name.as_deref().unwrap_or("global");
        let entry = scope_analysis
            .entry(scope.to_string())
            .or_insert((0, 0, Vec::new()));
        entry.0 += alloc.size; // total size
        entry.1 += 1; // allocation count
        entry.2.push(alloc.size); // size list for statistics
    }

    // Convert to JSON format
    let mut scope_stats: Vec<_> = scope_analysis
        .into_iter()
        .map(|(scope, (total_size, count, sizes))| {
            let avg_size = if count > 0 { total_size / count } else { 0 };
            let max_size = sizes.iter().max().copied().unwrap_or(0);
            let min_size = sizes.iter().min().copied().unwrap_or(0);

            serde_json::json!({
                "scope_name": scope,
                "total_size": total_size,
                "allocation_count": count,
                "average_size": avg_size,
                "max_size": max_size,
                "min_size": min_size
            })
        })
        .collect();

    // Sort by total size
    scope_stats.sort_by(|a, b| {
        b["total_size"]
            .as_u64()
            .unwrap_or(0)
            .cmp(&a["total_size"].as_u64().unwrap_or(0))
    });

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "lifetime_analysis_optimized",
            "optimization_level": "high",
            "total_scopes": scope_stats.len(),
            "export_version": "2.0",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "scope_analysis": scope_stats,
        "summary": {
            "total_allocations": allocations.len(),
            "unique_scopes": scope_stats.len()
        }
    }))
}

/// Create optimized unsafe FFI analysis
fn create_optimized_unsafe_ffi_analysis(
    allocations: &[AllocationInfo],
    _options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    // Analyze possible unsafe operations and FFI-related allocations
    let mut unsafe_indicators = Vec::new();
    let mut ffi_patterns = Vec::new();

    for alloc in allocations {
        // Check for unsafe patterns in type names
        if let Some(type_name) = &alloc.type_name {
            if type_name.contains("*mut") || type_name.contains("*const") {
                unsafe_indicators.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "raw_pointer",
                    "type_name": type_name,
                    "size": alloc.size,
                    "risk_level": "high"
                }));
            } else if type_name.contains("extern") || type_name.contains("libc::") {
                ffi_patterns.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "ffi_related",
                    "type_name": type_name,
                    "size": alloc.size,
                    "risk_level": "medium"
                }));
            }
        }

        // Check for unsafe patterns in variable names
        if let Some(var_name) = &alloc.var_name {
            if var_name.contains("unsafe") || var_name.contains("raw") {
                unsafe_indicators.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "unsafe_variable",
                    "var_name": var_name,
                    "size": alloc.size,
                    "risk_level": "medium"
                }));
            }
        }
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "unsafe_ffi_analysis_optimized",
            "optimization_level": "high",
            "total_allocations_analyzed": allocations.len(),
            "export_version": "2.0",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "unsafe_indicators": unsafe_indicators,
        "ffi_patterns": ffi_patterns,
        "summary": {
            "unsafe_count": unsafe_indicators.len(),
            "ffi_count": ffi_patterns.len(),
            "total_risk_items": unsafe_indicators.len() + ffi_patterns.len(),
            "risk_assessment": if unsafe_indicators.len() + ffi_patterns.len() > 10 {
                "high"
            } else if unsafe_indicators.len() + ffi_patterns.len() > 5 {
                "medium"
            } else {
                "low"
            }
        }
    }))
}

/// Create optimized performance analysis
fn create_optimized_performance_analysis(
    allocations: &[AllocationInfo],
    stats: &crate::core::types::MemoryStats,
    start_time: std::time::Instant,
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    let processing_time = start_time.elapsed();
    let allocations_per_second = if processing_time.as_secs() > 0 {
        allocations.len() as f64 / processing_time.as_secs_f64()
    } else {
        allocations.len() as f64 / 0.001 // assume minimum 1ms
    };

    // Analyze allocation size distribution
    let mut size_distribution = HashMap::new();
    for alloc in allocations {
        let category = match alloc.size {
            0..=64 => "tiny",
            65..=256 => "small",
            257..=1024 => "medium",
            1025..=4096 => "large",
            4097..=16384 => "huge",
            _ => "massive",
        };
        *size_distribution.entry(category).or_insert(0) += 1;
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "performance_analysis_optimized",
            "optimization_level": "high",
            "export_version": "2.0",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "export_performance": {
            "total_processing_time_ms": processing_time.as_millis(),
            "allocations_processed": allocations.len(),
            "processing_rate": {
                "allocations_per_second": allocations_per_second,
                "performance_class": if allocations_per_second > 10000.0 {
                    "excellent"
                } else if allocations_per_second > 1000.0 {
                    "good"
                } else {
                    "needs_optimization"
                }
            }
        },
        "memory_performance": {
            "total_allocated": stats.total_allocated,
            "active_memory": stats.active_memory,
            "peak_memory": stats.peak_memory,
            "memory_efficiency": if stats.peak_memory > 0 {
                (stats.active_memory as f64 / stats.peak_memory as f64 * 100.0) as u64
            } else {
                100
            }
        },
        "allocation_distribution": size_distribution,
        "optimization_status": {
            "type_caching": options.enable_type_cache,
            "parallel_processing": options.parallel_processing,
            "buffer_size_kb": options.buffer_size / 1024,
            "batch_size": options.batch_size
        }
    }))
}

/// Create integrated memory analysis with all new pipeline components
#[allow(dead_code)]
fn create_integrated_memory_analysis(
    allocations: &[AllocationInfo],
    stats: &crate::core::types::MemoryStats,
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔧 Creating integrated memory analysis with enhanced pipeline...");

    // Use BatchProcessor for large datasets (simplified for now)
    let _processed_allocations = process_allocations_optimized(allocations, options)?;

    // Enhanced memory analysis with FFI integration
    let mut enhanced_allocations = Vec::new();
    for alloc in allocations {
        let mut enhanced_alloc = serde_json::json!({
            "ptr": format!("0x{:x}", alloc.ptr),
            "size": alloc.size,
            "type_name": alloc.type_name,
            "var_name": alloc.var_name,
            "scope_name": alloc.scope_name,
            "timestamp_alloc": alloc.timestamp_alloc,
            "timestamp_dealloc": alloc.timestamp_dealloc
        });

        // Add boundary events if enabled
        if options.enable_boundary_event_processing {
            if let Some(boundary_info) = analyze_boundary_events(alloc) {
                enhanced_alloc["boundary_events"] = boundary_info;
            }
        }

        // Add memory passport if enabled
        if options.enable_memory_passport_tracking {
            if let Some(passport_info) = get_memory_passport_info(alloc.ptr) {
                enhanced_alloc["memory_passport"] = passport_info;
            }
        }

        enhanced_allocations.push(enhanced_alloc);
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "integrated_memory_analysis",
            "optimization_level": format!("{:?}", options.optimization_level),
            "total_allocations": allocations.len(),
            "export_version": "2.0",
            "pipeline_features": {
                "batch_processing": options.parallel_processing && allocations.len() > options.batch_size,
                "boundary_events": options.enable_boundary_event_processing,
                "memory_passports": options.enable_memory_passport_tracking,
                "enhanced_ffi": options.enable_enhanced_ffi_analysis
            },
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "memory_stats": {
            "total_allocated": stats.total_allocated,
            "active_memory": stats.active_memory,
            "peak_memory": stats.peak_memory,
            "total_allocations": stats.total_allocations
        },
        "allocations": enhanced_allocations
    }))
}

/// Create integrated lifetime analysis with enhanced pipeline
#[allow(dead_code)]
fn create_integrated_lifetime_analysis(
    allocations: &[AllocationInfo],
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔧 Creating integrated lifetime analysis with enhanced pipeline...");

    // Use BatchProcessor for scope analysis
    let mut scope_analysis: HashMap<String, (usize, usize, Vec<usize>)> = HashMap::new();
    let mut lifecycle_events = Vec::new();

    // Process in batches if enabled
    if options.parallel_processing && allocations.len() > options.batch_size {
        let chunks: Vec<_> = allocations.chunks(options.batch_size).collect();
        let results: Vec<_> = chunks
            .par_iter()
            .map(|chunk| {
                let mut local_scope_analysis = HashMap::new();
                let mut local_events = Vec::new();

                for alloc in *chunk {
                    let scope = alloc.scope_name.as_deref().unwrap_or("global");
                    let entry =
                        local_scope_analysis
                            .entry(scope.to_string())
                            .or_insert((0, 0, Vec::new()));
                    entry.0 += alloc.size;
                    entry.1 += 1;
                    entry.2.push(alloc.size);

                    // Track lifecycle events with variable and type information
                    local_events.push(serde_json::json!({
                        "ptr": format!("0x{:x}", alloc.ptr),
                        "event": "allocation",
                        "scope": scope,
                        "timestamp": alloc.timestamp_alloc,
                        "size": alloc.size,
                        "var_name": alloc.var_name.as_deref().unwrap_or("unknown"),
                        "type_name": alloc.type_name.as_deref().unwrap_or("unknown")
                    }));
                }

                (local_scope_analysis, local_events)
            })
            .collect();

        // Merge results
        for (local_scope, local_events) in results {
            for (scope, (size, count, sizes)) in local_scope {
                let entry = scope_analysis.entry(scope).or_insert((0, 0, Vec::new()));
                entry.0 += size;
                entry.1 += count;
                entry.2.extend(sizes);
            }
            lifecycle_events.extend(local_events);
        }
    } else {
        // Sequential processing
        for alloc in allocations {
            let scope = alloc.scope_name.as_deref().unwrap_or("global");
            let entry = scope_analysis
                .entry(scope.to_string())
                .or_insert((0, 0, Vec::new()));
            entry.0 += alloc.size;
            entry.1 += 1;
            entry.2.push(alloc.size);

            lifecycle_events.push(serde_json::json!({
                "ptr": format!("0x{:x}", alloc.ptr),
                "event": "allocation",
                "scope": scope,
                "timestamp": alloc.timestamp_alloc,
                "size": alloc.size,
                "var_name": alloc.var_name.as_deref().unwrap_or("unknown"),
                "type_name": alloc.type_name.as_deref().unwrap_or("unknown")
            }));
        }
    }

    // Convert to JSON format
    let mut scope_stats: Vec<_> = scope_analysis
        .into_iter()
        .map(|(scope, (total_size, count, sizes))| {
            let avg_size = if count > 0 { total_size / count } else { 0 };
            let max_size = sizes.iter().max().copied().unwrap_or(0);
            let min_size = sizes.iter().min().copied().unwrap_or(0);

            serde_json::json!({
                "scope_name": scope,
                "total_size": total_size,
                "allocation_count": count,
                "average_size": avg_size,
                "max_size": max_size,
                "min_size": min_size
            })
        })
        .collect();

    scope_stats.sort_by(|a, b| {
        b["total_size"]
            .as_u64()
            .unwrap_or(0)
            .cmp(&a["total_size"].as_u64().unwrap_or(0))
    });

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "integrated_lifetime_analysis",
            "optimization_level": format!("{:?}", options.optimization_level),
            "total_scopes": scope_stats.len(),
            "export_version": "2.0",
            "pipeline_features": {
                "batch_processing": options.parallel_processing && allocations.len() > options.batch_size,
                "lifecycle_tracking": true
            },
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "scope_analysis": scope_stats,
        "lifecycle_events": lifecycle_events,
        "summary": {
            "total_allocations": allocations.len(),
            "unique_scopes": scope_stats.len(),
            "total_events": lifecycle_events.len()
        }
    }))
}

/// Create integrated unsafe FFI analysis with all enhanced features
#[allow(dead_code)]
fn create_integrated_unsafe_ffi_analysis(
    allocations: &[AllocationInfo],
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔧 Creating integrated unsafe FFI analysis with enhanced pipeline...");

    let mut unsafe_indicators = Vec::new();
    let mut ffi_patterns = Vec::new();
    let mut enhanced_ffi_data = Vec::new();
    let mut safety_violations = Vec::new();
    let mut boundary_events = Vec::new();

    // Get enhanced FFI data from tracker if available
    if options.enable_enhanced_ffi_analysis {
        let tracker = get_global_unsafe_ffi_tracker();
        if let Ok(enhanced_allocations) = tracker.get_enhanced_allocations() {
            for enhanced_alloc in enhanced_allocations {
                enhanced_ffi_data.push(serde_json::json!({
                    "ptr": format!("0x{:x}", enhanced_alloc.base.ptr),
                    "size": enhanced_alloc.base.size,
                    "source": format!("{:?}", enhanced_alloc.source),
                    "ffi_tracked": enhanced_alloc.ffi_tracked,
                    "cross_boundary_events": enhanced_alloc.cross_boundary_events.len(),
                    "safety_violations": enhanced_alloc.safety_violations.len()
                }));

                // Collect safety violations
                for violation in &enhanced_alloc.safety_violations {
                    let (violation_type, timestamp) = match violation {
                        SafetyViolation::DoubleFree { timestamp, .. } => ("DoubleFree", *timestamp),
                        SafetyViolation::InvalidFree { timestamp, .. } => {
                            ("InvalidFree", *timestamp)
                        }
                        SafetyViolation::PotentialLeak {
                            leak_detection_timestamp,
                            ..
                        } => ("PotentialLeak", *leak_detection_timestamp),
                        SafetyViolation::CrossBoundaryRisk { .. } => ("CrossBoundaryRisk", 0),
                    };

                    safety_violations.push(serde_json::json!({
                        "ptr": format!("0x{:x}", enhanced_alloc.base.ptr),
                        "violation_type": violation_type,
                        "description": format!("{:?}", violation),
                        "timestamp": timestamp
                    }));
                }

                // Collect boundary events
                if options.enable_boundary_event_processing {
                    for event in &enhanced_alloc.cross_boundary_events {
                        boundary_events.push(serde_json::json!({
                            "ptr": format!("0x{:x}", enhanced_alloc.base.ptr),
                            "event_type": format!("{:?}", event.event_type),
                            "from_context": event.from_context,
                            "to_context": event.to_context,
                            "timestamp": event.timestamp
                        }));
                    }
                }
            }
        }
    }

    // Analyze basic patterns in allocations
    for alloc in allocations {
        if let Some(type_name) = &alloc.type_name {
            if type_name.contains("*mut") || type_name.contains("*const") {
                unsafe_indicators.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "raw_pointer",
                    "type_name": type_name,
                    "size": alloc.size,
                    "risk_level": "high"
                }));
            } else if type_name.contains("extern") || type_name.contains("libc::") {
                ffi_patterns.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "ffi_related",
                    "type_name": type_name,
                    "size": alloc.size,
                    "risk_level": "medium"
                }));
            }
        }

        if let Some(var_name) = &alloc.var_name {
            if var_name.contains("unsafe") || var_name.contains("raw") {
                unsafe_indicators.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "unsafe_variable",
                    "var_name": var_name,
                    "size": alloc.size,
                    "risk_level": "medium"
                }));
            }
        }
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "integrated_unsafe_ffi_analysis",
            "optimization_level": format!("{:?}", options.optimization_level),
            "total_allocations_analyzed": allocations.len(),
            "export_version": "2.0",
            "pipeline_features": {
                "enhanced_ffi_analysis": options.enable_enhanced_ffi_analysis,
                "boundary_event_processing": options.enable_boundary_event_processing,
                "memory_passport_tracking": options.enable_memory_passport_tracking
            },
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "unsafe_indicators": unsafe_indicators,
        "ffi_patterns": ffi_patterns,
        "enhanced_ffi_data": enhanced_ffi_data,
        "safety_violations": safety_violations,
        "boundary_events": boundary_events,
        "summary": {
            "unsafe_count": unsafe_indicators.len(),
            "ffi_count": ffi_patterns.len(),
            "enhanced_entries": enhanced_ffi_data.len(),
            "safety_violations": safety_violations.len(),
            "boundary_events": boundary_events.len(),
            "total_risk_items": unsafe_indicators.len() + ffi_patterns.len() + safety_violations.len(),
            "risk_assessment": if safety_violations.len() > 5 {
                "critical"
            } else if unsafe_indicators.len() + ffi_patterns.len() > 10 {
                "high"
            } else if unsafe_indicators.len() + ffi_patterns.len() > 5 {
                "medium"
            } else {
                "low"
            }
        }
    }))
}
1325
1326/// Create integrated performance analysis with all pipeline metrics
1327#[allow(dead_code)]
1328fn create_integrated_performance_analysis(
1329    allocations: &[AllocationInfo],
1330    stats: &crate::core::types::MemoryStats,
1331    start_time: std::time::Instant,
1332    options: &OptimizedExportOptions,
1333) -> TrackingResult<serde_json::Value> {
1334    println!("🔧 Creating integrated performance analysis with enhanced pipeline...");
1335
1336    let processing_time = start_time.elapsed();
1337    let allocations_per_second = if processing_time.as_secs() > 0 {
1338        allocations.len() as f64 / processing_time.as_secs_f64()
1339    } else {
1340        allocations.len() as f64 / 0.001
1341    };
1342
1343    // Analyze allocation size distribution
1344    let mut size_distribution = HashMap::new();
1345    for alloc in allocations {
1346        let category = match alloc.size {
1347            0..=64 => "tiny",
1348            65..=256 => "small",
1349            257..=1024 => "medium",
1350            1025..=4096 => "large",
1351            4097..=16384 => "huge",
1352            _ => "massive",
1353        };
1354        *size_distribution.entry(category).or_insert(0) += 1;
1355    }
1356
1357    // Pipeline performance metrics
1358    let pipeline_metrics = serde_json::json!({
1359        "batch_processor": {
1360            "enabled": options.parallel_processing && allocations.len() > options.batch_size,
1361            "batch_size": options.batch_size,
1362            "estimated_batches": allocations.len().div_ceil(options.batch_size),
1363        },
1364        "streaming_writer": {
1365            "enabled": options.use_streaming_writer,
1366            "buffer_size_kb": options.buffer_size / 1024
1367        },
1368        "schema_validator": {
1369            "enabled": options.enable_schema_validation
1370        },
1371        "enhanced_features": {
1372            "ffi_analysis": options.enable_enhanced_ffi_analysis,
1373            "boundary_events": options.enable_boundary_event_processing,
1374            "memory_passports": options.enable_memory_passport_tracking
1375        }
1376    });
1377
1378    Ok(serde_json::json!({
1379        "metadata": {
1380            "analysis_type": "integrated_performance_analysis",
1381            "optimization_level": format!("{:?}", options.optimization_level),
1382            "export_version": "2.0",
1383            "timestamp": std::time::SystemTime::now()
1384                .duration_since(std::time::UNIX_EPOCH)
1385                .unwrap_or_default()
1386                .as_secs()
1387        },
1388        "export_performance": {
1389            "total_processing_time_ms": processing_time.as_millis(),
1390            "allocations_processed": allocations.len(),
1391            "processing_rate": {
1392                "allocations_per_second": allocations_per_second,
1393                "performance_class": if allocations_per_second > 10000.0 {
1394                    "excellent"
1395                } else if allocations_per_second > 1000.0 {
1396                    "good"
1397                } else {
1398                    "needs_optimization"
1399                }
1400            }
1401        },
1402        "memory_performance": {
1403            "total_allocated": stats.total_allocated,
1404            "active_memory": stats.active_memory,
1405            "peak_memory": stats.peak_memory,
1406            "memory_efficiency": if stats.peak_memory > 0 {
1407                (stats.active_memory as f64 / stats.peak_memory as f64 * 100.0) as u64
1408            } else {
1409                100
1410            }
1411        },
1412        "allocation_distribution": size_distribution,
1413        "pipeline_metrics": pipeline_metrics,
1414        "optimization_status": {
1415            "type_caching": options.enable_type_cache,
1416            "parallel_processing": options.parallel_processing,
1417            "buffer_size_kb": options.buffer_size / 1024,
1418            "batch_size": options.batch_size,
1419            "streaming_enabled": options.use_streaming_writer,
1420            "schema_validation": options.enable_schema_validation
1421        }
1422    }))
1423}
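
// Worked example of the derived metrics above (hypothetical numbers):
// active_memory = 48 MiB with peak_memory = 96 MiB yields a
// memory_efficiency of (48 / 96) * 100 = 50, and 25_000 allocations
// processed in 2 s give 12_500 allocations/second, which the thresholds
// classify as "excellent" (> 10_000/s).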
1424
1425/// Create optimized complex types analysis
1426fn create_optimized_complex_types_analysis(
1427    allocations: &[AllocationInfo],
1428    options: &OptimizedExportOptions,
1429) -> TrackingResult<serde_json::Value> {
1430    // Complex type analysis: Identify and analyze various complex Rust types
1431    let mut complex_type_stats: HashMap<String, ComplexTypeInfo> = HashMap::new();
1432    let mut generic_types = Vec::new();
1433    let mut trait_objects = Vec::new();
1434    let mut smart_pointers = Vec::new();
1435    let mut collections = Vec::new();
1436
    // Use parallel processing for complex-type analysis on large datasets (> 1000 allocations)
1438    let use_parallel = options.parallel_processing && allocations.len() > 1000;
1439
1440    if use_parallel {
1441        // Parallel analysis of complex types
1442        let results: Vec<_> = allocations
1443            .par_chunks(options.batch_size)
1444            .map(analyze_complex_types_batch)
1445            .collect();
1446
1447        // Merge results
1448        for batch_result in results {
1449            for (type_name, info) in batch_result.type_stats {
1450                let entry = complex_type_stats
1451                    .entry(type_name)
1452                    .or_insert_with(ComplexTypeInfo::new);
1453                entry.merge(info);
1454            }
1455            generic_types.extend(batch_result.generic_types);
1456            trait_objects.extend(batch_result.trait_objects);
1457            smart_pointers.extend(batch_result.smart_pointers);
1458            collections.extend(batch_result.collections);
1459        }
1460    } else {
        // Sequential analysis of complex types
1462        let batch_result = analyze_complex_types_batch(allocations);
1463        complex_type_stats = batch_result.type_stats;
1464        generic_types = batch_result.generic_types;
1465        trait_objects = batch_result.trait_objects;
1466        smart_pointers = batch_result.smart_pointers;
1467        collections = batch_result.collections;
1468    }
1469
1470    // Convert to JSON format and sort
1471    let mut type_analysis: Vec<_> = complex_type_stats.into_iter()
1472        .map(|(type_name, info)| {
1473            serde_json::json!({
1474                "type_name": type_name,
1475                "category": info.category,
1476                "total_size": info.total_size,
1477                "allocation_count": info.allocation_count,
1478                "average_size": if info.allocation_count > 0 { 
1479                    info.total_size / info.allocation_count
1480                } else {
1481                    0
1482                },
1483                "max_size": info.max_size,
1484                "complexity_score": info.complexity_score,
1485                "memory_efficiency": calculate_memory_efficiency(&type_name, info.total_size, info.allocation_count),
1486                "optimization_suggestions": generate_optimization_suggestions(&type_name, &info)
1487            })
1488        })
1489        .collect();
1490
1491    // Sort by complexity score and total size
1492    type_analysis.sort_by(|a, b| {
1493        let score_cmp = b["complexity_score"]
1494            .as_u64()
1495            .unwrap_or(0)
1496            .cmp(&a["complexity_score"].as_u64().unwrap_or(0));
1497        if score_cmp == std::cmp::Ordering::Equal {
1498            b["total_size"]
1499                .as_u64()
1500                .unwrap_or(0)
1501                .cmp(&a["total_size"].as_u64().unwrap_or(0))
1502        } else {
1503            score_cmp
1504        }
1505    });
1506
1507    Ok(serde_json::json!({
1508        "metadata": {
1509            "analysis_type": "complex_types_analysis_optimized",
1510            "optimization_level": "high",
1511            "total_allocations_analyzed": allocations.len(),
1512            "unique_complex_types": type_analysis.len(),
1513            "export_version": "2.0",
1514            "timestamp": std::time::SystemTime::now()
1515                .duration_since(std::time::UNIX_EPOCH)
1516                .unwrap_or_default()
1517                .as_secs(),
1518            "processing_mode": if use_parallel { "parallel" } else { "sequential" }
1519        },
1520        "complex_type_analysis": type_analysis,
1521        "categorized_types": {
1522            "generic_types": generic_types,
1523            "trait_objects": trait_objects,
1524            "smart_pointers": smart_pointers,
1525            "collections": collections
1526        },
1527        "summary": {
1528            "total_complex_types": type_analysis.len(),
1529            "generic_type_count": generic_types.len(),
1530            "trait_object_count": trait_objects.len(),
1531            "smart_pointer_count": smart_pointers.len(),
1532            "collection_count": collections.len(),
1533            "complexity_distribution": calculate_complexity_distribution(&type_analysis)
1534        },
1535        "optimization_recommendations": generate_global_optimization_recommendations(&type_analysis)
1536    }))
1537}
1538
1539/// Complex type information structure
1540#[derive(Debug, Clone)]
1541struct ComplexTypeInfo {
    /// Type category (e.g. "Collection" or "SmartPointer")
1543    category: String,
1544    /// Total size of allocations
1545    total_size: usize,
1546    /// Number of allocations
1547    allocation_count: usize,
1548    /// Maximum size of allocations
1549    max_size: usize,
    /// Complexity score of the type
1551    complexity_score: u64,
1552}
1553
1554impl ComplexTypeInfo {
    /// Create an empty entry; category and complexity score are filled in
    /// when the first allocation of the type is seen.
    fn new() -> Self {
1556        Self {
1557            category: String::new(),
1558            total_size: 0,
1559            allocation_count: 0,
1560            max_size: 0,
1561            complexity_score: 0,
1562        }
1563    }
1564
    /// Fold another batch's entry for the same type into this one: sizes
    /// and counts accumulate, maxima are kept.
    fn merge(&mut self, other: ComplexTypeInfo) {
1566        self.total_size += other.total_size;
1567        self.allocation_count += other.allocation_count;
1568        self.max_size = self.max_size.max(other.max_size);
1569        self.complexity_score = self.complexity_score.max(other.complexity_score);
1570        if self.category.is_empty() {
1571            self.category = other.category;
1572        }
1573    }
1574}
1575
1576/// Batch analysis result
1577struct ComplexTypeBatchResult {
1578    type_stats: HashMap<String, ComplexTypeInfo>,
1579    generic_types: Vec<serde_json::Value>,
1580    trait_objects: Vec<serde_json::Value>,
1581    smart_pointers: Vec<serde_json::Value>,
1582    collections: Vec<serde_json::Value>,
1583}
1584
1585/// Batch analyze complex types
1586fn analyze_complex_types_batch(allocations: &[AllocationInfo]) -> ComplexTypeBatchResult {
1587    let mut type_stats: HashMap<String, ComplexTypeInfo> = HashMap::new();
1588    let mut generic_types = Vec::new();
1589    let mut trait_objects = Vec::new();
1590    let mut smart_pointers = Vec::new();
1591    let mut collections = Vec::new();
1592
1593    for alloc in allocations {
1594        if let Some(type_name) = &alloc.type_name {
1595            let normalized_type = normalize_type_name(type_name);
1596            let category = categorize_complex_type(type_name);
1597            let complexity = calculate_type_complexity(type_name);
1598
1599            // Update type statistics
1600            let entry = type_stats
1601                .entry(normalized_type.clone())
1602                .or_insert_with(|| {
1603                    let mut info = ComplexTypeInfo::new();
1604                    info.category = category.clone();
1605                    info.complexity_score = complexity;
1606                    info
1607                });
1608            entry.total_size += alloc.size;
1609            entry.allocation_count += 1;
1610            entry.max_size = entry.max_size.max(alloc.size);
1611
            // Collect per-category details for this allocation
1613            let type_info = serde_json::json!({
1614                "ptr": format!("0x{:x}", alloc.ptr),
1615                "type_name": type_name,
1616                "normalized_type": normalized_type,
1617                "size": alloc.size,
1618                "var_name": alloc.var_name.as_deref().unwrap_or("unnamed"),
1619                "complexity_score": complexity
1620            });
1621
1622            match category.as_str() {
1623                "Generic" => generic_types.push(type_info),
1624                "TraitObject" => trait_objects.push(type_info),
1625                "SmartPointer" => smart_pointers.push(type_info),
1626                "Collection" => collections.push(type_info),
1627                _ => {} // Other types are not collected
1628            }
1629        }
1630    }
1631
1632    ComplexTypeBatchResult {
1633        type_stats,
1634        generic_types,
1635        trait_objects,
1636        smart_pointers,
1637        collections,
1638    }
1639}
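
// Each record pushed into the category buckets above looks like this
// (illustrative values only):
//
//   {
//       "ptr": "0x7f3a10",
//       "type_name": "Vec<String>",
//       "normalized_type": "Vec<T>",
//       "size": 1024,
//       "var_name": "buffer",
//       "complexity_score": 6
//   }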
1640
/// Normalize a type name by collapsing its generic parameters to `<T>`
1642fn normalize_type_name(type_name: &str) -> String {
1643    // Remove specific generic parameters, keep structure
1644    if type_name.contains('<') {
1645        if let Some(base) = type_name.split('<').next() {
1646            format!("{base}<T>")
1647        } else {
1648            type_name.to_string()
1649        }
1650    } else {
1651        type_name.to_string()
1652    }
1653}
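
// Only the outermost generic is collapsed; nested parameters are dropped
// along with it. For example (illustrative):
//
//   normalize_type_name("HashMap<String, Vec<u8>>") == "HashMap<T>"
//   normalize_type_name("Vec<Vec<String>>")         == "Vec<T>"
//   normalize_type_name("String")                   == "String"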
1654
1655/// Categorize complex types
1656fn categorize_complex_type(type_name: &str) -> String {
1657    if type_name.contains("dyn ") {
1658        "TraitObject".to_string()
1659    } else if type_name.starts_with("Box<")
1660        || type_name.starts_with("Rc<")
1661        || type_name.starts_with("Arc<")
1662        || type_name.starts_with("RefCell<")
1663    {
1664        "SmartPointer".to_string()
1665    } else if type_name.starts_with("Vec<")
1666        || type_name.starts_with("HashMap<")
1667        || type_name.starts_with("BTreeMap<")
1668        || type_name.starts_with("HashSet<")
1669    {
1670        "Collection".to_string()
1671    } else if type_name.contains('<') && type_name.contains('>') {
1672        "Generic".to_string()
1673    } else if type_name.contains("::") {
1674        "ModulePath".to_string()
1675    } else {
1676        "Simple".to_string()
1677    }
1678}
1679
1680/// Calculate type complexity
1681fn calculate_type_complexity(type_name: &str) -> u64 {
1682    let mut score = 0u64;
1683
1684    // Base score
1685    score += 1;
1686
1687    // Generic parameters increase complexity
1688    score += type_name.matches('<').count() as u64 * 2;
1689
    // Nesting depth increases complexity; track the maximum bracket depth
    // rather than re-counting '<', so sibling generic parameters are not
    // overweighted relative to genuinely nested ones.
    let mut depth = 0u64;
    let mut max_depth = 0u64;
    for c in type_name.chars() {
        if c == '<' {
            depth += 1;
            max_depth = max_depth.max(depth);
        } else if c == '>' {
            depth = depth.saturating_sub(1);
        }
    }
    score += max_depth * 3;
1693
1694    // Special types increase complexity
1695    if type_name.contains("dyn ") {
1696        score += 5;
1697    }
1698    if type_name.contains("impl ") {
1699        score += 4;
1700    }
1701    if type_name.contains("async") {
1702        score += 3;
1703    }
1704    if type_name.contains("Future") {
1705        score += 3;
1706    }
1707
1708    // Smart pointers increase complexity
1709    if type_name.contains("Box<") {
1710        score += 2;
1711    }
1712    if type_name.contains("Rc<") {
1713        score += 3;
1714    }
1715    if type_name.contains("Arc<") {
1716        score += 4;
1717    }
1718    if type_name.contains("RefCell<") {
1719        score += 3;
1720    }
1721
1722    score
1723}
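
// Worked example of the scoring (illustrative): `Rc<RefCell<Vec<i32>>>`
// scores 1 (base) + 3 generics * 2 + nesting depth 3 * 3 + 3 (Rc)
// + 3 (RefCell) = 22.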
1724
1725/// Calculate memory efficiency based on type and average size
1726fn calculate_memory_efficiency(type_name: &str, total_size: usize, count: usize) -> u64 {
1727    if count == 0 {
1728        return 100;
1729    }
1730
1731    let avg_size = total_size / count;
1732
    // Heuristic score based on the container type and its average allocation size
1734    if type_name.contains("Vec<") {
1735        // Vec efficiency depends on capacity utilization
1736        if avg_size < 64 {
1737            60
1738        } else {
1739            85
1740        }
1741    } else if type_name.contains("HashMap<") {
1742        // HashMap has additional overhead
1743        if avg_size < 128 {
1744            50
1745        } else {
1746            75
1747        }
1748    } else if type_name.contains("Box<") {
1749        // Box is usually very efficient
1750        90
1751    } else if type_name.contains("Arc<") || type_name.contains("Rc<") {
1752        // Reference counting has overhead
1753        80
1754    } else {
1755        // Default efficiency
1756        85
1757    }
1758}
1759
1760/// Generate optimization suggestions based on type and allocation information
1761fn generate_optimization_suggestions(type_name: &str, info: &ComplexTypeInfo) -> Vec<String> {
1762    let mut suggestions = Vec::new();
1763
1764    if info.allocation_count > 100 {
1765        suggestions
1766            .push("Consider using object pooling for frequently allocated types".to_string());
1767    }
1768
1769    if type_name.contains("Vec<") && info.total_size > 1024 * 1024 {
1770        suggestions
1771            .push("Consider pre-allocating Vec capacity to reduce reallocations".to_string());
1772    }
1773
1774    if type_name.contains("HashMap<") && info.allocation_count > 50 {
1775        suggestions.push("Consider using FxHashMap for better performance".to_string());
1776    }
1777
1778    if type_name.contains("Box<") && info.allocation_count > 200 {
1779        suggestions
1780            .push("Consider using arena allocation for many small Box allocations".to_string());
1781    }
1782
1783    if info.complexity_score > 10 {
1784        suggestions
1785            .push("High complexity type - consider simplifying or using type aliases".to_string());
1786    }
1787
1788    suggestions
1789}
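
// For instance (illustrative): a `Vec<String>` with 150 allocations
// totalling 2 MiB and a complexity score of 12 triggers the pooling,
// pre-allocation, and complexity suggestions, three in total.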
1790
1791/// Calculate complexity distribution
1792fn calculate_complexity_distribution(type_analysis: &[serde_json::Value]) -> serde_json::Value {
1793    let mut low = 0;
1794    let mut medium = 0;
1795    let mut high = 0;
1796    let mut very_high = 0;
1797
1798    for analysis in type_analysis {
1799        if let Some(score) = analysis["complexity_score"].as_u64() {
1800            match score {
1801                0..=3 => low += 1,
1802                4..=7 => medium += 1,
1803                8..=15 => high += 1,
1804                _ => very_high += 1,
1805            }
1806        }
1807    }
1808
1809    serde_json::json!({
1810        "low_complexity": low,
1811        "medium_complexity": medium,
1812        "high_complexity": high,
1813        "very_high_complexity": very_high
1814    })
1815}
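
// Bucketing example (illustrative): scores 2, 5, 9, and 20 land in the
// low, medium, high, and very-high buckets respectively.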
1816
1817/// Generate global optimization recommendations based on type analysis
1818fn generate_global_optimization_recommendations(
1819    type_analysis: &[serde_json::Value],
1820) -> Vec<String> {
1821    let mut recommendations = Vec::new();
1822
1823    let total_types = type_analysis.len();
1824    let high_complexity_count = type_analysis
1825        .iter()
1826        .filter(|t| t["complexity_score"].as_u64().unwrap_or(0) > 10)
1827        .count();
1828
1829    if high_complexity_count > total_types / 4 {
1830        recommendations.push(
1831            "Consider refactoring high-complexity types to improve maintainability".to_string(),
1832        );
1833    }
1834
    let high_frequency_type_count = type_analysis
        .iter()
        .filter(|t| t["allocation_count"].as_u64().unwrap_or(0) > 100)
        .count();

    if high_frequency_type_count > 5 {
1841        recommendations.push(
1842            "Multiple types with high allocation frequency - consider object pooling".to_string(),
1843        );
1844    }
1845
1846    recommendations
1847        .push("Use 'cargo clippy' to identify additional optimization opportunities".to_string());
1848    recommendations.push(
1849        "Consider profiling with 'perf' or 'valgrind' for detailed performance analysis"
1850            .to_string(),
1851    );
1852
1853    recommendations
1854}
1855
1856/// Create optimized type analysis with caching
1857#[allow(dead_code)]
1858fn create_optimized_type_analysis(
1859    allocations: &[AllocationInfo],
1860    options: &OptimizedExportOptions,
1861) -> TrackingResult<serde_json::Value> {
1862    let mut type_stats: HashMap<String, (usize, usize, usize)> = HashMap::new();
1863
1864    // Use parallel processing for type analysis if beneficial
1865    let use_parallel = options.parallel_processing && allocations.len() > 1000;
1866
1867    if use_parallel {
1868        // Parallel type analysis
1869        let type_results: Vec<_> = allocations
1870            .par_chunks(options.batch_size)
1871            .map(|chunk| {
1872                let mut local_stats: HashMap<String, (usize, usize, usize)> = HashMap::new();
1873                for alloc in chunk {
1874                    let type_name = if let Some(name) = &alloc.type_name {
1875                        get_or_compute_type_info(name, alloc.size)
1876                    } else {
1877                        compute_enhanced_type_info("Unknown", alloc.size)
1878                    };
1879
1880                    let entry = local_stats.entry(type_name).or_insert((0, 0, 0));
1881                    entry.0 += alloc.size; // total size
1882                    entry.1 += 1; // count
1883                    entry.2 = entry.2.max(alloc.size); // max size
1884                }
1885                local_stats
1886            })
1887            .collect();
1888
1889        // Merge results
1890        for local_stats in type_results {
1891            for (type_name, (size, count, max_size)) in local_stats {
1892                let entry = type_stats.entry(type_name).or_insert((0, 0, 0));
1893                entry.0 += size;
1894                entry.1 += count;
1895                entry.2 = entry.2.max(max_size);
1896            }
1897        }
1898    } else {
1899        // Sequential type analysis
1900        for alloc in allocations {
1901            let type_name = if let Some(name) = &alloc.type_name {
1902                get_or_compute_type_info(name, alloc.size)
1903            } else {
1904                compute_enhanced_type_info("Unknown", alloc.size)
1905            };
1906
1907            let entry = type_stats.entry(type_name).or_insert((0, 0, 0));
1908            entry.0 += alloc.size;
1909            entry.1 += 1;
1910            entry.2 = entry.2.max(alloc.size);
1911        }
1912    }
1913
1914    // Convert to sorted JSON
1915    let mut type_list: Vec<_> = type_stats
1916        .into_iter()
1917        .map(|(type_name, (total_size, count, max_size))| {
1918            serde_json::json!({
1919                "type_name": type_name,
1920                "total_size": total_size,
1921                "allocation_count": count,
1922                "max_allocation_size": max_size,
1923                "average_size": if count > 0 { total_size / count } else { 0 }
1924            })
1925        })
1926        .collect();
1927
1928    // Sort by total size (descending)
1929    type_list.sort_by(|a, b| {
1930        b["total_size"]
1931            .as_u64()
1932            .unwrap_or(0)
1933            .cmp(&a["total_size"].as_u64().unwrap_or(0))
1934    });
1935
1936    Ok(serde_json::json!({
1937        "metadata": {
1938            "analysis_type": "type_analysis_optimized",
1939            "processing_mode": if use_parallel { "parallel" } else { "sequential" },
1940            "cache_enabled": options.enable_type_cache,
1941            "unique_types": type_list.len()
1942        },
1943        "type_statistics": type_list
1944    }))
1945}
1946
1947/// Create fast allocation summary
1948#[allow(dead_code)]
1949fn create_fast_allocation_summary(
1950    allocations: &[AllocationInfo],
1951    stats: &crate::core::types::MemoryStats,
1952) -> TrackingResult<serde_json::Value> {
1953    // Quick summary without heavy processing
1954    let total_size: usize = allocations.iter().map(|a| a.size).sum();
1955    let avg_size = if !allocations.is_empty() {
1956        total_size / allocations.len()
1957    } else {
1958        0
1959    };
1960
1961    // Size distribution (fast calculation)
1962    let mut small_count = 0;
1963    let mut medium_count = 0;
1964    let mut large_count = 0;
1965
1966    for alloc in allocations {
1967        match alloc.size {
1968            0..=256 => small_count += 1,
1969            257..=4096 => medium_count += 1,
1970            _ => large_count += 1,
1971        }
1972    }
1973
1974    Ok(serde_json::json!({
1975        "metadata": {
1976            "summary_type": "fast_allocation_summary",
1977            "generation_time": "minimal"
1978        },
1979        "overview": {
1980            "total_allocations": allocations.len(),
1981            "total_size": total_size,
1982            "average_size": avg_size,
1983            "active_memory": stats.active_memory,
1984            "peak_memory": stats.peak_memory
1985        },
1986        "size_distribution": {
1987            "small_allocations": {
1988                "count": small_count,
1989                "size_range": "0-256 bytes"
1990            },
1991            "medium_allocations": {
1992                "count": medium_count,
1993                "size_range": "257-4096 bytes"
1994            },
1995            "large_allocations": {
1996                "count": large_count,
1997                "size_range": ">4096 bytes"
1998            }
1999        }
2000    }))
2001}
2002
2003/// Process allocations with adaptive optimized pipeline
2004fn process_allocations_optimized(
2005    allocations: &[AllocationInfo],
2006    options: &OptimizedExportOptions,
2007) -> TrackingResult<Vec<serde_json::Value>> {
2008    let start_time = std::time::Instant::now();
2009    let mut processed = Vec::with_capacity(allocations.len());
2010
2011    // Get adaptive batch size if optimization is enabled
2012    let effective_batch_size = if options.enable_adaptive_optimization {
2013        if let Ok(optimizer) = ADAPTIVE_OPTIMIZER.lock() {
2014            optimizer.get_optimal_batch_size()
2015        } else {
2016            options.batch_size
2017        }
2018    } else {
2019        options.batch_size
2020    };
2021
2022    println!(
2023        "🔧 Processing {} allocations with adaptive batch size: {}",
2024        allocations.len(),
2025        effective_batch_size
2026    );
2027
2028    if options.parallel_processing && allocations.len() > effective_batch_size {
2029        // Parallel processing for large datasets
2030        let results: Vec<_> = allocations
2031            .par_chunks(effective_batch_size)
2032            .map(|chunk| {
2033                chunk
2034                    .iter()
2035                    .map(|alloc| {
2036                        serde_json::json!({
2037                            "ptr": format!("0x{:x}", alloc.ptr),
2038                            "size": alloc.size,
2039                            "type_name": alloc.type_name,
2040                            "var_name": alloc.var_name,
2041                            "scope_name": alloc.scope_name,
2042                            "timestamp": alloc.timestamp_alloc
2043                        })
2044                    })
2045                    .collect::<Vec<_>>()
2046            })
2047            .collect();
2048
2049        for chunk_result in results {
2050            processed.extend(chunk_result);
2051        }
2052    } else {
2053        // Sequential processing for smaller datasets
2054        for alloc in allocations {
2055            processed.push(serde_json::json!({
2056                "ptr": format!("0x{:x}", alloc.ptr),
2057                "size": alloc.size,
2058                "type_name": alloc.type_name,
2059                "var_name": alloc.var_name,
2060                "scope_name": alloc.scope_name,
2061                "timestamp": alloc.timestamp_alloc
2062            }));
2063        }
2064    }
2065
2066    // Record performance metrics if adaptive optimization is enabled
2067    if options.enable_adaptive_optimization {
2068        let processing_time = start_time.elapsed();
        // Rough lower bound: counts only the Value enums themselves, not
        // the heap-allocated strings and maps behind them.
        let memory_usage_mb =
            (processed.len() * std::mem::size_of::<serde_json::Value>()) / (1024 * 1024);
2071
2072        if let Ok(mut optimizer) = ADAPTIVE_OPTIMIZER.lock() {
2073            optimizer.record_batch_performance(
2074                effective_batch_size,
2075                processing_time,
2076                memory_usage_mb as u64,
2077                allocations.len(),
2078            );
2079        }
2080    }
2081
2082    Ok(processed)
2083}
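
// Minimal usage sketch (hypothetical call site; both names come from
// this module):
//
//   let options = OptimizedExportOptions::default().batch_size(500);
//   let records = process_allocations_optimized(&allocations, &options)?;
//   assert_eq!(records.len(), allocations.len());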
2084
2085/// Create security violation analysis with comprehensive context
2086#[allow(dead_code)]
2087fn create_security_violation_analysis(
2088    allocations: &[AllocationInfo],
2089    options: &OptimizedExportOptions,
2090) -> TrackingResult<serde_json::Value> {
2091    println!("🔒 Creating comprehensive security violation analysis...");
2092
2093    if !options.enable_security_analysis {
2094        return Ok(serde_json::json!({
2095            "metadata": {
2096                "analysis_type": "security_violations",
2097                "status": "disabled",
2098                "message": "Security analysis is disabled in export options"
2099            }
2100        }));
2101    }
2102
2103    // Configure security analyzer
2104    let analysis_config = AnalysisConfig {
2105        max_related_allocations: 10,
2106        max_stack_depth: 20,
2107        enable_correlation_analysis: true,
2108        include_low_severity: options.include_low_severity_violations,
2109        generate_integrity_hashes: options.generate_integrity_hashes,
2110    };
2111
2112    // Get security analyzer and update with current allocations
2113    let mut violation_reports = Vec::new();
2114    let mut security_summary = serde_json::json!({});
2115
2116    if let Ok(mut analyzer) = SECURITY_ANALYZER.lock() {
2117        // Update analyzer configuration
2118        *analyzer = SecurityViolationAnalyzer::new(analysis_config);
2119        analyzer.update_allocations(allocations.to_vec());
2120
2121        // Analyze violations from unsafe FFI tracker
2122        if let Ok(enhanced_allocations) = get_global_unsafe_ffi_tracker().get_enhanced_allocations()
2123        {
2124            for enhanced_alloc in enhanced_allocations {
2125                for violation in &enhanced_alloc.safety_violations {
2126                    if let Ok(violation_id) =
2127                        analyzer.analyze_violation(violation, enhanced_alloc.base.ptr)
2128                    {
2129                        println!("   ✅ Analyzed violation: {violation_id}");
2130                    }
2131                }
2132            }
2133        }
2134
2135        // Get all violation reports
2136        let all_reports = analyzer.get_all_reports();
2137
2138        // Filter by severity if needed
2139        let filtered_reports: Vec<_> = if options.include_low_severity_violations {
2140            all_reports.values().collect()
2141        } else {
2142            analyzer.get_reports_by_severity(ViolationSeverity::Medium)
2143        };
2144
2145        // Convert reports to JSON
2146        for report in &filtered_reports {
2147            violation_reports.push(serde_json::json!({
2148                "violation_id": report.violation_id,
2149                "violation_type": report.violation_type,
2150                "severity": format!("{:?}", report.severity),
2151                "description": report.description,
2152                "technical_details": report.technical_details,
2153                "memory_snapshot": {
2154                    "timestamp_ns": report.memory_snapshot.timestamp_ns,
2155                    "total_allocated_bytes": report.memory_snapshot.total_allocated_bytes,
2156                    "active_allocation_count": report.memory_snapshot.active_allocation_count,
2157                    "involved_addresses": report.memory_snapshot.involved_addresses,
2158                    "memory_pressure": format!("{:?}", report.memory_snapshot.memory_pressure),
2159                    "stack_trace": report.memory_snapshot.stack_trace.iter().map(|frame| {
2160                        serde_json::json!({
2161                            "function_name": frame.function_name,
2162                            "file_path": frame.file_path,
2163                            "line_number": frame.line_number,
2164                            "frame_address": frame.frame_address,
2165                            "is_unsafe": frame.is_unsafe,
2166                            "is_ffi": frame.is_ffi
2167                        })
2168                    }).collect::<Vec<_>>(),
2169                    "related_allocations": report.memory_snapshot.related_allocations.iter().map(|alloc| {
2170                        serde_json::json!({
2171                            "address": alloc.address,
2172                            "size": alloc.size,
2173                            "type_name": alloc.type_name,
2174                            "variable_name": alloc.variable_name,
2175                            "allocated_at_ns": alloc.allocated_at_ns,
2176                            "is_active": alloc.is_active,
2177                            "relationship": format!("{:?}", alloc.relationship)
2178                        })
2179                    }).collect::<Vec<_>>()
2180                },
2181                "impact_assessment": {
2182                    "exploitability_score": report.impact_assessment.exploitability_score,
2183                    "data_corruption_risk": report.impact_assessment.data_corruption_risk,
2184                    "information_disclosure_risk": report.impact_assessment.information_disclosure_risk,
2185                    "denial_of_service_risk": report.impact_assessment.denial_of_service_risk,
2186                    "code_execution_risk": report.impact_assessment.code_execution_risk,
2187                    "overall_risk_score": report.impact_assessment.overall_risk_score
2188                },
2189                "remediation_suggestions": report.remediation_suggestions,
2190                "correlated_violations": report.correlated_violations,
2191                "integrity_hash": report.integrity_hash,
2192                "generated_at_ns": report.generated_at_ns
2193            }));
2194        }
2195
2196        // Generate security summary
2197        security_summary = analyzer.generate_security_summary();
2198    }
2199
2200    Ok(serde_json::json!({
2201        "metadata": {
2202            "analysis_type": "security_violations",
2203            "export_version": "2.0",
2204            "total_violations": violation_reports.len(),
2205            "analysis_enabled": options.enable_security_analysis,
2206            "include_low_severity": options.include_low_severity_violations,
2207            "integrity_hashes_enabled": options.generate_integrity_hashes,
2208            "timestamp": std::time::SystemTime::now()
2209                .duration_since(std::time::UNIX_EPOCH)
2210                .unwrap_or_default()
2211                .as_secs()
2212        },
2213        "violation_reports": violation_reports,
2214        "security_summary": security_summary,
2215        "data_integrity": {
2216            "total_reports": violation_reports.len(),
2217            "reports_with_hashes": violation_reports.iter()
2218                .filter(|r| !r["integrity_hash"].as_str().unwrap_or("").is_empty())
2219                .count(),
            "verification_status": "all_verified" // Placeholder: hash verification is not yet implemented
2221        },
2222        "analysis_recommendations": [
2223            if violation_reports.is_empty() {
2224                "No security violations detected in current analysis"
2225            } else {
2226                "Review all security violations and implement suggested remediations"
2227            },
2228            "Enable continuous security monitoring for production systems",
2229            "Implement automated violation detection and alerting",
2230            "Regular security audits and penetration testing recommended"
2231        ]
2232    }))
2233}
2234
2235/// Create performance metrics
2236#[allow(dead_code)]
2237fn create_performance_metrics(
2238    allocations: &[AllocationInfo],
2239    start_time: std::time::Instant,
2240) -> TrackingResult<serde_json::Value> {
2241    let processing_time = start_time.elapsed();
    // Clamp elapsed time to 1ms; branching on whole seconds treated every
    // sub-second run as exactly 1ms, which inflated the reported rate.
    let allocations_per_second =
        allocations.len() as f64 / processing_time.as_secs_f64().max(0.001);
2247
2248    Ok(serde_json::json!({
2249        "metadata": {
2250            "metrics_type": "performance_optimized",
2251            "measurement_time": processing_time.as_millis()
2252        },
2253        "performance": {
2254            "total_processing_time_ms": processing_time.as_millis(),
2255            "allocations_processed": allocations.len(),
2256            "processing_rate": {
2257                "allocations_per_second": allocations_per_second,
2258                "performance_class": if allocations_per_second > 10000.0 {
2259                    "excellent"
2260                } else if allocations_per_second > 1000.0 {
2261                    "good"
2262                } else {
2263                    "needs_optimization"
2264                }
2265            }
2266        },
2267        "optimization_status": {
2268            "type_caching": "enabled",
2269            "parallel_processing": "auto-detected",
2270            "buffer_optimization": "enabled",
2271            "format_optimization": "auto-detected"
2272        }
2273    }))
2274}
2275
2276#[cfg(test)]
2277mod tests {
2278    use super::*;
2279    use crate::core::types::AllocationInfo;
2280    use std::time::Instant;
2281
2282    fn create_test_allocation(
2283        ptr: usize,
2284        size: usize,
2285        type_name: Option<String>,
2286        var_name: Option<String>,
2287    ) -> AllocationInfo {
2288        AllocationInfo {
2289            ptr,
2290            size,
2291            var_name,
2292            type_name,
2293            scope_name: None,
2294            timestamp_alloc: 1000,
2295            timestamp_dealloc: None,
2296            thread_id: "test_thread".to_string(),
2297            borrow_count: 0,
2298            stack_trace: None,
2299            is_leaked: false,
2300            lifetime_ms: None,
2301            borrow_info: None,
2302            clone_info: None,
2303            ownership_history_available: false,
2304            smart_pointer_info: None,
2305            memory_layout: None,
2306            generic_info: None,
2307            dynamic_type_info: None,
2308            runtime_state: None,
2309            stack_allocation: None,
2310            temporary_object: None,
2311            fragmentation_analysis: None,
2312            generic_instantiation: None,
2313            type_relationships: None,
2314            type_usage: None,
2315            function_call_tracking: None,
2316            lifecycle_tracking: None,
2317            access_tracking: None,
2318            drop_chain_analysis: None,
2319        }
2320    }
2321
2322    #[test]
2323    fn test_json_file_type_standard_four() {
2324        let standard_four = JsonFileType::standard_four();
2325        assert_eq!(standard_four.len(), 4);
2326        assert!(standard_four.contains(&JsonFileType::MemoryAnalysis));
2327        assert!(standard_four.contains(&JsonFileType::Lifetime));
2328        assert!(standard_four.contains(&JsonFileType::UnsafeFfi));
2329        assert!(standard_four.contains(&JsonFileType::Performance));
2330    }
2331
2332    #[test]
2333    fn test_json_file_type_standard_five() {
2334        let standard_five = JsonFileType::standard_five();
2335        assert_eq!(standard_five.len(), 5);
2336        assert!(standard_five.contains(&JsonFileType::MemoryAnalysis));
2337        assert!(standard_five.contains(&JsonFileType::Lifetime));
2338        assert!(standard_five.contains(&JsonFileType::UnsafeFfi));
2339        assert!(standard_five.contains(&JsonFileType::Performance));
2340        assert!(standard_five.contains(&JsonFileType::ComplexTypes));
2341    }
2342
2343    #[test]
2344    fn test_json_file_type_file_suffix() {
2345        assert_eq!(
2346            JsonFileType::MemoryAnalysis.file_suffix(),
2347            "memory_analysis"
2348        );
2349        assert_eq!(JsonFileType::Lifetime.file_suffix(), "lifetime");
2350        assert_eq!(JsonFileType::UnsafeFfi.file_suffix(), "unsafe_ffi");
2351        assert_eq!(JsonFileType::Performance.file_suffix(), "performance");
2352        assert_eq!(JsonFileType::ComplexTypes.file_suffix(), "complex_types");
2353        assert_eq!(
2354            JsonFileType::SecurityViolations.file_suffix(),
2355            "security_violations"
2356        );
2357    }
2358
2359    #[test]
2360    fn test_optimized_export_options_default() {
2361        let options = OptimizedExportOptions::default();
2362
2363        assert!(options.parallel_processing);
2364        assert_eq!(options.buffer_size, 256 * 1024);
2365        assert!(options.use_compact_format.is_none());
2366        assert!(options.enable_type_cache);
2367        assert_eq!(options.batch_size, 1000);
2368        assert!(options.use_streaming_writer);
2369        assert!(options.enable_schema_validation);
2370        assert_eq!(options.optimization_level, OptimizationLevel::High);
2371        assert!(options.enable_enhanced_ffi_analysis);
2372        assert!(options.enable_boundary_event_processing);
2373        assert!(options.enable_memory_passport_tracking);
2374        assert!(options.enable_adaptive_optimization);
2375        assert_eq!(options.max_cache_size, 1000);
2376        assert_eq!(options.target_batch_time_ms, 10);
2377        assert!(options.enable_security_analysis);
2378        assert!(options.include_low_severity_violations);
2379        assert!(options.generate_integrity_hashes);
2380        assert!(!options.enable_fast_export_mode);
2381        assert_eq!(options.auto_fast_export_threshold, Some(5000));
2382        assert!(options.thread_count.is_none());
2383    }
2384
2385    #[test]
2386    fn test_optimization_level_low() {
2387        let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::Low);
2388
2389        assert!(!options.parallel_processing);
2390        assert!(!options.use_streaming_writer);
2391        assert!(!options.enable_schema_validation);
2392        assert!(!options.enable_enhanced_ffi_analysis);
2393        assert!(!options.enable_boundary_event_processing);
2394        assert!(!options.enable_memory_passport_tracking);
2395        assert!(!options.enable_adaptive_optimization);
2396        assert!(!options.enable_security_analysis);
2397        assert_eq!(options.optimization_level, OptimizationLevel::Low);
2398    }
2399
2400    #[test]
2401    fn test_optimization_level_medium() {
2402        let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::Medium);
2403
2404        assert!(options.parallel_processing);
2405        assert!(!options.use_streaming_writer);
2406        assert!(options.enable_schema_validation);
2407        assert!(options.enable_enhanced_ffi_analysis);
2408        assert!(!options.enable_boundary_event_processing);
2409        assert!(!options.enable_memory_passport_tracking);
2410        assert_eq!(options.optimization_level, OptimizationLevel::Medium);
2411    }
2412
2413    #[test]
2414    fn test_optimization_level_high() {
2415        let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::High);
2416
2417        // High level should use default settings (all features enabled)
2418        assert!(options.parallel_processing);
2419        assert!(options.use_streaming_writer);
2420        assert!(options.enable_schema_validation);
2421        assert!(options.enable_enhanced_ffi_analysis);
2422        assert!(options.enable_boundary_event_processing);
2423        assert!(options.enable_memory_passport_tracking);
2424        assert_eq!(options.optimization_level, OptimizationLevel::High);
2425    }
2426
2427    #[test]
2428    fn test_optimization_level_maximum() {
2429        let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::Maximum);
2430
2431        assert_eq!(options.buffer_size, 512 * 1024); // 512KB buffer
2432        assert_eq!(options.batch_size, 2000);
2433        assert_eq!(options.optimization_level, OptimizationLevel::Maximum);
2434    }
2435
2436    #[test]
2437    fn test_optimized_export_options_builder_pattern() {
2438        let options = OptimizedExportOptions::default()
2439            .parallel_processing(false)
2440            .buffer_size(128 * 1024)
2441            .batch_size(500)
2442            .streaming_writer(false)
2443            .schema_validation(false)
2444            .adaptive_optimization(false)
2445            .max_cache_size(2000)
2446            .security_analysis(false)
2447            .include_low_severity(false)
2448            .integrity_hashes(false)
2449            .fast_export_mode(true)
2450            .auto_fast_export_threshold(Some(10000))
2451            .thread_count(Some(4));
2452
2453        assert!(!options.parallel_processing);
2454        assert_eq!(options.buffer_size, 128 * 1024);
2455        assert_eq!(options.batch_size, 500);
2456        assert!(!options.use_streaming_writer);
2457        assert!(!options.enable_schema_validation);
2458        assert!(!options.enable_adaptive_optimization);
2459        assert_eq!(options.max_cache_size, 2000);
2460        assert!(!options.enable_security_analysis);
2461        assert!(!options.include_low_severity_violations);
2462        assert!(!options.generate_integrity_hashes);
2463        assert!(options.enable_fast_export_mode);
2464        assert_eq!(options.auto_fast_export_threshold, Some(10000));
2465        assert_eq!(options.thread_count, Some(4));
2466    }
2467
2468    #[test]
2469    fn test_create_performance_metrics() {
2470        let allocations = vec![
2471            create_test_allocation(
2472                0x1000,
2473                64,
2474                Some("String".to_string()),
2475                Some("var1".to_string()),
2476            ),
2477            create_test_allocation(
2478                0x2000,
2479                128,
2480                Some("Vec<i32>".to_string()),
2481                Some("var2".to_string()),
2482            ),
2483        ];
2484
2485        let start_time = Instant::now();
2486        std::thread::sleep(std::time::Duration::from_millis(1)); // Ensure some time passes
2487
2488        let result = create_performance_metrics(&allocations, start_time);
2489        assert!(result.is_ok());
2490
2491        let metrics = result.unwrap();
        assert_eq!(
            metrics["metadata"]["metrics_type"].as_str().unwrap(),
            "performance_optimized"
        );
        assert_eq!(
            metrics["performance"]["allocations_processed"]
                .as_u64()
                .unwrap(),
            2
        );
        assert!(
            metrics["performance"]["total_processing_time_ms"]
                .as_u64()
                .unwrap()
                > 0
        );
        assert!(
            metrics["performance"]["processing_rate"]["allocations_per_second"]
                .as_f64()
                .is_some()
        );
        assert_eq!(
            metrics["optimization_status"]["type_caching"]
                .as_str()
                .unwrap(),
            "enabled"
        );
2516    }
2517
2518    #[test]
2519    fn test_create_security_violation_analysis_disabled() {
2520        let allocations = vec![create_test_allocation(
2521            0x1000,
2522            64,
2523            Some("String".to_string()),
2524            Some("var1".to_string()),
2525        )];
2526
2527        let options = OptimizedExportOptions::default().security_analysis(false);
2528
2529        let result = create_security_violation_analysis(&allocations, &options);
2530        assert!(result.is_ok());
2531
2532        let analysis = result.unwrap();
2533        assert_eq!(
2534            analysis["metadata"]["analysis_type"].as_str().unwrap(),
2535            "security_violations"
2536        );
2537        assert_eq!(analysis["metadata"]["status"].as_str().unwrap(), "disabled");
2538        assert!(analysis["metadata"]["message"]
2539            .as_str()
2540            .unwrap()
2541            .contains("disabled"));
2542    }
2543
2544    #[test]
2545    fn test_create_security_violation_analysis_enabled() {
2546        let allocations = vec![create_test_allocation(
2547            0x1000,
2548            64,
2549            Some("String".to_string()),
2550            Some("var1".to_string()),
2551        )];
2552
2553        let options = OptimizedExportOptions::default().security_analysis(true);
2554
2555        let result = create_security_violation_analysis(&allocations, &options);
2556        assert!(result.is_ok());
2557
2558        let analysis = result.unwrap();
2559        assert_eq!(
2560            analysis["metadata"]["analysis_type"].as_str().unwrap(),
2561            "security_violations"
2562        );
2563        assert_eq!(
2564            analysis["metadata"]["export_version"].as_str().unwrap(),
2565            "2.0"
2566        );
2567        assert!(analysis["metadata"]["analysis_enabled"].as_bool().unwrap());
2568        assert!(analysis["violation_reports"].is_array());
2569        assert!(analysis["security_summary"].is_object());
2570        assert!(analysis["data_integrity"].is_object());
2571        assert!(analysis["analysis_recommendations"].is_array());
2572    }
2573
2574    #[test]
2575    fn test_optimization_level_equality() {
2576        assert_eq!(OptimizationLevel::Low, OptimizationLevel::Low);
2577        assert_eq!(OptimizationLevel::Medium, OptimizationLevel::Medium);
2578        assert_eq!(OptimizationLevel::High, OptimizationLevel::High);
2579        assert_eq!(OptimizationLevel::Maximum, OptimizationLevel::Maximum);
2580
2581        assert_ne!(OptimizationLevel::Low, OptimizationLevel::High);
2582        assert_ne!(OptimizationLevel::Medium, OptimizationLevel::Maximum);
2583    }
2584
2585    #[test]
2586    fn test_json_file_type_equality() {
2587        assert_eq!(JsonFileType::MemoryAnalysis, JsonFileType::MemoryAnalysis);
2588        assert_eq!(JsonFileType::Lifetime, JsonFileType::Lifetime);
2589        assert_eq!(JsonFileType::UnsafeFfi, JsonFileType::UnsafeFfi);
2590        assert_eq!(JsonFileType::Performance, JsonFileType::Performance);
2591        assert_eq!(JsonFileType::ComplexTypes, JsonFileType::ComplexTypes);
2592        assert_eq!(
2593            JsonFileType::SecurityViolations,
2594            JsonFileType::SecurityViolations
2595        );
2596
2597        assert_ne!(JsonFileType::MemoryAnalysis, JsonFileType::Lifetime);
2598        assert_ne!(JsonFileType::UnsafeFfi, JsonFileType::Performance);
2599    }
2600
2601    #[test]
2602    fn test_optimized_export_options_clone() {
2603        let original = OptimizedExportOptions::default()
2604            .parallel_processing(false)
2605            .buffer_size(512 * 1024);
2606
2607        let cloned = original.clone();
2608
2609        assert_eq!(original.parallel_processing, cloned.parallel_processing);
2610        assert_eq!(original.buffer_size, cloned.buffer_size);
2611        assert_eq!(original.batch_size, cloned.batch_size);
2612        assert_eq!(original.optimization_level, cloned.optimization_level);
2613    }
2614
2615    #[test]
2616    fn test_security_analysis_configuration() {
2617        let options_with_low_severity = OptimizedExportOptions::default()
2618            .include_low_severity(true)
2619            .integrity_hashes(true);
2620
2621        let options_without_low_severity = OptimizedExportOptions::default()
2622            .include_low_severity(false)
2623            .integrity_hashes(false);
2624
2625        assert!(options_with_low_severity.include_low_severity_violations);
2626        assert!(options_with_low_severity.generate_integrity_hashes);
2627
2628        assert!(!options_without_low_severity.include_low_severity_violations);
2629        assert!(!options_without_low_severity.generate_integrity_hashes);
2630    }
2631
2632    #[test]
2633    fn test_fast_export_configuration() {
2634        let options = OptimizedExportOptions::default()
2635            .fast_export_mode(true)
2636            .auto_fast_export_threshold(Some(1000));
2637
2638        assert!(options.enable_fast_export_mode);
2639        assert_eq!(options.auto_fast_export_threshold, Some(1000));
2640
2641        let options_no_auto = OptimizedExportOptions::default().auto_fast_export_threshold(None);
2642
2643        assert!(options_no_auto.auto_fast_export_threshold.is_none());
2644    }
2645
2646    #[test]
2647    fn test_thread_count_configuration() {
2648        let options_auto = OptimizedExportOptions::default();
2649        assert!(options_auto.thread_count.is_none()); // Auto-detect
2650
2651        let options_manual = OptimizedExportOptions::default().thread_count(Some(8));
2652        assert_eq!(options_manual.thread_count, Some(8));
2653    }
2654
2655    #[test]
2656    fn test_buffer_size_validation() {
2657        let small_buffer = OptimizedExportOptions::default().buffer_size(1024); // 1KB
2658        assert_eq!(small_buffer.buffer_size, 1024);
2659
2660        let large_buffer = OptimizedExportOptions::default().buffer_size(1024 * 1024); // 1MB
2661        assert_eq!(large_buffer.buffer_size, 1024 * 1024);
2662    }
2663
2664    #[test]
2665    fn test_batch_size_validation() {
2666        let small_batch = OptimizedExportOptions::default().batch_size(100);
2667        assert_eq!(small_batch.batch_size, 100);
2668
2669        let large_batch = OptimizedExportOptions::default().batch_size(10000);
2670        assert_eq!(large_batch.batch_size, 10000);
2671    }
2672
2673    #[test]
2674    fn test_cache_size_validation() {
2675        let small_cache = OptimizedExportOptions::default().max_cache_size(500);
2676        assert_eq!(small_cache.max_cache_size, 500);
2677
2678        let large_cache = OptimizedExportOptions::default().max_cache_size(5000);
2679        assert_eq!(large_cache.max_cache_size, 5000);
2680    }
2681
2682    #[test]
2683    fn test_streaming_json_writer() {
2684        let mut buffer = Vec::new();
2685        let mut writer = StreamingJsonWriter::new(&mut buffer);
2686
2687        let test_data = serde_json::json!({
2688            "test": "value",
2689            "number": 42,
2690            "array": [1, 2, 3]
2691        });
2692
2693        let result = writer.write_complete_json(&test_data);
2694        assert!(result.is_ok());
2695
2696        let result = writer.finalize();
2697        assert!(result.is_ok());
2698
2699        // Verify the JSON was written correctly
2700        let written_json: serde_json::Value = serde_json::from_slice(&buffer).unwrap();
2701        assert_eq!(written_json["test"].as_str().unwrap(), "value");
2702        assert_eq!(written_json["number"].as_u64().unwrap(), 42);
2703        assert_eq!(written_json["array"].as_array().unwrap().len(), 3);
2704    }
2705
2706    #[test]
2707    fn test_streaming_json_writer_pretty() {
2708        let mut buffer = Vec::new();
2709        let mut writer = StreamingJsonWriter::new(&mut buffer);
2710
2711        let test_data = serde_json::json!({
2712            "test": "pretty",
2713            "formatted": true
2714        });
2715
2716        let result = writer.write_pretty_json(&test_data);
2717        assert!(result.is_ok());
2718
2719        let result = writer.finalize();
2720        assert!(result.is_ok());
2721
2722        // Verify the JSON was written and is valid
2723        let written_json: serde_json::Value = serde_json::from_slice(&buffer).unwrap();
2724        assert_eq!(written_json["test"].as_str().unwrap(), "pretty");
2725        assert!(written_json["formatted"].as_bool().unwrap());
2726
2727        // Check that it's pretty formatted (contains newlines and spaces)
2728        let json_string = String::from_utf8(buffer).unwrap();
2729        assert!(json_string.contains('\n'));
2730        assert!(json_string.contains("  ")); // Indentation
2731    }
2732
2733    #[test]
2734    fn test_type_cache_operations() {
2735        // Clear cache first
2736        clear_type_cache();
2737
2738        // Test cache miss and population
2739        let type_info1 = get_or_compute_type_info("Vec<String>", 128);
2740        assert_eq!(type_info1, "Vec<T>");
2741
2742        // Test cache hit (should return same result)
2743        let type_info2 = get_or_compute_type_info("Vec<String>", 128);
2744        assert_eq!(type_info2, "Vec<T>");
2745        assert_eq!(type_info1, type_info2);
2746
2747        // Test different type
2748        let type_info3 = get_or_compute_type_info("HashMap<String, i32>", 256);
2749        assert_eq!(type_info3, "HashMap<K,V>");
2750
2751        // Clear cache and verify
2752        clear_type_cache();
2753        let type_info4 = get_or_compute_type_info("Vec<String>", 128);
2754        assert_eq!(type_info4, "Vec<T>"); // Should still work after cache clear
2755    }
2756
    #[test]
    fn test_compute_enhanced_type_info() {
        // Test Vec types
        assert_eq!(compute_enhanced_type_info("Vec<String>", 100), "Vec<T>");
        assert_eq!(compute_enhanced_type_info("Vec<i32>", 200), "Vec<T>");

        // Test HashMap types
        assert_eq!(
            compute_enhanced_type_info("HashMap<String, i32>", 300),
            "HashMap<K,V>"
        );
        assert_eq!(
            compute_enhanced_type_info("HashMap<u64, String>", 400),
            "HashMap<K,V>"
        );

        // Test String types
        assert_eq!(compute_enhanced_type_info("String", 50), "String");
        assert_eq!(
            compute_enhanced_type_info("std::string::String", 60),
            "String"
        );

        // Test size-based categorization
        assert_eq!(compute_enhanced_type_info("Unknown", 4), "Primitive");
        assert_eq!(compute_enhanced_type_info("Unknown", 16), "SmallStruct");
        assert_eq!(compute_enhanced_type_info("Unknown", 64), "MediumStruct");
        assert_eq!(compute_enhanced_type_info("Unknown", 512), "LargeStruct");
        assert_eq!(compute_enhanced_type_info("Unknown", 2048), "Buffer");
    }

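    // The size buckets below are a hedged sketch inferred only from the
    // assertions in test_compute_enhanced_type_info; the authoritative
    // thresholds live in compute_enhanced_type_info itself, and the exact
    // boundary values used here (8/32/128/1024) are assumptions that merely
    // separate the tested inputs.
    #[allow(dead_code)]
    fn sketch_size_category(size: usize) -> &'static str {
        match size {
            0..=8 => "Primitive",        // covers the size-4 assertion
            9..=32 => "SmallStruct",     // covers the size-16 assertion
            33..=128 => "MediumStruct",  // covers the size-64 assertion
            129..=1024 => "LargeStruct", // covers the size-512 assertion
            _ => "Buffer",               // covers the size-2048 assertion
        }
    }
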
    #[test]
    fn test_estimate_json_size() {
        // Test simple object
        let simple_obj = serde_json::json!({
            "key": "value"
        });
        let size1 = estimate_json_size(&simple_obj);
        assert!(size1 > 0);

        // Test array
        let array = serde_json::json!([1, 2, 3, 4, 5]);
        let size2 = estimate_json_size(&array);
        assert!(size2 > 0);

        // Test complex nested structure
        let complex = serde_json::json!({
            "data": {
                "items": [
                    {"id": 1, "name": "item1"},
                    {"id": 2, "name": "item2"}
                ],
                "metadata": {
                    "count": 2,
                    "description": "test data"
                }
            }
        });
        let size3 = estimate_json_size(&complex);
        assert!(size3 > size1);
        assert!(size3 > size2);

        // Test string
        let string_val = serde_json::json!("This is a test string");
        let size4 = estimate_json_size(&string_val);
        assert!(size4 > 20); // String length + overhead

        // Test primitive
        let number = serde_json::json!(42);
        let size5 = estimate_json_size(&number);
        assert_eq!(size5, 20); // Default primitive size
    }

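    // Hedged sketch of the estimation shape the assertions above imply:
    // recurse over the value, charge strings their length plus a small
    // overhead, and fall back to a flat 20 bytes for primitives (the one
    // constant the tests pin down exactly). The per-entry overhead numbers
    // here are assumptions, not the real constants in estimate_json_size.
    #[allow(dead_code)]
    fn sketch_estimate_json_size(value: &serde_json::Value) -> usize {
        match value {
            serde_json::Value::Object(map) => map
                .iter()
                .map(|(key, child)| key.len() + 4 + sketch_estimate_json_size(child))
                .sum::<usize>()
                + 2,
            serde_json::Value::Array(items) => {
                items.iter().map(sketch_estimate_json_size).sum::<usize>() + 2
            }
            serde_json::Value::String(s) => s.len() + 2, // length + overhead
            _ => 20, // default primitive size asserted above
        }
    }
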
    #[test]
    fn test_normalize_type_name() {
        // Test generic type normalization
        assert_eq!(normalize_type_name("Vec<String>"), "Vec<T>");
        assert_eq!(normalize_type_name("HashMap<String, i32>"), "HashMap<T>");
        assert_eq!(
            normalize_type_name("Option<Result<String, Error>>"),
            "Option<T>"
        );

        // Test non-generic types
        assert_eq!(normalize_type_name("String"), "String");
        assert_eq!(normalize_type_name("i32"), "i32");
        assert_eq!(normalize_type_name("MyStruct"), "MyStruct");

        // Test edge cases
        assert_eq!(normalize_type_name(""), "");
        assert_eq!(normalize_type_name("Vec"), "Vec");
        assert_eq!(normalize_type_name("Vec<>"), "Vec<T>");
    }

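    // Minimal sketch of the normalization rule the assertions above describe:
    // keep everything before the first '<' and collapse any generic argument
    // list (even an empty one) to "<T>". This is an inference from the tests,
    // not the actual body of normalize_type_name.
    #[allow(dead_code)]
    fn sketch_normalize_type_name(type_name: &str) -> String {
        match type_name.find('<') {
            Some(idx) => format!("{}<T>", &type_name[..idx]),
            None => type_name.to_string(),
        }
    }
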
    #[test]
    fn test_categorize_complex_type() {
        // Test trait objects
        assert_eq!(categorize_complex_type("dyn Display"), "TraitObject");
        assert_eq!(categorize_complex_type("dyn Debug + Send"), "TraitObject");

        // Test smart pointers
        assert_eq!(categorize_complex_type("Box<String>"), "SmartPointer");
        assert_eq!(categorize_complex_type("Rc<RefCell<i32>>"), "SmartPointer");
        assert_eq!(
            categorize_complex_type("Arc<Mutex<Vec<u8>>>"),
            "SmartPointer"
        );
        assert_eq!(
            categorize_complex_type("RefCell<HashMap<String, i32>>"),
            "SmartPointer"
        );

        // Test collections
        assert_eq!(categorize_complex_type("Vec<String>"), "Collection");
        assert_eq!(
            categorize_complex_type("HashMap<String, i32>"),
            "Collection"
        );
        assert_eq!(
            categorize_complex_type("BTreeMap<u64, String>"),
            "Collection"
        );
        assert_eq!(categorize_complex_type("HashSet<String>"), "Collection");

        // Test generic types
        assert_eq!(categorize_complex_type("Option<String>"), "Generic");
        assert_eq!(categorize_complex_type("Result<i32, Error>"), "Generic");
        assert_eq!(categorize_complex_type("MyStruct<T, U>"), "Generic");

        // Test module paths
        assert_eq!(
            categorize_complex_type("std::collections::HashMap"),
            "ModulePath"
        );
        assert_eq!(
            categorize_complex_type("crate::my_module::MyType"),
            "ModulePath"
        );

        // Test simple types
        assert_eq!(categorize_complex_type("String"), "Simple");
        assert_eq!(categorize_complex_type("i32"), "Simple");
        assert_eq!(categorize_complex_type("MyStruct"), "Simple");
    }

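    // Hedged sketch of a decision order consistent with every assertion in
    // test_categorize_complex_type. The concrete predicates are assumptions:
    // in particular, smart pointers must be checked before collections so
    // "Rc<RefCell<i32>>" stays a SmartPointer, and the collection check must
    // require a '<' so "std::collections::HashMap" falls through to ModulePath.
    #[allow(dead_code)]
    fn sketch_categorize(type_name: &str) -> &'static str {
        let smart_pointers = ["Box<", "Rc<", "Arc<", "RefCell<"];
        let collections = ["Vec<", "HashMap<", "BTreeMap<", "HashSet<"];
        if type_name.starts_with("dyn ") {
            "TraitObject"
        } else if smart_pointers.iter().any(|p| type_name.starts_with(p)) {
            "SmartPointer"
        } else if collections.iter().any(|p| type_name.starts_with(p)) {
            "Collection"
        } else if type_name.contains('<') {
            "Generic"
        } else if type_name.contains("::") {
            "ModulePath"
        } else {
            "Simple"
        }
    }
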
    #[test]
    fn test_calculate_type_complexity() {
        // Test simple types
        assert_eq!(calculate_type_complexity("i32"), 1);
        assert_eq!(calculate_type_complexity("String"), 1);

        // Test generic types: base(1) + 2 per '<' + 3 per nesting level
        assert_eq!(calculate_type_complexity("Vec<String>"), 6); // 1 + 1*2 + 1*3
        assert_eq!(calculate_type_complexity("HashMap<String, i32>"), 6); // 1 + 1*2 + 1*3

        // Test nested generics: base(1) + '<' count (2*2) + nesting level (2*3)
        assert_eq!(calculate_type_complexity("Vec<Option<String>>"), 11); // 1 + 2*2 + 2*3

        // Test trait objects: base(1) + dyn(5)
        assert_eq!(calculate_type_complexity("dyn Display"), 6); // 1 + 5
        assert_eq!(calculate_type_complexity("dyn Debug + Send"), 6); // 1 + 5

        // Test impl types: base(1) + impl(4)
        assert_eq!(calculate_type_complexity("impl Iterator"), 5); // 1 + 4

        // Test async types: base(1) + async(3)
        assert_eq!(calculate_type_complexity("async fn()"), 4); // 1 + 3

        // Future<Output = i32>: base(1) + '<' count (1*2) + nesting level (1*3) + Future(3)
        assert_eq!(calculate_type_complexity("Future<Output = i32>"), 9); // 1 + 2 + 3 + 3

        // Test smart pointers
        // Box<String>: base(1) + '<' count (1*2) + nesting level (1*3) + Box(2)
        assert_eq!(calculate_type_complexity("Box<String>"), 8); // 1 + 2 + 3 + 2

        // Rc<RefCell<i32>>: base(1) + '<' count (2*2) + nesting level (2*3) + Rc(3) + RefCell(3)
        assert_eq!(calculate_type_complexity("Rc<RefCell<i32>>"), 17); // 1 + 4 + 6 + 3 + 3

        // Arc<Mutex<Vec<String>>>: base(1) + '<' count (3*2) + nesting level (3*3) + Arc(4)
        assert_eq!(calculate_type_complexity("Arc<Mutex<Vec<String>>>"), 20); // 1 + 6 + 9 + 4
    }

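    // Hedged sketch of the scoring model the comments above spell out:
    // base 1, +2 per '<', +3 per level of angle-bracket nesting, plus fixed
    // bonuses for dyn/impl/async/Future and the common smart pointers. It
    // reproduces every expected value in test_calculate_type_complexity, but
    // it is an inference from those tests, not the real implementation.
    #[allow(dead_code)]
    fn sketch_type_complexity(type_name: &str) -> u32 {
        let generic_opens = type_name.matches('<').count() as u32;
        // Track the maximum angle-bracket nesting depth.
        let (mut depth, mut max_depth) = (0u32, 0u32);
        for c in type_name.chars() {
            match c {
                '<' => {
                    depth += 1;
                    max_depth = max_depth.max(depth);
                }
                '>' => depth = depth.saturating_sub(1),
                _ => {}
            }
        }
        let mut score = 1 + 2 * generic_opens + 3 * max_depth;
        // Keyword and wrapper bonuses inferred from the expected totals.
        if type_name.contains("dyn ") {
            score += 5;
        }
        if type_name.contains("impl ") {
            score += 4;
        }
        if type_name.contains("async") {
            score += 3;
        }
        if type_name.contains("Future") {
            score += 3;
        }
        if type_name.contains("Box<") {
            score += 2;
        }
        if type_name.contains("Rc<") {
            score += 3; // case-sensitive, so "Arc<" does not match
        }
        if type_name.contains("RefCell<") {
            score += 3;
        }
        if type_name.contains("Arc<") {
            score += 4;
        }
        score
    }
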
    #[test]
    fn test_calculate_memory_efficiency() {
        // Test Vec efficiency
        assert_eq!(calculate_memory_efficiency("Vec<String>", 32, 1), 60); // Small Vec
        assert_eq!(calculate_memory_efficiency("Vec<i32>", 128, 1), 85); // Larger Vec

        // Test HashMap efficiency
        assert_eq!(
            calculate_memory_efficiency("HashMap<String, i32>", 64, 1),
            50
        ); // Small HashMap
        assert_eq!(
            calculate_memory_efficiency("HashMap<u64, String>", 256, 1),
            75
        ); // Larger HashMap

        // Test Box efficiency
        assert_eq!(calculate_memory_efficiency("Box<String>", 100, 1), 90);

        // Test reference counting
        assert_eq!(calculate_memory_efficiency("Arc<String>", 100, 1), 80);
        assert_eq!(calculate_memory_efficiency("Rc<i32>", 50, 1), 80);

        // Test default efficiency
        assert_eq!(calculate_memory_efficiency("MyStruct", 100, 1), 85);

        // Test zero count edge case
        assert_eq!(calculate_memory_efficiency("Vec<String>", 1000, 0), 100);
    }

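    // Hedged sketch matching the efficiency scores asserted above: small
    // Vecs and HashMaps score lower than larger ones, Box scores 90,
    // reference-counted types score 80, everything else defaults to 85, and
    // a zero allocation count short-circuits to 100. The size cutoffs (64
    // and 128) are assumptions that merely separate the tested inputs.
    #[allow(dead_code)]
    fn sketch_memory_efficiency(type_name: &str, total_size: usize, count: usize) -> u32 {
        if count == 0 {
            return 100; // nothing allocated, nothing wasted
        }
        let avg_size = total_size / count;
        if type_name.starts_with("Vec<") {
            if avg_size < 64 {
                60
            } else {
                85
            }
        } else if type_name.starts_with("HashMap<") {
            if avg_size < 128 {
                50
            } else {
                75
            }
        } else if type_name.starts_with("Box<") {
            90
        } else if type_name.starts_with("Arc<") || type_name.starts_with("Rc<") {
            80
        } else {
            85
        }
    }
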
    #[test]
    fn test_generate_optimization_suggestions() {
        let mut info = ComplexTypeInfo::new();

        // Test high allocation count suggestions
        info.allocation_count = 150;
        info.total_size = 1000;
        info.complexity_score = 5;
        let suggestions = generate_optimization_suggestions("String", &info);
        assert!(suggestions.iter().any(|s| s.contains("object pooling")));

        // Test large Vec suggestions
        info.allocation_count = 10;
        info.total_size = 2 * 1024 * 1024; // 2MB
        info.complexity_score = 3;
        let suggestions = generate_optimization_suggestions("Vec<String>", &info);
        assert!(suggestions
            .iter()
            .any(|s| s.contains("pre-allocating Vec capacity")));

        // Test HashMap suggestions
        info.allocation_count = 60;
        info.total_size = 100000;
        info.complexity_score = 4;
        let suggestions = generate_optimization_suggestions("HashMap<String, i32>", &info);
        assert!(suggestions.iter().any(|s| s.contains("FxHashMap")));

        // Test Box suggestions
        info.allocation_count = 250;
        info.total_size = 50000;
        info.complexity_score = 2;
        let suggestions = generate_optimization_suggestions("Box<i32>", &info);
        assert!(suggestions.iter().any(|s| s.contains("arena allocation")));

        // Test high complexity suggestions
        info.allocation_count = 10;
        info.total_size = 1000;
        info.complexity_score = 15;
        let suggestions = generate_optimization_suggestions("ComplexType<T, U, V>", &info);
        assert!(suggestions
            .iter()
            .any(|s| s.contains("High complexity type")));
    }

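    // Hedged sketch of the trigger conditions the assertions above exercise.
    // The thresholds (100, 1 MB, 50, 200, 10) are assumptions chosen to
    // separate the tested inputs; only the suggestion keywords themselves
    // come from the tests.
    #[allow(dead_code)]
    fn sketch_suggestion_triggers(type_name: &str, info: &ComplexTypeInfo) -> Vec<&'static str> {
        let mut triggers = Vec::new();
        if info.allocation_count > 100 {
            triggers.push("object pooling"); // many allocations of one type
        }
        if type_name.starts_with("Vec<") && info.total_size > 1024 * 1024 {
            triggers.push("pre-allocating Vec capacity"); // large total Vec memory
        }
        if type_name.starts_with("HashMap<") && info.allocation_count > 50 {
            triggers.push("FxHashMap"); // faster hasher for hot maps
        }
        if type_name.starts_with("Box<") && info.allocation_count > 200 {
            triggers.push("arena allocation"); // many small boxed values
        }
        if info.complexity_score > 10 {
            triggers.push("High complexity type");
        }
        triggers
    }
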
    #[test]
    fn test_calculate_complexity_distribution() {
        let type_analysis = vec![
            serde_json::json!({"complexity_score": 2}),
            serde_json::json!({"complexity_score": 5}),
            serde_json::json!({"complexity_score": 10}),
            serde_json::json!({"complexity_score": 20}),
            serde_json::json!({"complexity_score": 1}),
            serde_json::json!({"complexity_score": 7}),
        ];

        let distribution = calculate_complexity_distribution(&type_analysis);

        assert_eq!(distribution["low_complexity"].as_u64().unwrap(), 2); // scores 1, 2
        assert_eq!(distribution["medium_complexity"].as_u64().unwrap(), 2); // scores 5, 7
        assert_eq!(distribution["high_complexity"].as_u64().unwrap(), 1); // score 10
        assert_eq!(distribution["very_high_complexity"].as_u64().unwrap(), 1); // score 20
    }

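    // Hedged sketch of bucket boundaries consistent with the distribution
    // test: scores 1 and 2 land in "low", 5 and 7 in "medium", 10 in "high",
    // and 20 in "very_high". The cutoffs (3, 9, 15) are assumed; any
    // boundaries that classify the six tested scores the same way would pass.
    #[allow(dead_code)]
    fn sketch_complexity_bucket(score: u64) -> &'static str {
        match score {
            0..=3 => "low_complexity",
            4..=9 => "medium_complexity",
            10..=15 => "high_complexity",
            _ => "very_high_complexity",
        }
    }
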
    #[test]
    fn test_generate_global_optimization_recommendations() {
        // Test with many high-complexity types (more than five entries with
        // high allocation counts are needed to trigger these recommendations)
        let high_complexity_analysis = vec![
            serde_json::json!({"complexity_score": 15, "allocation_count": 150}),
            serde_json::json!({"complexity_score": 12, "allocation_count": 130}),
            serde_json::json!({"complexity_score": 18, "allocation_count": 120}),
            serde_json::json!({"complexity_score": 20, "allocation_count": 200}),
            serde_json::json!({"complexity_score": 11, "allocation_count": 110}),
            serde_json::json!({"complexity_score": 14, "allocation_count": 140}),
        ];

        let recommendations =
            generate_global_optimization_recommendations(&high_complexity_analysis);
        assert!(recommendations
            .iter()
            .any(|r| r.contains("refactoring high-complexity types")));
        assert!(recommendations.iter().any(|r| r.contains("object pooling")));
        assert!(recommendations.iter().any(|r| r.contains("cargo clippy")));
        assert!(recommendations.iter().any(|r| r.contains("perf")));

        // Test with low-complexity types
        let low_complexity_analysis = vec![
            serde_json::json!({"complexity_score": 2, "allocation_count": 10}),
            serde_json::json!({"complexity_score": 3, "allocation_count": 5}),
        ];

        let recommendations =
            generate_global_optimization_recommendations(&low_complexity_analysis);
        // Should not suggest refactoring for low complexity
        assert!(!recommendations
            .iter()
            .any(|r| r.contains("refactoring high-complexity types")));
        // Should not suggest object pooling for low allocation counts
        assert!(!recommendations.iter().any(|r| r.contains("object pooling")));
        // But general recommendations should still be present
        assert!(recommendations.iter().any(|r| r.contains("cargo clippy")));
    }

    #[test]
    fn test_complex_type_info_merge() {
        let mut info1 = ComplexTypeInfo {
            category: "Generic".to_string(),
            total_size: 1000,
            allocation_count: 10,
            max_size: 200,
            complexity_score: 5,
        };

        let info2 = ComplexTypeInfo {
            category: "".to_string(), // Empty category
            total_size: 500,
            allocation_count: 5,
            max_size: 150,
            complexity_score: 3,
        };

        info1.merge(info2);

        assert_eq!(info1.category, "Generic"); // Should keep original category
        assert_eq!(info1.total_size, 1500); // 1000 + 500
        assert_eq!(info1.allocation_count, 15); // 10 + 5
        assert_eq!(info1.max_size, 200); // max(200, 150)
        assert_eq!(info1.complexity_score, 5); // max(5, 3)
    }

    #[test]
    fn test_complex_type_info_merge_empty_category() {
        let mut info1 = ComplexTypeInfo {
            category: "".to_string(), // Empty category
            total_size: 100,
            allocation_count: 1,
            max_size: 100,
            complexity_score: 2,
        };

        let info2 = ComplexTypeInfo {
            category: "Collection".to_string(),
            total_size: 200,
            allocation_count: 2,
            max_size: 150,
            complexity_score: 4,
        };

        info1.merge(info2);

        assert_eq!(info1.category, "Collection"); // Should take non-empty category
        assert_eq!(info1.total_size, 300);
        assert_eq!(info1.allocation_count, 3);
        assert_eq!(info1.max_size, 150);
        assert_eq!(info1.complexity_score, 4);
    }
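
    // Hedged sketch of the merge semantics the two tests above pin down:
    // sizes and counts add, maxima are kept, and a non-empty category wins.
    // Written as a free function to avoid clashing with the real
    // ComplexTypeInfo::merge; it is an inference from the tests.
    #[allow(dead_code)]
    fn sketch_merge(target: &mut ComplexTypeInfo, other: ComplexTypeInfo) {
        if target.category.is_empty() {
            target.category = other.category; // take the non-empty category
        }
        target.total_size += other.total_size;
        target.allocation_count += other.allocation_count;
        target.max_size = target.max_size.max(other.max_size);
        target.complexity_score = target.complexity_score.max(other.complexity_score);
    }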
}