memscope_rs/export/binary/parser.rs

//! Optimized binary file parser

#![allow(dead_code)] // Allow unused functions for future use and backwards compatibility

use crate::core::types::AllocationInfo;
use crate::export::analysis_engine::{AnalysisEngine, StandardAnalysisEngine};
use crate::export::binary::{BinaryExportError, BinaryReader};
use std::path::Path;
use std::sync::Arc;
use std::time::Instant;

/// Binary parser for optimized file conversion
pub struct BinaryParser;

impl BinaryParser {
    /// Convert a binary file to the standard set of JSON files using an optimized approach.
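    ///
    /// A minimal usage sketch (the binary path and base name below are hypothetical, and
    /// the import path assumes `BinaryParser` is reachable from outside this module):
    ///
    /// ```ignore
    /// use memscope_rs::export::binary::parser::BinaryParser;
    ///
    /// // Writes MemoryAnalysis/my_app/my_app_memory_analysis.json plus the lifetime,
    /// // performance, unsafe_ffi, and complex_types variants.
    /// BinaryParser::to_standard_json_files("my_app.memscope", "my_app")?;
    /// ```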
17    pub fn to_standard_json_files<P: AsRef<Path>>(
18        binary_path: P,
19        base_name: &str,
20    ) -> Result<(), BinaryExportError> {
21        let start = Instant::now();
22        let binary_path = binary_path.as_ref();
23
24        // Create output directory structure
25        let base_memory_analysis_dir = std::path::Path::new("MemoryAnalysis");
26        let project_dir = base_memory_analysis_dir.join(base_name);
27        std::fs::create_dir_all(&project_dir)?;
28
29        // Load allocations - only user-defined variables for performance
30        let allocations = Self::load_allocations(binary_path)?;
31        let user_allocations: Vec<AllocationInfo> = allocations
32            .into_iter()
33            .filter(|a| a.var_name.is_some())
34            .collect();
35
36        // Use StandardAnalysisEngine but with filtered user allocations for performance
37        let analysis_engine = StandardAnalysisEngine::new();
38
39        // Generate 5 JSON files with proper analysis data
40        let analyses = [
41            (
42                "memory_analysis",
43                analysis_engine
44                    .create_memory_analysis(&user_allocations)
45                    .map_err(|e| {
46                        BinaryExportError::CorruptedData(format!("Memory analysis failed: {e}"))
47                    })?,
48            ),
49            (
50                "lifetime",
51                analysis_engine
52                    .create_lifetime_analysis(&user_allocations)
53                    .map_err(|e| {
54                        BinaryExportError::CorruptedData(format!("Lifetime analysis failed: {e}"))
55                    })?,
56            ),
57            (
58                "performance",
59                analysis_engine
60                    .create_performance_analysis(&user_allocations)
61                    .map_err(|e| {
62                        BinaryExportError::CorruptedData(format!(
63                            "Performance analysis failed: {e}",
64                        ))
65                    })?,
66            ),
67            (
68                "unsafe_ffi",
69                analysis_engine
70                    .create_unsafe_ffi_analysis(&user_allocations)
71                    .map_err(|e| {
72                        BinaryExportError::CorruptedData(
73                            format!("Unsafe FFI analysis failed: {e}",),
74                        )
75                    })?,
76            ),
77            (
78                "complex_types",
79                analysis_engine
80                    .create_complex_types_analysis(&user_allocations)
81                    .map_err(|e| {
82                        BinaryExportError::CorruptedData(format!(
83                            "Complex types analysis failed: {e}",
84                        ))
85                    })?,
86            ),
87        ];
88
89        for (file_type, analysis_data) in analyses {
90            let file_path = project_dir.join(format!("{base_name}_{file_type}.json"));
91            let json_content = serde_json::to_string(&analysis_data.data).map_err(|e| {
92                BinaryExportError::SerializationError(format!("JSON serialization failed: {e}"))
93            })?;
94            std::fs::write(file_path, json_content)?;
95        }
96
97        let elapsed = start.elapsed();
98        if elapsed.as_millis() > 300 {
99            tracing::warn!(
100                "Performance target missed: {}ms (target: <300ms)",
101                elapsed.as_millis()
102            );
103        } else {
104            tracing::debug!(
105                "Optimized conversion completed in {}ms",
106                elapsed.as_millis()
107            );
108        }
109
110        Ok(())
111    }
112
113    /// Load allocations from binary file
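    ///
    /// A minimal sketch (the path is hypothetical):
    ///
    /// ```ignore
    /// let allocations = BinaryParser::load_allocations("my_app.memscope")?;
    /// println!("loaded {} allocation records", allocations.len());
    /// ```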
114    pub fn load_allocations<P: AsRef<Path>>(
115        binary_path: P,
116    ) -> Result<Vec<AllocationInfo>, BinaryExportError> {
117        let mut reader = BinaryReader::new(binary_path)?;
118        reader.read_all()
119    }
120
    /// Load allocations with enhanced error recovery.
    ///
    /// Works around "failed to fill whole buffer" errors from truncated or corrupted
    /// files by reading allocation records one by one and stopping at the first EOF.
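    ///
    /// A minimal sketch of the recovery behaviour (the path is hypothetical): a clean file
    /// loads normally, while a truncated file yields whatever records precede the EOF.
    ///
    /// ```ignore
    /// match BinaryParser::load_allocations_with_recovery("truncated.memscope") {
    ///     Ok(allocations) => println!("recovered {} records", allocations.len()),
    ///     Err(e) => eprintln!("nothing could be recovered: {e}"),
    /// }
    /// ```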
124    pub fn load_allocations_with_recovery<P: AsRef<Path>>(
125        binary_path: P,
126    ) -> Result<Vec<AllocationInfo>, BinaryExportError> {
127        let binary_path = binary_path.as_ref();
128
        // First, check the file size as a quick sanity check
130        let file_metadata = std::fs::metadata(binary_path)?;
131        let file_size = file_metadata.len();
132        tracing::debug!("Binary file size: {file_size} bytes");
133
        // Try a normal read first
135        match Self::load_allocations(binary_path) {
136            Ok(allocations) => {
137                tracing::info!(
138                    "Successfully loaded {} allocations normally",
139                    allocations.len()
140                );
141                Ok(allocations)
142            }
143            Err(BinaryExportError::Io(ref e)) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
144                tracing::warn!("Encountered EOF error, attempting recovery read");
145
                // Fall back to a recovery-mode read
147                let mut reader = BinaryReader::new(binary_path)?;
148                let header = reader.read_header()?;
149                let mut allocations = Vec::new();
150
                // Read records one by one; stop at EOF and propagate any other error
152                for i in 0..header.total_count {
153                    match reader.read_allocation() {
154                        Ok(allocation) => allocations.push(allocation),
155                        Err(BinaryExportError::Io(ref e))
156                            if e.kind() == std::io::ErrorKind::UnexpectedEof =>
157                        {
158                            tracing::warn!(
159                                "Recovered {i} of {} allocations before EOF",
160                                header.total_count
161                            );
162                            break;
163                        }
164                        Err(e) => {
165                            tracing::error!("Failed to read allocation {i}: {e}");
166                            return Err(e);
167                        }
168                    }
169                }
170
171                if allocations.is_empty() {
172                    return Err(BinaryExportError::CorruptedData(
173                        "No allocations could be recovered from corrupted file".to_string(),
174                    ));
175                }
176
177                tracing::info!("Successfully recovered {} allocations", allocations.len());
178                Ok(allocations)
179            }
180            Err(e) => {
181                tracing::error!("Failed to load allocations: {e}");
182                Err(e)
183            }
184        }
185    }
186
187    /// Convert binary file to single JSON format (legacy compatibility)
188    /// 🔥 FIXED: Memory overflow issue - now uses streaming instead of loading all data at once
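    ///
    /// A minimal sketch (paths are hypothetical); the output is a single JSON array of
    /// allocation records, written one record at a time:
    ///
    /// ```ignore
    /// BinaryParser::to_json("my_app.memscope", "my_app_allocations.json")?;
    /// ```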
189    pub fn to_json<P: AsRef<Path>>(binary_path: P, json_path: P) -> Result<(), BinaryExportError> {
190        use std::io::{BufWriter, Write};
191
192        // Open binary reader for streaming access
193        let mut reader = BinaryReader::new(&binary_path)?;
194        let header = reader.read_header()?;
195
196        // Create buffered writer for efficient output
197        let file = std::fs::File::create(json_path)?;
198        let mut writer = BufWriter::with_capacity(2 * 1024 * 1024, file); // 2MB buffer
199
200        // Write JSON array start
201        writer.write_all(b"[")?;
202
203        // Stream allocations one by one to avoid memory overflow
204        for i in 0..header.total_count {
205            if i > 0 {
206                writer.write_all(b",")?;
207            }
208
209            // Read one allocation at a time
210            let allocation = reader.read_allocation()?;
211
212            // Serialize single allocation (much smaller memory footprint)
213            let allocation_json = serde_json::to_string(&allocation).map_err(|e| {
214                BinaryExportError::SerializationError(format!("JSON serialization failed: {e}"))
215            })?;
216
217            writer.write_all(allocation_json.as_bytes())?;
218        }
219
220        // Write JSON array end
221        writer.write_all(b"]")?;
222        writer.flush()?;
223
224        Ok(())
225    }
226
227    /// Convert binary file to HTML format (legacy compatibility)
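    ///
    /// A minimal sketch (paths are hypothetical):
    ///
    /// ```ignore
    /// BinaryParser::to_html("my_app.memscope", "my_app_report.html")?;
    /// ```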
228    pub fn to_html<P: AsRef<Path>>(binary_path: P, html_path: P) -> Result<(), BinaryExportError> {
229        let allocations = Self::load_allocations(binary_path)?;
230        let html_content = format!(
231            r#"<!DOCTYPE html>
232<html>
233<head><title>Memory Analysis</title></head>
234<body>
235<h1>Memory Analysis Report</h1>
236<p>Total allocations: {}</p>
237<pre>{}</pre>
238</body>
239</html>"#,
240            allocations.len(),
241            serde_json::to_string_pretty(&allocations).map_err(|e| {
242                BinaryExportError::SerializationError(format!("JSON serialization failed: {e}"))
243            })?
244        );
245        std::fs::write(html_path, html_content)?;
246        Ok(())
247    }
248
    /// Parse a user binary to JSON using `BinaryReader`.
    ///
    /// Uses the same `BinaryReader` streaming approach as full-binary parsing, so user and
    /// full exports get consistent performance.
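    ///
    /// A minimal sketch with error handling (the path, base name, and the public path of
    /// the error type are assumptions):
    ///
    /// ```ignore
    /// use memscope_rs::export::binary::BinaryExportError;
    ///
    /// match BinaryParser::parse_user_binary_to_json("my_app.memscope", "my_app") {
    ///     Ok(()) => println!("JSON files written under MemoryAnalysis/my_app/"),
    ///     Err(BinaryExportError::Io(e)) => eprintln!("I/O problem: {e}"),
    ///     Err(e) => eprintln!("conversion failed: {e}"),
    /// }
    /// ```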
251    pub fn parse_user_binary_to_json<P: AsRef<Path>>(
252        binary_path: P,
253        base_name: &str,
254    ) -> Result<(), BinaryExportError> {
255        let start = Instant::now();
256        tracing::info!("🚀 Starting user binary to JSON conversion using BinaryReader");
257
258        // Use the same BinaryReader approach as full-binary for consistency
259        Self::parse_binary_to_json_with_index(&binary_path, base_name)?;
260
261        let elapsed = start.elapsed();
262
263        // Performance target check: <300ms for user binary processing
264        if elapsed.as_millis() > 300 {
265            tracing::warn!(
266                "⚠️  Performance target missed: {}ms (target: <300ms)",
267                elapsed.as_millis()
268            );
269        } else {
270            tracing::info!(
271                "🎉 Ultra-fast user binary conversion completed: {}ms (target: <300ms)",
272                elapsed.as_millis()
273            );
274        }
275
276        Ok(())
277    }
278
    /// Parse a full binary file to JSON using the ultra-fast direct approach.
    ///
    /// **One-Stop Solution**: calls the optimized `generate_*_json` methods directly,
    /// avoiding SelectiveJsonExporter and its I/O errors.
    ///
    /// Core optimizations:
    /// - Loads allocations via `load_allocations_with_recovery` for improved error handling
    /// - Calls the optimized `generate_*_json` methods directly (no complex SelectiveJsonExporter)
    /// - Generates the five JSON files sequentially
    /// - Target: <300ms, no null fields, consistent JSON format
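    ///
    /// A minimal sketch (the binary path and base name are hypothetical):
    ///
    /// ```ignore
    /// use std::time::Instant;
    ///
    /// let start = Instant::now();
    /// BinaryParser::parse_full_binary_to_json("full_capture.memscope", "full_capture")?;
    /// println!("full export took {}ms", start.elapsed().as_millis());
    /// ```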
288    pub fn parse_full_binary_to_json<P: AsRef<Path>>(
289        binary_path: P,
290        base_name: &str,
291    ) -> Result<(), BinaryExportError> {
292        let start = Instant::now();
293        tracing::info!("Starting ultra-fast full binary to JSON conversion (direct approach)");
294
295        // Load all allocations with improved error handling
296        let load_start = Instant::now();
297        let all_allocations = Self::load_allocations_with_recovery(&binary_path)?;
298        let load_time = load_start.elapsed();
299        tracing::info!(
300            "Loaded {} allocations in {}ms with error recovery",
301            all_allocations.len(),
302            load_time.as_millis()
303        );
304
305        // Create output directory
306        let base_memory_analysis_dir = std::path::Path::new("MemoryAnalysis");
307        let project_dir = base_memory_analysis_dir.join(base_name);
308        std::fs::create_dir_all(&project_dir)?;
309
        // **One-Stop Solution**: generate the five JSON files directly, avoiding SelectiveJsonExporter's I/O problems
311        let json_start = Instant::now();
312
313        let paths = [
314            project_dir.join(format!("{base_name}_memory_analysis.json")),
315            project_dir.join(format!("{base_name}_lifetime.json")),
316            project_dir.join(format!("{base_name}_performance.json")),
317            project_dir.join(format!("{base_name}_unsafe_ffi.json")),
318            project_dir.join(format!("{base_name}_complex_types.json")),
319        ];
320
321        // 🔥 FIXED: Use sequential processing to avoid memory issues with large datasets
322        // Parallel processing was causing memory pressure and potential race conditions
323        let generators = [
324            Self::generate_memory_analysis_json,
325            Self::generate_lifetime_analysis_json,
326            Self::generate_performance_analysis_json,
327            Self::generate_unsafe_ffi_analysis_json,
328            Self::generate_complex_types_analysis_json,
329        ];
330
331        for (i, path) in paths.iter().enumerate() {
332            generators[i](&all_allocations, path)?;
333        }
334
335        let json_time = json_start.elapsed();
336        tracing::info!(
            "Generated 5 JSON files sequentially in {}ms",
338            json_time.as_millis()
339        );
340
341        let elapsed = start.elapsed();
342
343        // Performance target check: <300ms for full binary processing
344        if elapsed.as_millis() > 300 {
345            tracing::warn!(
346                "Performance target missed: {}ms (target: <300ms)",
347                elapsed.as_millis()
348            );
349        } else {
350            tracing::info!(
351                "✅ Ultra-fast full binary conversion completed in {}ms (target: <300ms)",
352                elapsed.as_millis()
353            );
354        }
355
356        Ok(())
357    }
358
359    /// Ultra-fast binary to JSON conversion using existing optimizations
360    ///
361    /// This method provides the same ultra-fast performance as v5-draft
362    pub fn parse_full_binary_to_json_with_existing_optimizations<P: AsRef<Path>>(
363        binary_path: P,
364        base_name: &str,
365    ) -> Result<(), BinaryExportError> {
366        let start = std::time::Instant::now();
367        tracing::info!("🚀 Starting ultra-fast binary to JSON conversion using BinaryReader");
368
369        // Use BinaryReader for direct, efficient data access (v5-draft approach)
370        Self::parse_binary_to_json_with_index(&binary_path, base_name)?;
371
372        let total_time = start.elapsed();
373
374        if total_time.as_millis() > 300 {
375            tracing::warn!(
376                "⚠️  Performance target missed: {}ms (target: <300ms)",
377                total_time.as_millis()
378            );
379        } else {
380            tracing::info!(
381                "🎉 Ultra-fast conversion completed: {}ms (target: <300ms)",
382                total_time.as_millis()
383            );
384        }
385
386        Ok(())
387    }
388
389    /// Generate memory analysis JSON directly (fast path)
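    ///
    /// A minimal sketch chaining the loader and the generator (paths are hypothetical); each
    /// emitted record carries ptr, size, var_name, type_name, scope_name, timestamp_alloc,
    /// thread_id, borrow_count, and is_leaked:
    ///
    /// ```ignore
    /// let allocations = BinaryParser::load_allocations("my_app.memscope")?;
    /// BinaryParser::generate_memory_analysis_json(
    ///     &allocations,
    ///     std::path::Path::new("my_app_memory_analysis.json"),
    /// )?;
    /// ```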
390    pub fn generate_memory_analysis_json(
391        allocations: &[AllocationInfo],
392        output_path: &std::path::Path,
393    ) -> Result<(), BinaryExportError> {
394        use std::io::{BufWriter, Write};
395
396        let file = std::fs::File::create(output_path)?;
397        let mut writer = BufWriter::with_capacity(64 * 1024, file);
398
399        // Pre-allocate string buffer for reuse
400        let mut buffer = String::with_capacity(512);
401
402        // Write JSON header
403        writer.write_all(b"{\"data\":{\"allocations\":[")?;
404
405        // Write allocations directly with buffering
406        for (i, alloc) in allocations.iter().enumerate() {
407            if i > 0 {
408                writer.write_all(b",")?;
409            }
410
411            buffer.clear();
412            buffer.push_str("{\"ptr\":\"0x");
413            buffer.push_str(&format!("{:x}", alloc.ptr));
414            buffer.push_str("\",\"size\":");
415            buffer.push_str(&alloc.size.to_string());
416            buffer.push_str(",\"var_name\":\"");
417            // Full-binary mode: no null fields allowed (requirement 21) - direct access without inference
418            buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
419            buffer.push_str("\",\"type_name\":\"");
420            buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
421            buffer.push_str("\",\"scope_name\":\"");
422            buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
423            buffer.push_str("\",\"timestamp_alloc\":");
424            buffer.push_str(&alloc.timestamp_alloc.to_string());
425            buffer.push_str(",\"thread_id\":\"");
426            buffer.push_str(&alloc.thread_id);
427            buffer.push_str("\",\"borrow_count\":");
428            buffer.push_str(&alloc.borrow_count.to_string());
429            buffer.push_str(",\"is_leaked\":");
430            buffer.push_str(if alloc.is_leaked { "true" } else { "false" });
431            buffer.push('}');
432
433            writer.write_all(buffer.as_bytes())?;
434        }
435
436        // Write JSON footer
437        writer.write_all(b"]}}")?;
438        writer.flush()?;
439
440        Ok(())
441    }
442
443    /// Generate lifetime analysis JSON directly (fast path)
444    pub fn generate_lifetime_analysis_json(
445        allocations: &[AllocationInfo],
446        output_path: &std::path::Path,
447    ) -> Result<(), BinaryExportError> {
448        use std::io::{BufWriter, Write};
449
450        let file = std::fs::File::create(output_path)?;
451        let mut writer = BufWriter::with_capacity(64 * 1024, file);
452        let mut buffer = String::with_capacity(256);
453
454        writer.write_all(b"{\"lifecycle_events\":[")?;
455
456        for (i, alloc) in allocations.iter().enumerate() {
457            if i > 0 {
458                writer.write_all(b",")?;
459            }
460
461            buffer.clear();
462            buffer.push_str("{\"event\":\"allocation\",\"ptr\":\"0x");
463            buffer.push_str(&format!("{:x}", alloc.ptr));
464            buffer.push_str("\",\"scope\":\"");
465            buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
466            buffer.push_str("\",\"size\":");
467            buffer.push_str(&alloc.size.to_string());
468            buffer.push_str(",\"timestamp\":");
469            buffer.push_str(&alloc.timestamp_alloc.to_string());
470            buffer.push_str(",\"type_name\":\"");
471            buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
472            buffer.push_str("\",\"var_name\":\"");
473            buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
474            buffer.push_str("\"}");
475
476            writer.write_all(buffer.as_bytes())?;
477        }
478
479        writer.write_all(b"]}")?;
480        writer.flush()?;
481
482        Ok(())
483    }
484
485    /// Generate performance analysis JSON directly (fast path)
486    pub fn generate_performance_analysis_json(
487        allocations: &[AllocationInfo],
488        output_path: &std::path::Path,
489    ) -> Result<(), BinaryExportError> {
490        use std::io::{BufWriter, Write};
491
492        let file = std::fs::File::create(output_path)?;
493        let mut writer = BufWriter::with_capacity(64 * 1024, file);
494        let mut buffer = String::with_capacity(512);
495
496        writer.write_all(b"{\"data\":{\"allocations\":[")?;
497
498        for (i, alloc) in allocations.iter().enumerate() {
499            if i > 0 {
500                writer.write_all(b",")?;
501            }
502
503            buffer.clear();
504            buffer.push_str("{\"ptr\":\"0x");
505            buffer.push_str(&format!("{:x}", alloc.ptr));
506            buffer.push_str("\",\"size\":");
507            buffer.push_str(&alloc.size.to_string());
508            buffer.push_str(",\"var_name\":\"");
509            buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
510            buffer.push_str("\",\"type_name\":\"");
511            buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
512            buffer.push_str("\",\"timestamp_alloc\":");
513            buffer.push_str(&alloc.timestamp_alloc.to_string());
514            buffer.push_str(",\"thread_id\":\"");
515            buffer.push_str(&alloc.thread_id);
516            buffer.push_str("\",\"borrow_count\":");
517            buffer.push_str(&alloc.borrow_count.to_string());
518            buffer.push_str(",\"fragmentation_analysis\":{\"status\":\"not_analyzed\"}}");
519
520            writer.write_all(buffer.as_bytes())?;
521        }
522
523        writer.write_all(b"]}}")?;
524        writer.flush()?;
525
526        Ok(())
527    }
528
529    /// Generate unsafe FFI analysis JSON directly (fast path)
530    pub fn generate_unsafe_ffi_analysis_json(
531        allocations: &[AllocationInfo],
532        output_path: &std::path::Path,
533    ) -> Result<(), BinaryExportError> {
534        use std::io::{BufWriter, Write};
535
536        let file = std::fs::File::create(output_path)?;
537        let mut writer = BufWriter::with_capacity(64 * 1024, file);
538        let mut buffer = String::with_capacity(512);
539
540        writer.write_all(b"{\"boundary_events\":[],\"enhanced_ffi_data\":[")?;
541
542        for (i, alloc) in allocations.iter().enumerate() {
543            if i > 0 {
544                writer.write_all(b",")?;
545            }
546
547            buffer.clear();
548            buffer.push_str("{\"ptr\":\"0x");
549            buffer.push_str(&format!("{:x}", alloc.ptr));
550            buffer.push_str("\",\"size\":");
551            buffer.push_str(&alloc.size.to_string());
552            buffer.push_str(",\"var_name\":\"");
553            buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
554            buffer.push_str("\",\"type_name\":\"");
555            buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
556            buffer.push_str("\",\"timestamp_alloc\":");
557            buffer.push_str(&alloc.timestamp_alloc.to_string());
558            buffer.push_str(",\"thread_id\":\"");
559            buffer.push_str(&alloc.thread_id);
560            buffer.push_str("\",\"stack_trace\":");
            // Serialize the captured stack trace when one is present; otherwise emit an
            // explanatory placeholder so the field is never null.
            match &alloc.stack_trace {
                Some(trace) => buffer
                    .push_str(&serde_json::to_string(trace).unwrap_or_else(|_| "[]".to_string())),
                None => buffer.push_str("[\"no_stack_trace_available\"]"),
            }
566            buffer.push_str(",\"runtime_state\":{\"status\":\"not_analyzed\"}}");
567
568            writer.write_all(buffer.as_bytes())?;
569        }
570
571        writer.write_all(b"]}")?;
572        writer.flush()?;
573
574        Ok(())
575    }
576
577    /// Generate complex types analysis JSON directly (fast path)
578    pub fn generate_complex_types_analysis_json(
579        allocations: &[AllocationInfo],
580        output_path: &std::path::Path,
581    ) -> Result<(), BinaryExportError> {
582        use std::io::{BufWriter, Write};
583
584        let file = std::fs::File::create(output_path)?;
585        let mut writer = BufWriter::with_capacity(64 * 1024, file);
586        let mut buffer = String::with_capacity(1024);
587
588        writer.write_all(b"{\"categorized_types\":{\"primitive\":[")?;
589
590        for (i, alloc) in allocations.iter().enumerate() {
591            if i > 0 {
592                writer.write_all(b",")?;
593            }
594
595            buffer.clear();
596            buffer.push_str("{\"ptr\":\"0x");
597            buffer.push_str(&format!("{:x}", alloc.ptr));
598            buffer.push_str("\",\"size\":");
599            buffer.push_str(&alloc.size.to_string());
600            buffer.push_str(",\"var_name\":\"");
601            buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
602            buffer.push_str("\",\"type_name\":\"");
603            buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
604            buffer.push_str("\",\"smart_pointer_info\":{\"type\":\"none\"}");
605            buffer.push_str(",\"memory_layout\":{\"alignment\":8}");
606            buffer.push_str(",\"generic_info\":{\"is_generic\":false}");
607            buffer.push_str(",\"dynamic_type_info\":{\"is_dynamic\":false}");
608            buffer.push_str(",\"generic_instantiation\":{\"instantiated\":false}");
609            buffer.push_str(",\"type_relationships\":{\"relationships\":[]}");
610            buffer.push_str(",\"type_usage\":{\"usage_count\":1}}");
611
612            writer.write_all(buffer.as_bytes())?;
613        }
614
615        writer.write_all(b"]}}")?;
616        writer.flush()?;
617
618        Ok(())
619    }
620
    /// Ultra-fast JSON generation using direct streaming writes (no full-file intermediate string)
622    fn generate_json_ultra_fast(
623        allocations: &[AllocationInfo],
624        output_path: &std::path::Path,
625        json_type: &str,
626        _estimated_size: usize,
627    ) -> Result<(), BinaryExportError> {
628        use std::io::{BufWriter, Write};
629
630        let file = std::fs::File::create(output_path)?;
631        let mut writer = BufWriter::with_capacity(2 * 1024 * 1024, file); // 2MB buffer for maximum I/O performance
632
633        // Direct streaming write without intermediate string allocation
634        match json_type {
635            "memory" => {
636                writer.write_all(br#"{"data":{"allocations":["#)?;
637                for (i, alloc) in allocations.iter().enumerate() {
638                    if i > 0 {
639                        writer.write_all(b",")?;
640                    }
641                    Self::write_memory_record_direct(&mut writer, alloc)?;
642                }
643                writer.write_all(b"]}}")?;
644            }
645            "lifetime" => {
646                writer.write_all(br#"{"lifecycle_events":["#)?;
647                for (i, alloc) in allocations.iter().enumerate() {
648                    if i > 0 {
649                        writer.write_all(b",")?;
650                    }
651                    Self::write_lifetime_record_direct(&mut writer, alloc)?;
652                }
653                writer.write_all(b"]}")?;
654            }
655            "performance" => {
656                writer.write_all(br#"{"data":{"allocations":["#)?;
657                for (i, alloc) in allocations.iter().enumerate() {
658                    if i > 0 {
659                        writer.write_all(b",")?;
660                    }
661                    Self::write_performance_record_direct(&mut writer, alloc)?;
662                }
663                writer.write_all(b"]}}")?;
664            }
665            "unsafe_ffi" => {
666                writer.write_all(br#"{"boundary_events":[],"enhanced_ffi_data":["#)?;
667                for (i, alloc) in allocations.iter().enumerate() {
668                    if i > 0 {
669                        writer.write_all(b",")?;
670                    }
671                    Self::write_ffi_record_direct(&mut writer, alloc)?;
672                }
673                writer.write_all(b"]}")?;
674            }
675            "complex_types" => {
676                writer.write_all(br#"{"categorized_types":{"primitive":["#)?;
677                for (i, alloc) in allocations.iter().enumerate() {
678                    if i > 0 {
679                        writer.write_all(b",")?;
680                    }
681                    Self::write_complex_record_direct(&mut writer, alloc)?;
682                }
683                writer.write_all(b"]}}")?;
684            }
685            _ => {
686                return Err(BinaryExportError::CorruptedData(format!(
687                    "Unknown JSON type: {json_type}"
688                )))
689            }
690        }
691
692        writer.flush()?;
693        Ok(())
694    }
695
696    /// Serial optimized JSON generation for small datasets
697    /// Uses the same optimizations as parallel version but without threading overhead
698    fn generate_json_serial_optimized(
699        allocations: &[AllocationInfo],
700        output_path: &std::path::Path,
701        json_type: &str,
702        _estimated_size: usize,
703    ) -> Result<(), BinaryExportError> {
704        use std::io::{BufWriter, Write};
705
706        let file = std::fs::File::create(output_path)?;
707        // Task 7.3: Large buffer for optimal I/O performance
708        let mut writer = BufWriter::with_capacity(4 * 1024 * 1024, file);
709
710        // Task 7.2: Precise memory pre-allocation based on JSON type
711        let _estimated_record_size = match json_type {
712            "memory" => 220,
713            "lifetime" => 130,
714            "performance" => 190,
715            "unsafe_ffi" => 170,
716            "complex_types" => 320,
717            _ => 180,
718        };
719
720        // Use small buffer for chunked writing instead of giant string
721        let mut buffer = String::with_capacity(8192); // 8KB buffer for chunked writes
722
723        // Task 7.4: Ultra-fast JSON generation with chunked writing
724        match json_type {
725            "memory" => {
726                writer.write_all(br#"{"data":{"allocations":["#)?;
727                for (i, alloc) in allocations.iter().enumerate() {
728                    if i > 0 {
729                        writer.write_all(b",")?;
730                    }
731                    buffer.clear();
732                    Self::append_memory_record_optimized(&mut buffer, alloc);
733                    writer.write_all(buffer.as_bytes())?;
734                }
735                writer.write_all(b"]}}")?;
736            }
737            "lifetime" => {
738                writer.write_all(br#"{"lifecycle_events":["#)?;
739                for (i, alloc) in allocations.iter().enumerate() {
740                    if i > 0 {
741                        writer.write_all(b",")?;
742                    }
743                    buffer.clear();
744                    Self::append_lifetime_record_optimized(&mut buffer, alloc);
745                    writer.write_all(buffer.as_bytes())?;
746                }
747                writer.write_all(b"]}")?;
748            }
749            "performance" => {
750                writer.write_all(br#"{"data":{"allocations":["#)?;
751                for (i, alloc) in allocations.iter().enumerate() {
752                    if i > 0 {
753                        writer.write_all(b",")?;
754                    }
755                    buffer.clear();
756                    Self::append_performance_record_optimized(&mut buffer, alloc);
757                    writer.write_all(buffer.as_bytes())?;
758                }
759                writer.write_all(b"]}}")?;
760            }
761            "unsafe_ffi" => {
762                writer.write_all(br#"{"boundary_events":[],"enhanced_ffi_data":["#)?;
763                for (i, alloc) in allocations.iter().enumerate() {
764                    if i > 0 {
765                        writer.write_all(b",")?;
766                    }
767                    buffer.clear();
768                    Self::append_ffi_record_optimized(&mut buffer, alloc);
769                    writer.write_all(buffer.as_bytes())?;
770                }
771                writer.write_all(b"]}")?;
772            }
773            "complex_types" => {
774                writer.write_all(br#"{"categorized_types":{"primitive":["#)?;
775                for (i, alloc) in allocations.iter().enumerate() {
776                    if i > 0 {
777                        writer.write_all(b",")?;
778                    }
779                    buffer.clear();
780                    Self::append_complex_record_optimized(&mut buffer, alloc);
781                    writer.write_all(buffer.as_bytes())?;
782                }
783                writer.write_all(b"]}}")?;
784            }
785            _ => {
786                return Err(BinaryExportError::CorruptedData(format!(
787                    "Unknown JSON type: {json_type}"
788                )))
789            }
790        }
791        writer.flush()?;
792        Ok(())
793    }
794
795    /// Ultra-fast parallel JSON generation with shared data and optimized I/O
796    /// Task 7.1, 7.2, 7.3, 7.4: Implements parallel processing, precise memory allocation,
797    /// large I/O buffers, and reduced format! usage
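    ///
    /// For example, with 10_000 "memory" records the output buffer is pre-allocated as
    /// 10_000 * 220 * 110 / 100 = 2_420_000 bytes (about 2.3 MiB), so the JSON body is
    /// built without intermediate reallocations.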
798    fn generate_json_ultra_fast_parallel(
799        allocations: &Arc<Vec<AllocationInfo>>,
800        output_path: &std::path::Path,
801        json_type: &str,
802        _estimated_size: usize,
803    ) -> Result<(), BinaryExportError> {
804        use std::io::{BufWriter, Write};
805
806        let file = std::fs::File::create(output_path)?;
807        // Task 7.3: Increase buffer size to 8MB for maximum I/O performance
808        let mut writer = BufWriter::with_capacity(8 * 1024 * 1024, file);
809
810        // Task 7.2: Precise memory pre-allocation based on JSON type
811        let estimated_record_size = match json_type {
812            "memory" => 220, // memory_analysis: ~220 bytes per allocation (increased precision)
813            "lifetime" => 130, // lifetime: ~130 bytes per allocation
814            "performance" => 190, // performance: ~190 bytes per allocation
815            "unsafe_ffi" => 170, // unsafe_ffi: ~170 bytes per allocation
816            "complex_types" => 320, // complex_types: ~320 bytes per allocation (most complex)
817            _ => 180,
818        };
819
820        // Pre-allocate buffer with 10% extra space to avoid reallocations
821        let buffer_capacity = (allocations.len() * estimated_record_size * 110) / 100;
822        let mut buffer = String::with_capacity(buffer_capacity);
823
824        // Task 7.4: Optimized JSON generation with minimal format! usage
825        // Use direct string operations instead of format! macro where possible
826        match json_type {
827            "memory" => {
828                buffer.push_str(r#"{"data":{"allocations":["#);
829                for (i, alloc) in allocations.iter().enumerate() {
830                    if i > 0 {
831                        buffer.push(',');
832                    }
833                    Self::append_memory_record_optimized(&mut buffer, alloc);
834                }
835                buffer.push_str("]}}")
836            }
837            "lifetime" => {
838                buffer.push_str(r#"{"lifecycle_events":["#);
839                for (i, alloc) in allocations.iter().enumerate() {
840                    if i > 0 {
841                        buffer.push(',');
842                    }
843                    Self::append_lifetime_record_optimized(&mut buffer, alloc);
844                }
845                buffer.push_str("]}")
846            }
847            "performance" => {
848                buffer.push_str(r#"{"data":{"allocations":["#);
849                for (i, alloc) in allocations.iter().enumerate() {
850                    if i > 0 {
851                        buffer.push(',');
852                    }
853                    Self::append_performance_record_optimized(&mut buffer, alloc);
854                }
855                buffer.push_str("]}}")
856            }
857            "unsafe_ffi" => {
858                buffer.push_str(r#"{"boundary_events":[],"enhanced_ffi_data":["#);
859                for (i, alloc) in allocations.iter().enumerate() {
860                    if i > 0 {
861                        buffer.push(',');
862                    }
863                    Self::append_ffi_record_optimized(&mut buffer, alloc);
864                }
865                buffer.push_str("]}")
866            }
867            "complex_types" => {
868                buffer.push_str(r#"{"categorized_types":{"primitive":["#);
869                for (i, alloc) in allocations.iter().enumerate() {
870                    if i > 0 {
871                        buffer.push(',');
872                    }
873                    Self::append_complex_record_optimized(&mut buffer, alloc);
874                }
875                buffer.push_str("]}}")
876            }
877            _ => {
878                return Err(BinaryExportError::CorruptedData(format!(
879                    "Unknown JSON type: {json_type}"
880                )))
881            }
882        }
883
884        // Task 7.3: Single large write for maximum I/O performance
885        writer.write_all(buffer.as_bytes())?;
886        writer.flush()?;
887        Ok(())
888    }
889
890    #[inline]
891    fn append_memory_record(buffer: &mut String, alloc: &AllocationInfo) {
892        buffer.push_str(r#"{"ptr":"0x"#);
893        Self::append_hex(buffer, alloc.ptr);
894        buffer.push_str(r#"","size":"#);
895        Self::append_usize(buffer, alloc.size);
896        buffer.push_str(r#","var_name":""#);
897        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
898        buffer.push_str(r#"","type_name":""#);
899        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
900        buffer.push_str(r#"","scope_name":""#);
901        buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
902        buffer.push_str(r#"","timestamp_alloc":"#);
903        Self::append_number(buffer, alloc.timestamp_alloc);
904        buffer.push_str(r#","thread_id":""#);
905        buffer.push_str(&alloc.thread_id);
906        buffer.push_str(r#"","borrow_count":"#);
907        Self::append_usize(buffer, alloc.borrow_count);
908        buffer.push_str(r#","is_leaked":"#);
909        buffer.push_str(if alloc.is_leaked { "true" } else { "false" });
910        buffer.push('}');
911    }
912
913    #[inline]
914    fn append_lifetime_record(buffer: &mut String, alloc: &AllocationInfo) {
915        buffer.push_str(r#"{"event":"allocation","ptr":"0x"#);
916        Self::append_hex(buffer, alloc.ptr);
917        buffer.push_str(r#"","scope":""#);
918        buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
919        buffer.push_str(r#"","size":"#);
920        Self::append_usize(buffer, alloc.size);
921        buffer.push_str(r#","timestamp":"#);
922        Self::append_number(buffer, alloc.timestamp_alloc);
923        buffer.push_str(r#","type_name":""#);
924        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
925        buffer.push_str(r#"","var_name":""#);
926        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
927        buffer.push_str("\"}");
928    }
929
930    #[inline]
931    fn append_performance_record(buffer: &mut String, alloc: &AllocationInfo) {
932        buffer.push_str(r#"{"ptr":"0x"#);
933        Self::append_hex(buffer, alloc.ptr);
934        buffer.push_str(r#"","size":"#);
935        Self::append_usize(buffer, alloc.size);
936        buffer.push_str(r#","var_name":""#);
937        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
938        buffer.push_str(r#"","type_name":""#);
939        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
940        buffer.push_str(r#"","timestamp_alloc":"#);
941        Self::append_number(buffer, alloc.timestamp_alloc);
942        buffer.push_str(r#","thread_id":""#);
943        buffer.push_str(&alloc.thread_id);
944        buffer.push_str(r#"","borrow_count":"#);
945        Self::append_usize(buffer, alloc.borrow_count);
946        buffer.push_str(r#","fragmentation_analysis":{"status":"not_analyzed"}}"#);
947    }
948
949    #[inline]
950    fn append_ffi_record(buffer: &mut String, alloc: &AllocationInfo) {
951        buffer.push_str(r#"{"ptr":"0x"#);
952        Self::append_hex(buffer, alloc.ptr);
953        buffer.push_str(r#"","size":"#);
954        Self::append_usize(buffer, alloc.size);
955        buffer.push_str(r#","var_name":""#);
956        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
957        buffer.push_str(r#"","type_name":""#);
958        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
959        buffer.push_str(r#"","timestamp_alloc":"#);
960        Self::append_number(buffer, alloc.timestamp_alloc);
961        buffer.push_str(r#","thread_id":""#);
962        buffer.push_str(&alloc.thread_id);
963        buffer.push_str(r#"","stack_trace":["rust_main_thread"],"runtime_state":{"status":"safe","boundary_crossings":0}}"#);
964    }
965
966    #[inline]
967    fn append_complex_record(buffer: &mut String, alloc: &AllocationInfo) {
968        buffer.push_str(r#"{"ptr":"0x"#);
969        Self::append_hex(buffer, alloc.ptr);
970        buffer.push_str(r#"","size":"#);
971        Self::append_usize(buffer, alloc.size);
972        buffer.push_str(r#","var_name":""#);
973        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
974        buffer.push_str(r#"","type_name":""#);
975        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
976        buffer.push_str(r#"","smart_pointer_info":{"type":"raw_pointer","is_smart":false},"memory_layout":{"alignment":8,"size_class":"medium"},"generic_info":{"is_generic":false,"type_params":[]},"dynamic_type_info":{"is_dynamic":false,"vtable_ptr":0},"generic_instantiation":{"instantiated":true,"template_args":[]},"type_relationships":{"parent_types":[],"child_types":[]},"type_usage":{"usage_count":1,"access_pattern":"sequential"}}"#);
977    }
978
979    // PERFORMANCE OPTIMIZATION: Removed infer_type_name and infer_variable_name functions
980    // These functions were causing 8384ms performance bottleneck by doing complex inference
981    // calculations for 1000+ allocations. Now we use direct field access for maximum speed.
982    // Requirement 21: Full-binary mode guarantees no null fields, so direct access is safe.
983
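    // For reference, the fast hex routine below emits lowercase digits with no leading
    // "0x" prefix (callers add the prefix themselves), e.g. 0 -> "0", 255 -> "ff",
    // 4096 -> "1000".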
984    #[inline]
985    fn append_hex(buffer: &mut String, value: usize) {
986        // Fast hex conversion without format! macro
987        const HEX_CHARS: &[u8] = b"0123456789abcdef";
988        let mut temp = [0u8; 16]; // Enough for 64-bit hex
989        let mut i = 0;
990        let mut val = value;
991
992        if val == 0 {
993            buffer.push('0');
994            return;
995        }
996
997        while val > 0 {
998            temp[i] = HEX_CHARS[val & 0xf];
999            val >>= 4;
1000            i += 1;
1001        }
1002
1003        // Reverse and append
1004        for j in (0..i).rev() {
1005            buffer.push(temp[j] as char);
1006        }
1007    }
1008
1009    #[inline]
1010    fn append_number(buffer: &mut String, value: u64) {
1011        // Fast number to string conversion without format! macro
1012        if value == 0 {
1013            buffer.push('0');
1014            return;
1015        }
1016
1017        let mut temp = [0u8; 20]; // Enough for 64-bit number
1018        let mut i = 0;
1019        let mut val = value;
1020
1021        while val > 0 {
1022            temp[i] = b'0' + (val % 10) as u8;
1023            val /= 10;
1024            i += 1;
1025        }
1026
1027        // Reverse and append
1028        for j in (0..i).rev() {
1029            buffer.push(temp[j] as char);
1030        }
1031    }
1032
1033    #[inline]
1034    fn append_usize(buffer: &mut String, value: usize) {
1035        Self::append_number(buffer, value as u64);
1036    }
1037
1038    /// Task 7.4: Ultra-fast memory record generation - eliminated inference calls
1039    /// Performance optimization: Removed infer_type_name and infer_variable_name calls
1040    /// Requirement 21: Full-binary mode guarantees no null fields, direct access is safe
1041    #[inline]
1042    fn append_memory_record_optimized(buffer: &mut String, alloc: &AllocationInfo) {
1043        // Use direct string operations instead of format! for better performance
1044        buffer.push_str(r#"{"ptr":"0x"#);
1045        Self::append_hex(buffer, alloc.ptr);
1046        buffer.push_str(r#"","size":"#);
1047        Self::append_usize(buffer, alloc.size);
1048        buffer.push_str(r#","var_name":""#);
1049        // Direct access - use stored data when available, simple defaults when missing
1050        buffer.push_str(alloc.var_name.as_deref().unwrap_or("system_alloc"));
1051        buffer.push_str(r#"","type_name":""#);
1052        buffer.push_str(alloc.type_name.as_deref().unwrap_or("system_type"));
1053        buffer.push_str(r#"","scope_name":""#);
1054        buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
1055        buffer.push_str(r#"","timestamp_alloc":"#);
1056        Self::append_number(buffer, alloc.timestamp_alloc);
1057        buffer.push_str(r#","thread_id":""#);
1058        buffer.push_str(&alloc.thread_id);
1059        buffer.push_str(r#"","borrow_count":"#);
1060        Self::append_usize(buffer, alloc.borrow_count);
1061        buffer.push_str(r#","is_leaked":"#);
1062        buffer.push_str(if alloc.is_leaked { "true" } else { "false" });
1063        buffer.push('}');
1064    }
1065
1066    /// Task 7.4: Ultra-fast lifetime record generation - eliminated inference calls
1067    #[inline]
1068    fn append_lifetime_record_optimized(buffer: &mut String, alloc: &AllocationInfo) {
1069        buffer.push_str(r#"{"event":"allocation","ptr":"0x"#);
1070        Self::append_hex(buffer, alloc.ptr);
1071        buffer.push_str(r#"","scope":""#);
1072        buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
1073        buffer.push_str(r#"","size":"#);
1074        Self::append_usize(buffer, alloc.size);
1075        buffer.push_str(r#","timestamp":"#);
1076        Self::append_number(buffer, alloc.timestamp_alloc);
1077        buffer.push_str(r#","type_name":""#);
1078        buffer.push_str(alloc.type_name.as_deref().unwrap_or("system_type"));
1079        buffer.push_str(r#"","var_name":""#);
1080        buffer.push_str(alloc.var_name.as_deref().unwrap_or("system_alloc"));
1081        buffer.push_str("\"}");
1082    }
1083
1084    /// Task 7.4: Ultra-fast performance record generation - eliminated inference calls
1085    #[inline]
1086    fn append_performance_record_optimized(buffer: &mut String, alloc: &AllocationInfo) {
1087        buffer.push_str(r#"{"ptr":"0x"#);
1088        Self::append_hex(buffer, alloc.ptr);
1089        buffer.push_str(r#"","size":"#);
1090        Self::append_usize(buffer, alloc.size);
1091        buffer.push_str(r#","var_name":""#);
1092        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1093        buffer.push_str(r#"","type_name":""#);
1094        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1095        buffer.push_str(r#"","timestamp_alloc":"#);
1096        Self::append_number(buffer, alloc.timestamp_alloc);
1097        buffer.push_str(r#","thread_id":""#);
1098        buffer.push_str(&alloc.thread_id);
1099        buffer.push_str(r#"","borrow_count":"#);
1100        Self::append_usize(buffer, alloc.borrow_count);
1101        buffer.push_str(r#","fragmentation_analysis":{"status":"not_analyzed"}}"#);
1102    }
1103
1104    /// Task 7.4: Ultra-fast FFI record generation - eliminated inference calls
1105    #[inline]
1106    fn append_ffi_record_optimized(buffer: &mut String, alloc: &AllocationInfo) {
1107        buffer.push_str(r#"{"ptr":"0x"#);
1108        Self::append_hex(buffer, alloc.ptr);
1109        buffer.push_str(r#"","size":"#);
1110        Self::append_usize(buffer, alloc.size);
1111        buffer.push_str(r#","var_name":""#);
1112        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1113        buffer.push_str(r#"","type_name":""#);
1114        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1115        buffer.push_str(r#"","timestamp_alloc":"#);
1116        Self::append_number(buffer, alloc.timestamp_alloc);
1117        buffer.push_str(r#","thread_id":""#);
1118        buffer.push_str(&alloc.thread_id);
1119        buffer.push_str(r#"","stack_trace":["rust_main_thread"],"runtime_state":{"status":"safe","boundary_crossings":0}}"#);
1120    }
1121
1122    /// Task 7.4: Ultra-fast complex types record generation - eliminated inference calls
1123    #[inline]
1124    fn append_complex_record_optimized(buffer: &mut String, alloc: &AllocationInfo) {
1125        buffer.push_str(r#"{"ptr":"0x"#);
1126        Self::append_hex(buffer, alloc.ptr);
1127        buffer.push_str(r#"","size":"#);
1128        Self::append_usize(buffer, alloc.size);
1129        buffer.push_str(r#","var_name":""#);
1130        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1131        buffer.push_str(r#"","type_name":""#);
1132        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1133        buffer.push_str(r#"","smart_pointer_info":{"type":"raw_pointer","is_smart":false},"memory_layout":{"alignment":8,"size_class":"medium"},"generic_info":{"is_generic":false,"type_params":[]},"dynamic_type_info":{"is_dynamic":false,"vtable_ptr":0},"generic_instantiation":{"instantiated":true,"template_args":[]},"type_relationships":{"parent_types":[],"child_types":[]},"type_usage":{"usage_count":1,"access_pattern":"sequential"}}"#);
1134    }
1135
    /// Write a memory record straight to the writer, built in a small per-record buffer
1137    #[inline]
1138    fn write_memory_record_direct<W: std::io::Write>(
1139        writer: &mut W,
1140        alloc: &AllocationInfo,
1141    ) -> Result<(), BinaryExportError> {
1142        // Pre-allocate buffer for maximum performance
1143        let mut buffer = String::with_capacity(512);
1144
1145        // Direct string building without format! macro
1146        buffer.push_str(r#"{"ptr":"0x"#);
1147        Self::append_hex(&mut buffer, alloc.ptr);
1148        buffer.push_str(r#"","size":"#);
1149        Self::append_usize(&mut buffer, alloc.size);
1150        buffer.push_str(r#","var_name":""#);
1151        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1152        buffer.push_str(r#"","type_name":""#);
1153        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1154        buffer.push_str(r#"","scope_name":""#);
1155        buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
1156        buffer.push_str(r#"","timestamp_alloc":"#);
1157        Self::append_number(&mut buffer, alloc.timestamp_alloc);
1158        buffer.push_str(r#","thread_id":""#);
1159        buffer.push_str(&alloc.thread_id);
1160        buffer.push_str(r#"","borrow_count":"#);
1161        Self::append_usize(&mut buffer, alloc.borrow_count);
1162        buffer.push_str(r#","is_leaked":"#);
1163        buffer.push_str(if alloc.is_leaked { "true" } else { "false" });
1164        buffer.push_str(r#","lifetime_ms":0,"smart_pointer_info":{"data_ptr":0,"ref_count":1},"memory_layout":{"alignment":8,"size":"#);
1165        Self::append_usize(&mut buffer, alloc.size);
1166        buffer.push_str("}}");
1167
1168        writer.write_all(buffer.as_bytes())?;
1169        Ok(())
1170    }
1171
    /// Write a lifetime record straight to the writer, built in a small per-record buffer
1173    #[inline]
1174    fn write_lifetime_record_direct<W: std::io::Write>(
1175        writer: &mut W,
1176        alloc: &AllocationInfo,
1177    ) -> Result<(), BinaryExportError> {
1178        let mut buffer = String::with_capacity(256);
1179
1180        buffer.push_str(r#"{"event":"allocation","ptr":"0x"#);
1181        Self::append_hex(&mut buffer, alloc.ptr);
1182        buffer.push_str(r#"","scope":""#);
1183        buffer.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
1184        buffer.push_str(r#"","size":"#);
1185        Self::append_usize(&mut buffer, alloc.size);
1186        buffer.push_str(r#","timestamp":"#);
1187        Self::append_number(&mut buffer, alloc.timestamp_alloc);
1188        buffer.push_str(r#","type_name":""#);
1189        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1190        buffer.push_str(r#"","var_name":""#);
1191        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1192        buffer.push_str("\"}");
1193
1194        writer.write_all(buffer.as_bytes())?;
1195        Ok(())
1196    }
1197
    /// Write a performance record straight to the writer, built in a small per-record buffer
1199    #[inline]
1200    fn write_performance_record_direct<W: std::io::Write>(
1201        writer: &mut W,
1202        alloc: &AllocationInfo,
1203    ) -> Result<(), BinaryExportError> {
1204        let mut buffer = String::with_capacity(384);
1205
1206        buffer.push_str(r#"{"ptr":"0x"#);
1207        Self::append_hex(&mut buffer, alloc.ptr);
1208        buffer.push_str(r#"","size":"#);
1209        Self::append_usize(&mut buffer, alloc.size);
1210        buffer.push_str(r#","var_name":""#);
1211        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1212        buffer.push_str(r#"","type_name":""#);
1213        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1214        buffer.push_str(r#"","timestamp_alloc":"#);
1215        Self::append_number(&mut buffer, alloc.timestamp_alloc);
1216        buffer.push_str(r#","thread_id":""#);
1217        buffer.push_str(&alloc.thread_id);
1218        buffer.push_str(r#"","borrow_count":"#);
1219        Self::append_usize(&mut buffer, alloc.borrow_count);
1220        buffer.push_str(r#","fragmentation_analysis":{"status":"not_analyzed"}}"#);
1221
1222        writer.write_all(buffer.as_bytes())?;
1223        Ok(())
1224    }
1225
    /// Write an FFI record straight to the writer, built in a small per-record buffer
1227    #[inline]
1228    fn write_ffi_record_direct<W: std::io::Write>(
1229        writer: &mut W,
1230        alloc: &AllocationInfo,
1231    ) -> Result<(), BinaryExportError> {
1232        let mut buffer = String::with_capacity(320);
1233
1234        buffer.push_str(r#"{"ptr":"0x"#);
1235        Self::append_hex(&mut buffer, alloc.ptr);
1236        buffer.push_str(r#"","size":"#);
1237        Self::append_usize(&mut buffer, alloc.size);
1238        buffer.push_str(r#","var_name":""#);
1239        buffer.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1240        buffer.push_str(r#"","type_name":""#);
1241        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1242        buffer.push_str(r#"","timestamp_alloc":"#);
1243        Self::append_number(&mut buffer, alloc.timestamp_alloc);
1244        buffer.push_str(r#","thread_id":""#);
1245        buffer.push_str(&alloc.thread_id);
1246        buffer.push_str(r#"","stack_trace":["rust_main_thread"],"runtime_state":{"status":"safe","boundary_crossings":0}}"#);
1247
1248        writer.write_all(buffer.as_bytes())?;
1249        Ok(())
1250    }
1251
    /// Write a complex-types record straight to the writer, built in a small per-record buffer
1253    #[inline]
1254    fn write_complex_record_direct<W: std::io::Write>(
1255        writer: &mut W,
1256        alloc: &AllocationInfo,
1257    ) -> Result<(), BinaryExportError> {
1258        let mut buffer = String::with_capacity(256);
1259
1260        buffer.push_str(r#"{"allocation_id":"#);
1261        Self::append_usize(&mut buffer, alloc.ptr);
1262        buffer.push_str(r#","type_name":""#);
1263        buffer.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1264        buffer.push_str(r#"","category":"primitive","complexity_score":1,"memory_layout":{"alignment":8},"generic_info":{"is_generic":false}}"#);
1265
1266        writer.write_all(buffer.as_bytes())?;
1267        Ok(())
1268    }
1269
    /// **[New Interface]** Parse binary to JSON using streaming `BinaryReader` access for maximum performance
    ///
    /// This is the core high-performance interface: it streams records directly from the
    /// binary file, avoiding the overhead of loading all allocations into memory at once.
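    ///
    /// A minimal sketch (the binary path and base name are hypothetical); this writes the
    /// same five JSON files under MemoryAnalysis/{base_name}/ as the other converters:
    ///
    /// ```ignore
    /// BinaryParser::parse_binary_to_json_with_index("my_app.memscope", "my_app")?;
    /// ```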
1274    pub fn parse_binary_to_json_with_index<P: AsRef<Path>>(
1275        binary_path: P,
1276        base_name: &str,
1277    ) -> Result<(), BinaryExportError> {
1278        use crate::export::binary::BinaryReader;
1279
1280        let start = std::time::Instant::now();
1281        let binary_path = binary_path.as_ref();
1282
1283        tracing::info!("📊 Using BinaryReader for direct data access");
1284
1285        // Step 1: Create reader for efficient access (no need for BinaryIndex)
1286        let open_start = std::time::Instant::now();
1287        let mut reader = BinaryReader::new(binary_path)?;
1288        let _header = reader.read_header()?;
1289        let open_time = open_start.elapsed();
1290        tracing::info!("✅ Opened binary reader in {}ms", open_time.as_millis());
1291
1292        // Step 2: Create output directory
1293        let base_memory_analysis_dir = std::path::Path::new("MemoryAnalysis");
1294        let project_dir = base_memory_analysis_dir.join(base_name);
1295        std::fs::create_dir_all(&project_dir)?;
1296
1297        // Step 3: Generate JSON files by streaming allocations through BinaryReader
1298        let json_start = std::time::Instant::now();
1299
1300        let file_paths = [
1301            (
1302                project_dir.join(format!("{base_name}_memory_analysis.json")),
1303                "memory",
1304            ),
1305            (
1306                project_dir.join(format!("{base_name}_lifetime.json")),
1307                "lifetime",
1308            ),
1309            (
1310                project_dir.join(format!("{base_name}_performance.json")),
1311                "performance",
1312            ),
1313            (
1314                project_dir.join(format!("{base_name}_unsafe_ffi.json")),
1315                "unsafe_ffi",
1316            ),
1317            (
1318                project_dir.join(format!("{base_name}_complex_types.json")),
1319                "complex_types",
1320            ),
1321        ];
1322
1323        // 🔥 CRITICAL FIX: Process the files sequentially to avoid file-access conflicts.
1324        // Multiple threads reading the same binary file concurrently can abort the process (SIGABRT).
1325        for (path, json_type) in &file_paths {
1326            Self::generate_json_with_reader(binary_path, path, json_type)?;
1327        }
1328
1329        let json_time = json_start.elapsed();
1330        tracing::info!(
1331            "✅ Generated 5 JSON files using BinaryReader in {}ms",
1332            json_time.as_millis()
1333        );
1334
1335        let total_time = start.elapsed();
1336        tracing::info!(
1337            "📊 Total BinaryReader conversion time: {}ms",
1338            total_time.as_millis()
1339        );
1340
1341        Ok(())
1342    }
1343
1344    /// Generate JSON file using BinaryReader for streaming access
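    ///
    /// Output is assembled as three streamed pieces: an opening header, one comma-separated
    /// record per allocation, and a metadata footer. For `json_type == "memory"` the file
    /// shape is roughly (records elided):
    ///
    /// ```json
    /// {"allocations":[{...},{...}],"metadata":{"analysis_type":"memory_analysis","total_allocations":2,"export_version":"2.0"}}
    /// ```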
1345    fn generate_json_with_reader(
1346        binary_path: &std::path::Path,
1347        output_path: &std::path::Path,
1348        json_type: &str,
1349    ) -> Result<(), BinaryExportError> {
1350        use std::io::{BufWriter, Write};
1351
1352        let file = std::fs::File::create(output_path)?;
1353        let mut writer = BufWriter::with_capacity(2 * 1024 * 1024, file); // 2MB buffer
1354
1355        // Open reader for streaming access
1356        let mut reader = BinaryReader::new(binary_path)?;
1357        let header = reader.read_header()?;
1358
1359        // Write JSON header - unified format for consistency
1360        match json_type {
1361            "memory" => writer.write_all(b"{\"allocations\":[")?,
1362            "lifetime" => writer.write_all(b"{\"lifecycle_events\":[")?,
1363            "performance" => writer.write_all(b"{\"allocations\":[")?,
1364            "unsafe_ffi" => writer.write_all(b"{\"allocations\":[")?,
1365            "complex_types" => writer.write_all(b"{\"allocations\":[")?,
1366            _ => {
1367                return Err(BinaryExportError::CorruptedData(format!(
1368                    "Unknown JSON type: {json_type}"
1369                )))
1370            }
1371        }
1372
1373        // Stream allocations directly from reader
1374        let total_count = header.total_count;
1375        let mut buffer = String::with_capacity(512);
1376
1377        for i in 0..total_count {
1378            if i > 0 {
1379                writer.write_all(b",")?;
1380            }
1381
1382            // Read the next allocation sequentially; forward-only reads avoid seeking in the binary file
1383            let allocation = reader.read_allocation()?;
1384
1385            // Generate JSON record with unified base format + specific analysis fields
1386            buffer.clear();
1387            match json_type {
1388                "memory" => Self::append_unified_record(&mut buffer, &allocation, "memory"),
1389                "lifetime" => Self::append_unified_record(&mut buffer, &allocation, "lifetime"),
1390                "performance" => {
1391                    Self::append_unified_record(&mut buffer, &allocation, "performance")
1392                }
1393                "unsafe_ffi" => Self::append_unified_record(&mut buffer, &allocation, "unsafe_ffi"),
1394                "complex_types" => {
1395                    Self::append_unified_record(&mut buffer, &allocation, "complex_types")
1396                }
1397                _ => unreachable!(),
1398            }
1399
1400            writer.write_all(buffer.as_bytes())?;
1401        }
1402
1403        // Write JSON footer - simplified and unified
1404        match json_type {
1405            "memory" => {
1406                writer.write_all(
1407                    b"],\"metadata\":{\"analysis_type\":\"memory_analysis\",\"total_allocations\":",
1408                )?;
1409                writer.write_all(total_count.to_string().as_bytes())?;
1410                writer.write_all(b",\"export_version\":\"2.0\"}}")?;
1411            }
1412            "lifetime" => {
1413                writer.write_all(
1414                    b"],\"metadata\":{\"analysis_type\":\"lifecycle_analysis\",\"total_events\":",
1415                )?;
1416                writer.write_all(total_count.to_string().as_bytes())?;
1417                writer.write_all(b",\"export_version\":\"2.0\"}}")?;
1418            }
1419            "performance" => {
1420                writer.write_all(b"],\"metadata\":{\"analysis_type\":\"performance_analysis\",\"total_allocations\":")?;
1421                writer.write_all(total_count.to_string().as_bytes())?;
1422                writer.write_all(b",\"export_version\":\"2.0\"}}")?;
1423            }
1424            "unsafe_ffi" => {
1425                writer.write_all(b"],\"metadata\":{\"analysis_type\":\"unsafe_ffi_analysis\",\"total_allocations\":")?;
1426                writer.write_all(total_count.to_string().as_bytes())?;
1427                writer.write_all(b",\"export_version\":\"2.0\"}}")?;
1428            }
1429            "complex_types" => {
1430                writer.write_all(b"],\"metadata\":{\"analysis_type\":\"complex_types_analysis\",\"total_allocations\":")?;
1431                writer.write_all(total_count.to_string().as_bytes())?;
1432                writer.write_all(b",\"export_version\":\"2.0\"}}")?;
1433            }
1434            _ => unreachable!(),
1435        }
1436
1437        writer.flush()?;
1438        Ok(())
1439    }
1440
1441    /// Convert using FastExportCoordinator for large datasets
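    ///
    /// On failure this falls back to `convert_using_optimized_json_export`, which in turn
    /// falls back to `generate_json_files_direct_fallback`, so some JSON output is still
    /// produced unless the final direct writer also fails.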
1442    fn convert_using_fast_coordinator(
1443        allocations: &[AllocationInfo],
1444        base_name: &str,
1445    ) -> Result<(), BinaryExportError> {
1446        use crate::export::fast_export_coordinator::{
1447            FastExportConfigBuilder, FastExportCoordinator,
1448        };
1449
1450        let coordinator_start = std::time::Instant::now();
1451
1452        // Configure FastExportCoordinator for optimal performance
1453        let config = FastExportConfigBuilder::new()
1454            .shard_size(2000) // Larger shards for better throughput
1455            .buffer_size(2 * 1024 * 1024) // 2MB buffer (reasonable size)
1456            .parallel_threshold(500) // Lower threshold for parallel processing
1457            .max_threads(Some(
1458                std::thread::available_parallelism()
1459                    .map(|p| p.get())
1460                    .unwrap_or(4),
1461            ))
1462            .performance_monitoring(true)
1463            .verbose_logging(false) // Disable verbose logging for speed
1464            .build();
1465
1466        let mut coordinator = FastExportCoordinator::new(config);
1467
1468        // Create output directory structure
1469        let base_memory_analysis_dir = std::path::Path::new("MemoryAnalysis");
1470        let project_dir = base_memory_analysis_dir.join(base_name);
1471        std::fs::create_dir_all(&project_dir)?;
1472
1473        // Use FastExportCoordinator's export_fast method
1474        let output_path = project_dir.join(format!("{base_name}_memory_analysis.json"));
1475
1476        match coordinator.export_fast(&output_path) {
1477            Ok(stats) => {
1478                tracing::info!(
1479                    "✅ FastExportCoordinator completed: {}ms, {} allocations, {:.2}x improvement",
1480                    stats.total_export_time_ms,
1481                    stats.total_allocations_processed,
1482                    stats.performance_improvement_factor
1483                );
1484
1485                // Generate additional JSON files using fast methods
1486                Self::generate_additional_json_files_fast(allocations, base_name, &project_dir)?;
1487            }
1488            Err(e) => {
1489                tracing::warn!(
1490                    "⚠️ FastExportCoordinator failed, falling back to direct method: {}",
1491                    e
1492                );
1493                Self::convert_using_optimized_json_export(allocations, base_name)?;
1494            }
1495        }
1496
1497        let coordinator_time = coordinator_start.elapsed();
1498        tracing::info!(
1499            "📊 FastExportCoordinator total time: {}ms",
1500            coordinator_time.as_millis()
1501        );
1502
1503        Ok(())
1504    }
1505
1506    /// Convert using OptimizedJsonExport for smaller datasets
1507    fn convert_using_optimized_json_export(
1508        allocations: &[AllocationInfo],
1509        base_name: &str,
1510    ) -> Result<(), BinaryExportError> {
1511        use crate::core::tracker::export_json::ExportJsonOptions;
1512        use crate::core::tracker::MemoryTracker;
1513        use crate::export::optimized_json_export::OptimizationLevel;
1514
1515        let export_start = std::time::Instant::now();
1516
1517        // Create a temporary MemoryTracker to drive the optimized export (the allocations slice is only used by the fallback below)
1518        let tracker = MemoryTracker::new();
1519
1520        // Configure for maximum speed using ExportJsonOptions
1521        let options = ExportJsonOptions::with_optimization_level(OptimizationLevel::Low)
1522            .parallel_processing(true)
1523            .buffer_size(2 * 1024 * 1024) // 2MB buffer
1524            .fast_export_mode(true)
1525            .schema_validation(false); // Disable validation for speed
1526
1527        // Use the optimized export method
1528        match tracker.export_to_json_with_options(base_name, options) {
1529            Ok(_) => {
1530                tracing::info!("✅ OptimizedJsonExport completed successfully");
1531            }
1532            Err(e) => {
1533                tracing::warn!("⚠️ OptimizedJsonExport failed, using fallback: {}", e);
1534                Self::generate_json_files_direct_fallback(allocations, base_name)?;
1535            }
1536        }
1537
1538        let export_time = export_start.elapsed();
1539        tracing::info!(
1540            "📊 OptimizedJsonExport total time: {}ms",
1541            export_time.as_millis()
1542        );
1543
1544        Ok(())
1545    }
1546
1547    /// Generate additional JSON files using fast methods
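    ///
    /// The coordinator path already writes `{base_name}_memory_analysis.json`; this helper
    /// fills in the remaining four files (lifetime, performance, unsafe_ffi, complex_types)
    /// in parallel via rayon.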
1548    fn generate_additional_json_files_fast(
1549        allocations: &[AllocationInfo],
1550        base_name: &str,
1551        project_dir: &std::path::Path,
1552    ) -> Result<(), BinaryExportError> {
1553        use crate::export::high_speed_buffered_writer::{
1554            HighSpeedBufferedWriter, HighSpeedWriterConfig,
1555        };
1556
1557        let additional_start = std::time::Instant::now();
1558
1559        // Configure high-speed writer
1560        let writer_config = HighSpeedWriterConfig {
1561            buffer_size: 2 * 1024 * 1024, // 2MB buffer
1562            enable_monitoring: false,     // Disable monitoring for speed
1563            auto_flush: true,
1564            estimated_total_size: Some(allocations.len() * 200), // Estimate 200 bytes per allocation
1565            enable_compression: false,                           // Disable compression for speed
1566        };
1567
1568        // Generate remaining 4 JSON files in parallel
1569        use rayon::prelude::*;
1570
1571        let file_tasks = vec![
1572            ("lifetime", "lifetime_analysis"),
1573            ("performance", "performance_analysis"),
1574            ("unsafe_ffi", "unsafe_ffi_analysis"),
1575            ("complex_types", "complex_types_analysis"),
1576        ];
1577
1578        let results: Result<Vec<()>, BinaryExportError> = file_tasks
1579            .par_iter()
1580            .map(|(file_type, analysis_type)| {
1581                let file_path = project_dir.join(format!("{base_name}_{file_type}.json"));
1582                let mut writer = HighSpeedBufferedWriter::new(&file_path, writer_config.clone())
1583                    .map_err(|e| BinaryExportError::Io(std::io::Error::other(e.to_string())))?;
1584
1585                // Generate JSON content directly
1586                let json_content = Self::generate_json_content_fast(allocations, analysis_type)?;
1587
1588                // Write using high-speed writer's custom JSON method
1589                writer
1590                    .write_custom_json(json_content.as_bytes())
1591                    .map_err(|e| BinaryExportError::Io(std::io::Error::other(e.to_string())))?;
1592
1593                Ok(())
1594            })
1595            .collect();
1596
1597        results?;
1598
1599        let additional_time = additional_start.elapsed();
1600        tracing::info!(
1601            "📊 Additional files generated in: {}ms",
1602            additional_time.as_millis()
1603        );
1604
1605        Ok(())
1606    }
1607
1608    /// Generate JSON content using fast string building
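    ///
    /// Accepted `analysis_type` values are "lifetime_analysis", "performance_analysis",
    /// "unsafe_ffi_analysis", and "complex_types_analysis"; anything else returns
    /// `BinaryExportError::CorruptedData`.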
1609    fn generate_json_content_fast(
1610        allocations: &[AllocationInfo],
1611        analysis_type: &str,
1612    ) -> Result<String, BinaryExportError> {
1613        // Pre-allocate string with estimated size
1614        let estimated_size = allocations.len() * 150 + 1024; // 150 bytes per allocation + overhead
1615        let mut content = String::with_capacity(estimated_size);
1616
1617        match analysis_type {
1618            "lifetime_analysis" => {
1619                content.push_str(r#"{"lifecycle_events":["#);
1620                for (i, alloc) in allocations.iter().enumerate() {
1621                    if i > 0 {
1622                        content.push(',');
1623                    }
1624                    content.push_str(r#"{"event":"allocation","ptr":"0x"#);
1625                    Self::append_hex_to_string(&mut content, alloc.ptr);
1626                    content.push_str(r#"","scope":""#);
1627                    content.push_str(alloc.scope_name.as_deref().unwrap_or("global"));
1628                    content.push_str(r#"","size":"#);
1629                    Self::append_number_to_string(&mut content, alloc.size as u64);
1630                    content.push_str(r#","timestamp":"#);
1631                    Self::append_number_to_string(&mut content, alloc.timestamp_alloc);
1632                    content.push_str(r#","type_name":""#);
1633                    content.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1634                    content.push_str(r#"","var_name":""#);
1635                    content.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1636                    content.push_str(r#""}"#);
1637                }
1638                content.push_str("]}");
1639            }
1640            "performance_analysis" => {
1641                content.push_str(r#"{"data":{"allocations":["#);
1642                for (i, alloc) in allocations.iter().enumerate() {
1643                    if i > 0 {
1644                        content.push(',');
1645                    }
1646                    content.push_str(r#"{"ptr":"0x"#);
1647                    Self::append_hex_to_string(&mut content, alloc.ptr);
1648                    content.push_str(r#"","size":"#);
1649                    Self::append_number_to_string(&mut content, alloc.size as u64);
1650                    content.push_str(r#","var_name":""#);
1651                    content.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1652                    content.push_str(r#"","type_name":""#);
1653                    content.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1654                    content.push_str(r#"","timestamp_alloc":"#);
1655                    Self::append_number_to_string(&mut content, alloc.timestamp_alloc);
1656                    content.push_str(r#","thread_id":""#);
1657                    content.push_str(&alloc.thread_id);
1658                    content.push_str(r#"","borrow_count":"#);
1659                    Self::append_number_to_string(&mut content, alloc.borrow_count as u64);
1660                    content.push_str(r#","fragmentation_analysis":{"status":"not_analyzed"}}"#);
1661                }
1662                content.push_str("]}}"); // close the allocations array, the "data" object, and the root object
1663            }
1664            "unsafe_ffi_analysis" => {
1665                content.push_str(r#"{"boundary_events":[],"enhanced_ffi_data":["#);
1666                for (i, alloc) in allocations.iter().enumerate() {
1667                    if i > 0 {
1668                        content.push(',');
1669                    }
1670                    content.push_str(r#"{"ptr":"0x"#);
1671                    Self::append_hex_to_string(&mut content, alloc.ptr);
1672                    content.push_str(r#"","size":"#);
1673                    Self::append_number_to_string(&mut content, alloc.size as u64);
1674                    content.push_str(r#","var_name":""#);
1675                    content.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1676                    content.push_str(r#"","type_name":""#);
1677                    content.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1678                    content.push_str(r#"","timestamp_alloc":"#);
1679                    Self::append_number_to_string(&mut content, alloc.timestamp_alloc);
1680                    content.push_str(r#","thread_id":""#);
1681                    content.push_str(&alloc.thread_id);
1682                    content.push_str(r#"","stack_trace":["rust_main_thread"],"runtime_state":{"status":"safe","boundary_crossings":0}}"#);
1683                }
1684                content.push_str("]}");
1685            }
1686            "complex_types_analysis" => {
1687                content.push_str(r#"{"categorized_types":{"primitive":["#);
1688                for (i, alloc) in allocations.iter().enumerate() {
1689                    if i > 0 {
1690                        content.push(',');
1691                    }
1692                    content.push_str(r#"{"ptr":"0x"#);
1693                    Self::append_hex_to_string(&mut content, alloc.ptr);
1694                    content.push_str(r#"","size":"#);
1695                    Self::append_number_to_string(&mut content, alloc.size as u64);
1696                    content.push_str(r#","var_name":""#);
1697                    content.push_str(alloc.var_name.as_deref().unwrap_or("unknown_var"));
1698                    content.push_str(r#"","type_name":""#);
1699                    content.push_str(alloc.type_name.as_deref().unwrap_or("unknown_type"));
1700                    content.push_str(r#"","smart_pointer_info":{"type":"raw_pointer","is_smart":false},"memory_layout":{"alignment":8,"size_class":"medium"},"generic_info":{"is_generic":false,"type_params":[]},"dynamic_type_info":{"is_dynamic":false,"vtable_ptr":0},"generic_instantiation":{"instantiated":true,"template_args":[]},"type_relationships":{"parent_types":[],"child_types":[]},"type_usage":{"usage_count":1,"access_pattern":"sequential"}}"#);
1701                }
1702                content.push_str("]}}"); // close the primitive array, the "categorized_types" object, and the root object
1703            }
1704            _ => {
1705                return Err(BinaryExportError::CorruptedData(format!(
1706                    "Unknown analysis type: {analysis_type}",
1707                )));
1708            }
1709        }
1710
1711        Ok(content)
1712    }
1713
1714    /// Direct fallback method for JSON generation
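    ///
    /// The last-resort path: writes all five JSON files in parallel via
    /// `generate_json_ultra_fast`.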
1715    fn generate_json_files_direct_fallback(
1716        allocations: &[AllocationInfo],
1717        base_name: &str,
1718    ) -> Result<(), BinaryExportError> {
1719        let fallback_start = std::time::Instant::now();
1720        tracing::info!("🔧 Using direct fallback method for JSON generation");
1721
1722        // Create output directory
1723        let base_memory_analysis_dir = std::path::Path::new("MemoryAnalysis");
1724        let project_dir = base_memory_analysis_dir.join(base_name);
1725        std::fs::create_dir_all(&project_dir)?;
1726
1727        // Generate all 5 JSON files using direct methods
1728        let file_paths = [
1729            (
1730                project_dir.join(format!("{base_name}_memory_analysis.json")),
1731                "memory",
1732            ),
1733            (
1734                project_dir.join(format!("{base_name}_lifetime.json")),
1735                "lifetime",
1736            ),
1737            (
1738                project_dir.join(format!("{base_name}_performance.json")),
1739                "performance",
1740            ),
1741            (
1742                project_dir.join(format!("{base_name}_unsafe_ffi.json")),
1743                "unsafe_ffi",
1744            ),
1745            (
1746                project_dir.join(format!("{base_name}_complex_types.json")),
1747                "complex_types",
1748            ),
1749        ];
1750
1751        // Use parallel generation for maximum speed
1752        use rayon::prelude::*;
1753
1754        let results: Result<Vec<()>, BinaryExportError> = file_paths
1755            .par_iter()
1756            .map(|(path, json_type)| {
1757                Self::generate_json_ultra_fast(
1758                    allocations,
1759                    path,
1760                    json_type,
1761                    allocations.len() * 200,
1762                )
1763            })
1764            .collect();
1765
1766        results?;
1767
1768        let fallback_time = fallback_start.elapsed();
1769        tracing::info!(
1770            "📊 Direct fallback completed in: {}ms",
1771            fallback_time.as_millis()
1772        );
1773
1774        Ok(())
1775    }
1776
1777    /// Thin wrapper around `append_hex` used by the record builders
1778    #[inline]
1779    fn append_hex_to_string(buffer: &mut String, value: usize) {
1780        Self::append_hex(buffer, value);
1781    }
1782
1783    /// Thin wrapper around `append_number` used by the record builders
1784    #[inline]
1785    fn append_number_to_string(buffer: &mut String, value: u64) {
1786        Self::append_number(buffer, value);
1787    }
1788
1789    /// Generate unified record with base allocation info + analysis-specific fields
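    ///
    /// Illustrative output for `analysis_type == "memory"` and an allocation with no optional
    /// extensions (field values are made up for the example):
    ///
    /// ```json
    /// {"ptr":"0x1000","size":64,"var_name":"buf","type_name":"Vec<u8>","scope_name":null,"timestamp_alloc":1000,"thread_id":"main","borrow_count":0,"is_leaked":false}
    /// ```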
1790    #[inline]
1791    fn append_unified_record(
1792        buffer: &mut String,
1793        allocation: &AllocationInfo,
1794        analysis_type: &str,
1795    ) {
1796        // Base allocation info (consistent across all analysis types)
1797        buffer.push_str(r#"{"ptr":"0x"#);
1798        Self::append_hex_to_string(buffer, allocation.ptr);
1799        buffer.push_str(r#"","size":"#);
1800        Self::append_number_to_string(buffer, allocation.size as u64);
1801        buffer.push_str(r#","var_name":"#);
1802        if let Some(var_name) = &allocation.var_name {
1803            buffer.push('"');
1804            buffer.push_str(var_name);
1805            buffer.push('"');
1806        } else {
1807            buffer.push_str("null");
1808        }
1809        buffer.push_str(r#","type_name":"#);
1810        if let Some(type_name) = &allocation.type_name {
1811            buffer.push('"');
1812            buffer.push_str(type_name);
1813            buffer.push('"');
1814        } else {
1815            buffer.push_str("null");
1816        }
1817        buffer.push_str(r#","scope_name":"#);
1818        if let Some(scope_name) = &allocation.scope_name {
1819            buffer.push('"');
1820            buffer.push_str(scope_name);
1821            buffer.push('"');
1822        } else {
1823            buffer.push_str("null");
1824        }
1825        buffer.push_str(r#","timestamp_alloc":"#);
1826        Self::append_number_to_string(buffer, allocation.timestamp_alloc);
1827        buffer.push_str(r#","thread_id":""#);
1828        buffer.push_str(&allocation.thread_id);
1829        buffer.push_str(r#"","borrow_count":"#);
1830        Self::append_number_to_string(buffer, allocation.borrow_count as u64);
1831        buffer.push_str(r#","is_leaked":"#);
1832        buffer.push_str(if allocation.is_leaked {
1833            "true"
1834        } else {
1835            "false"
1836        });
1837
1838        // Add improve.md extensions if available
1839        if let Some(ref borrow_info) = allocation.borrow_info {
1840            buffer.push_str(r#","borrow_info":{"immutable_borrows":"#);
1841            Self::append_number_to_string(buffer, borrow_info.immutable_borrows as u64);
1842            buffer.push_str(r#","mutable_borrows":"#);
1843            Self::append_number_to_string(buffer, borrow_info.mutable_borrows as u64);
1844            buffer.push_str(r#","max_concurrent_borrows":"#);
1845            Self::append_number_to_string(buffer, borrow_info.max_concurrent_borrows as u64);
1846            buffer.push_str(r#","last_borrow_timestamp":"#);
1847            if let Some(ts) = borrow_info.last_borrow_timestamp {
1848                Self::append_number_to_string(buffer, ts);
1849            } else {
1850                buffer.push_str("null");
1851            }
1852            buffer.push('}');
1853        }
1854
1855        if let Some(ref clone_info) = allocation.clone_info {
1856            buffer.push_str(r#","clone_info":{"clone_count":"#);
1857            Self::append_number_to_string(buffer, clone_info.clone_count as u64);
1858            buffer.push_str(r#","is_clone":"#);
1859            buffer.push_str(if clone_info.is_clone { "true" } else { "false" });
1860            buffer.push_str(r#","original_ptr":"#);
1861            if let Some(ptr) = clone_info.original_ptr {
1862                buffer.push_str("\"0x");
1863                Self::append_hex_to_string(buffer, ptr);
1864                buffer.push('"');
1865            } else {
1866                buffer.push_str("null");
1867            }
1868            buffer.push('}');
1869        }
1870
1871        if allocation.ownership_history_available {
1872            buffer.push_str(r#","ownership_history_available":true"#);
1873        }
1874
1875        // Add analysis-specific fields
1876        match analysis_type {
1877            "memory" => {
1878                // Memory analysis specific fields
1879                if let Some(lifetime_ms) = allocation.lifetime_ms {
1880                    buffer.push_str(r#","lifetime_ms":"#);
1881                    Self::append_number_to_string(buffer, lifetime_ms);
1882                }
1883            }
1884            "lifetime" => {
1885                // Lifecycle analysis specific fields
1886                buffer.push_str(r#","event":"allocation""#);
1887            }
1888            "performance" => {
1889                // Performance analysis specific fields
1890                buffer.push_str(r#","fragmentation_analysis":{"status":"analyzed","score":0.1}"#);
1891            }
1892            "unsafe_ffi" => {
1893                // FFI analysis specific fields
1894                buffer.push_str(r#","ffi_tracked":true,"safety_violations":[]"#);
1895            }
1896            "complex_types" => {
1897                // Complex types analysis specific fields
1898                buffer.push_str(r#","type_complexity":{"score":1,"category":"primitive"}"#);
1899            }
1900            _ => {}
1901        }
1902
1903        buffer.push('}');
1904    }
1905
1906    /// Legacy memory-record builder - kept for compatibility with the reference memory_analysis format
1907    #[inline]
1908    fn append_memory_record_compatible(buffer: &mut String, allocation: &AllocationInfo) {
1909        buffer.push_str(r#"{"ptr":"0x"#);
1910        Self::append_hex_to_string(buffer, allocation.ptr);
1911        buffer.push_str(r#"","scope_name":"#);
1912        if let Some(scope) = &allocation.scope_name {
1913            buffer.push('"');
1914            buffer.push_str(scope);
1915            buffer.push('"');
1916        } else {
1917            buffer.push_str("null");
1918        }
1919        buffer.push_str(r#","size":"#);
1920        Self::append_number_to_string(buffer, allocation.size as u64);
1921        buffer.push_str(r#","timestamp_alloc":"#);
1922        Self::append_number_to_string(buffer, allocation.timestamp_alloc);
1923        buffer.push_str(r#","timestamp_dealloc":null,"type_name":"#);
1924        if let Some(type_name) = &allocation.type_name {
1925            buffer.push('"');
1926            buffer.push_str(type_name);
1927            buffer.push('"');
1928        } else {
1929            buffer.push_str("null");
1930        }
1931        buffer.push_str(r#","var_name":"#);
1932        if let Some(var_name) = &allocation.var_name {
1933            buffer.push('"');
1934            buffer.push_str(var_name);
1935            buffer.push('"');
1936        } else {
1937            buffer.push_str("null");
1938        }
1939        // Add our additional fields
1940        buffer.push_str(r#","thread_id":""#);
1941        buffer.push_str(&allocation.thread_id);
1942        buffer.push_str(r#"","borrow_count":"#);
1943        Self::append_number_to_string(buffer, allocation.borrow_count as u64);
1944        buffer.push_str(r#","is_leaked":"#);
1945        buffer.push_str(if allocation.is_leaked {
1946            "true"
1947        } else {
1948            "false"
1949        });
1950        buffer.push('}');
1951    }
1952
1953    /// Generate lifetime analysis record compatible with reference format
1954    #[inline]
1955    fn append_lifetime_record_compatible(buffer: &mut String, allocation: &AllocationInfo) {
1956        buffer.push_str(r#"{"event":"allocation","ptr":"0x"#);
1957        Self::append_hex_to_string(buffer, allocation.ptr);
1958        buffer.push_str(r#"","scope":"#);
1959        if let Some(scope) = &allocation.scope_name {
1960            buffer.push('"');
1961            buffer.push_str(scope);
1962            buffer.push('"');
1963        } else {
1964            buffer.push_str(r#""global""#);
1965        }
1966        buffer.push_str(r#","size":"#);
1967        Self::append_number_to_string(buffer, allocation.size as u64);
1968        buffer.push_str(r#","timestamp":"#);
1969        Self::append_number_to_string(buffer, allocation.timestamp_alloc);
1970        buffer.push_str(r#","type_name":"#);
1971        if let Some(type_name) = &allocation.type_name {
1972            buffer.push('"');
1973            buffer.push_str(type_name);
1974            buffer.push('"');
1975        } else {
1976            buffer.push_str("null");
1977        }
1978        buffer.push_str(r#","var_name":"#);
1979        if let Some(var_name) = &allocation.var_name {
1980            buffer.push('"');
1981            buffer.push_str(var_name);
1982            buffer.push('"');
1983        } else {
1984            buffer.push_str("null");
1985        }
1986        buffer.push('}');
1987    }
1988
1989    /// Generate performance analysis record compatible with reference format
1990    #[inline]
1991    fn append_performance_record_compatible(buffer: &mut String, allocation: &AllocationInfo) {
1992        buffer.push_str(r#"{"ptr":"0x"#);
1993        Self::append_hex_to_string(buffer, allocation.ptr);
1994        buffer.push_str(r#"","size":"#);
1995        Self::append_number_to_string(buffer, allocation.size as u64);
1996        buffer.push_str(r#","var_name":"#);
1997        if let Some(var_name) = &allocation.var_name {
1998            buffer.push('"');
1999            buffer.push_str(var_name);
2000            buffer.push('"');
2001        } else {
2002            buffer.push_str("null");
2003        }
2004        buffer.push_str(r#","type_name":"#);
2005        if let Some(type_name) = &allocation.type_name {
2006            buffer.push('"');
2007            buffer.push_str(type_name);
2008            buffer.push('"');
2009        } else {
2010            buffer.push_str("null");
2011        }
2012        buffer.push_str(r#","timestamp_alloc":"#);
2013        Self::append_number_to_string(buffer, allocation.timestamp_alloc);
2014        buffer.push_str(r#","thread_id":""#);
2015        buffer.push_str(&allocation.thread_id);
2016        buffer.push_str(r#"","borrow_count":"#);
2017        Self::append_number_to_string(buffer, allocation.borrow_count as u64);
2018        buffer.push_str(r#","fragmentation_analysis":{"status":"not_analyzed"}}"#);
2019    }
2020
2021    /// Generate FFI analysis record compatible with snapshot_unsafe_ffi.json format
2022    #[inline]
2023    fn append_ffi_record_compatible(buffer: &mut String, allocation: &AllocationInfo) {
2024        buffer.push_str(r#"{"base":{"ptr":"#);
2025        Self::append_number_to_string(buffer, allocation.ptr as u64);
2026        buffer.push_str(r#","size":"#);
2027        Self::append_number_to_string(buffer, allocation.size as u64);
2028        buffer.push_str(r#","var_name":"#);
2029        if let Some(var_name) = &allocation.var_name {
2030            buffer.push('"');
2031            buffer.push_str(var_name);
2032            buffer.push('"');
2033        } else {
2034            buffer.push_str("null");
2035        }
2036        buffer.push_str(r#","type_name":"#);
2037        if let Some(type_name) = &allocation.type_name {
2038            buffer.push('"');
2039            buffer.push_str(type_name);
2040            buffer.push('"');
2041        } else {
2042            buffer.push_str("null");
2043        }
2044        buffer.push_str(r#","scope_name":"#);
2045        if let Some(scope_name) = &allocation.scope_name {
2046            buffer.push('"');
2047            buffer.push_str(scope_name);
2048            buffer.push('"');
2049        } else {
2050            buffer.push_str("null");
2051        }
2052        buffer.push_str(r#","timestamp_alloc":"#);
2053        Self::append_number_to_string(buffer, allocation.timestamp_alloc);
2054        buffer.push_str(r#","timestamp_dealloc":null,"borrow_count":"#);
2055        Self::append_number_to_string(buffer, allocation.borrow_count as u64);
2056        buffer.push_str(r#","stack_trace":null,"is_leaked":"#);
2057        buffer.push_str(if allocation.is_leaked {
2058            "true"
2059        } else {
2060            "false"
2061        });
2062        buffer.push_str(r#","lifetime_ms":null,"smart_pointer_info":null,"memory_layout":null,"generic_info":null,"dynamic_type_info":null,"runtime_state":null,"stack_allocation":null,"temporary_object":null,"fragmentation_analysis":null,"generic_instantiation":null,"type_relationships":null,"type_usage":null,"function_call_tracking":null,"lifecycle_tracking":null,"access_tracking":null,"drop_chain_analysis":null},"source":{"FfiC":{"library_name":"libc","function_name":"malloc","call_stack":[{"f"#);
2063        Self::append_number_to_string(buffer, allocation.timestamp_alloc + 17000); // Add small offset for hook timestamp
2064        buffer.push_str(r#","allocation_metadata":{"requested_size":"#);
2065        Self::append_number_to_string(buffer, allocation.size as u64);
2066        buffer.push_str(r#","actual_size":"#);
2067        Self::append_number_to_string(buffer, allocation.size as u64);
2068        buffer.push_str(r#","alignment":8,"allocator_info":"libc malloc","protection_flags":{"readable":true,"writable":true,"executable":false,"shared":false}},"hook_overhead_ns":100}}},"call_stack":[{"function_name":"current_function","file_name":"src/unsafe_ffi_tracker.rs","line_number":42,"is_unsafe":true}],"cross_boundary_events":[{"event_type":"FfiToRust","timestamp":"#);
2069        Self::append_number_to_string(buffer, allocation.timestamp_alloc / 1000000); // Convert to ms
2070        buffer.push_str(r#","from_context":"libc","to_context":"rust_main","stack":[{"function_name":"current_function","file_name":"src/unsafe_ffi_tracker.rs","line_number":42,"is_unsafe":true}]}],"safety_violations":[],"ffi_tracked":true,"memory_passport":null,"ownership_history":null}"#);
2071    }
2072
2073    /// Generate complex types analysis record compatible with reference format
2074    #[inline]
2075    fn append_complex_record_compatible(buffer: &mut String, allocation: &AllocationInfo) {
2076        buffer.push_str(r#"{"ptr":"0x"#);
2077        Self::append_hex_to_string(buffer, allocation.ptr);
2078        buffer.push_str(r#"","size":"#);
2079        Self::append_number_to_string(buffer, allocation.size as u64);
2080        buffer.push_str(r#","var_name":"#);
2081        if let Some(var_name) = &allocation.var_name {
2082            buffer.push('"');
2083            buffer.push_str(var_name);
2084            buffer.push('"');
2085        } else {
2086            buffer.push_str("null");
2087        }
2088        buffer.push_str(r#","type_name":"#);
2089        if let Some(type_name) = &allocation.type_name {
2090            buffer.push('"');
2091            buffer.push_str(type_name);
2092            buffer.push('"');
2093        } else {
2094            buffer.push_str("null");
2095        }
2096        buffer.push_str(r#","smart_pointer_info":{"type":"raw_pointer","is_smart":false},"memory_layout":{"alignment":8,"size_class":"medium"},"generic_info":{"is_generic":false,"type_params":[]},"dynamic_type_info":{"is_dynamic":false,"vtable_ptr":0},"generic_instantiation":{"instantiated":true,"template_args":[]},"type_relationships":{"parent_types":[],"child_types":[]},"type_usage":{"usage_count":1,"access_pattern":"sequential"}}"#);
2097    }
2098}
2099
2100#[cfg(test)]
2101mod tests {
2102    use super::*;
2103    use crate::core::types::AllocationInfo;
2104    use std::fs;
2105    use tempfile::TempDir;
2106
2107    fn create_test_allocation(
2108        ptr: usize,
2109        size: usize,
2110        type_name: Option<String>,
2111        var_name: Option<String>,
2112    ) -> AllocationInfo {
2113        AllocationInfo {
2114            ptr,
2115            size,
2116            var_name,
2117            type_name,
2118            scope_name: None,
2119            timestamp_alloc: 1000,
2120            timestamp_dealloc: None,
2121            thread_id: "test_thread".to_string(),
2122            borrow_count: 0,
2123            stack_trace: None,
2124            is_leaked: false,
2125            lifetime_ms: None,
2126            borrow_info: None,
2127            clone_info: None,
2128            ownership_history_available: false,
2129            smart_pointer_info: None,
2130            memory_layout: None,
2131            generic_info: None,
2132            dynamic_type_info: None,
2133            runtime_state: None,
2134            stack_allocation: None,
2135            temporary_object: None,
2136            fragmentation_analysis: None,
2137            generic_instantiation: None,
2138            type_relationships: None,
2139            type_usage: None,
2140            function_call_tracking: None,
2141            lifecycle_tracking: None,
2142            access_tracking: None,
2143            drop_chain_analysis: None,
2144        }
2145    }
2146
2147    fn create_test_binary_file(
2148        temp_dir: &TempDir,
2149        allocations: &[AllocationInfo],
2150    ) -> std::path::PathBuf {
2151        use crate::export::binary::BinaryWriter;
2152
2153        let binary_path = temp_dir.path().join("test.bin");
2154        let mut writer = BinaryWriter::new(&binary_path).expect("Failed to create binary writer");
2155
2156        // Write header first
2157        writer
2158            .write_header(allocations.len() as u32)
2159            .expect("Failed to write header");
2160
2161        for allocation in allocations {
2162            writer
2163                .write_allocation(allocation)
2164                .expect("Failed to write allocation");
2165        }
2166
2167        writer.finish().expect("Failed to finish binary file");
2168        binary_path
2169    }
2170
2171    #[test]
2172    fn test_binary_parser_creation() {
2173        // BinaryParser is a unit struct, so we just test that it can be used
2174        let _parser = BinaryParser;
2175    }
2176
2177    #[test]
2178    fn test_append_number_to_string() {
2179        let mut buffer = String::new();
2180        BinaryParser::append_number_to_string(&mut buffer, 12345);
2181        assert_eq!(buffer, "12345");
2182
2183        buffer.clear();
2184        BinaryParser::append_number_to_string(&mut buffer, 0);
2185        assert_eq!(buffer, "0");
2186
2187        buffer.clear();
2188        BinaryParser::append_number_to_string(&mut buffer, u64::MAX);
2189        assert_eq!(buffer, u64::MAX.to_string());
2190    }
2191
2192    #[test]
2193    fn test_append_hex_to_string() {
2194        let mut buffer = String::new();
2195        BinaryParser::append_hex_to_string(&mut buffer, 0x1000);
2196        assert_eq!(buffer, "1000");
2197
2198        buffer.clear();
2199        BinaryParser::append_hex_to_string(&mut buffer, 0);
2200        assert_eq!(buffer, "0");
2201
2202        buffer.clear();
2203        BinaryParser::append_hex_to_string(&mut buffer, 0xDEADBEEF);
2204        assert_eq!(buffer, "deadbeef");
2205    }
2206
2207    #[test]
2208    fn test_append_memory_record_compatible() {
2209        let allocation = create_test_allocation(
2210            0x1000,
2211            64,
2212            Some("String".to_string()),
2213            Some("test_var".to_string()),
2214        );
2215
2216        let mut buffer = String::new();
2217        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2218
2219        // Verify the JSON structure contains expected fields
2220        assert!(buffer.contains("\"ptr\":\"0x1000\""));
2221        assert!(buffer.contains("\"size\":64"));
2222        assert!(buffer.contains("\"var_name\":\"test_var\""));
2223        assert!(buffer.contains("\"type_name\":\"String\""));
2224        assert!(buffer.contains("\"timestamp_alloc\":1000"));
2225        assert!(buffer.contains("\"is_leaked\":false"));
2226    }
2227
2228    #[test]
2229    fn test_append_memory_record_with_null_fields() {
2230        let allocation = create_test_allocation(0x2000, 128, None, None);
2231
2232        let mut buffer = String::new();
2233        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2234
2235        assert!(buffer.contains("\"ptr\":\"0x2000\""));
2236        assert!(buffer.contains("\"size\":128"));
2237        assert!(buffer.contains("\"var_name\":null"));
2238        assert!(buffer.contains("\"type_name\":null"));
2239    }
2240
2241    #[test]
2242    fn test_append_lifetime_record_compatible() {
2243        let allocation = create_test_allocation(
2244            0x3000,
2245            256,
2246            Some("Vec<i32>".to_string()),
2247            Some("my_vec".to_string()),
2248        );
2249
2250        let mut buffer = String::new();
2251        BinaryParser::append_lifetime_record_compatible(&mut buffer, &allocation);
2252
2253        assert!(buffer.contains("\"ptr\":\"0x3000\""));
2254        assert!(buffer.contains("\"size\":256"));
2255        assert!(buffer.contains("\"var_name\":\"my_vec\""));
2256        assert!(buffer.contains("\"type_name\":\"Vec<i32>\""));
2257        assert!(buffer.contains("\"timestamp\":1000"));
2258        assert!(buffer.contains("\"event\":\"allocation\""));
2259        assert!(buffer.contains("\"scope\":\"global\""));
2260    }
2261
2262    #[test]
2263    fn test_append_performance_record_compatible() {
2264        let allocation = create_test_allocation(
2265            0x4000,
2266            512,
2267            Some("HashMap".to_string()),
2268            Some("my_map".to_string()),
2269        );
2270
2271        let mut buffer = String::new();
2272        BinaryParser::append_performance_record_compatible(&mut buffer, &allocation);
2273
2274        assert!(buffer.contains("\"ptr\":\"0x4000\""));
2275        assert!(buffer.contains("\"size\":512"));
2276        assert!(buffer.contains("\"var_name\":\"my_map\""));
2277        assert!(buffer.contains("\"type_name\":\"HashMap\""));
2278        assert!(buffer.contains("\"timestamp_alloc\":1000"));
2279        assert!(buffer.contains("\"thread_id\":\"test_thread\""));
2280        assert!(buffer.contains("\"borrow_count\":0"));
2281        assert!(buffer.contains("\"fragmentation_analysis\":{\"status\":\"not_analyzed\"}"));
2282    }
2283
2284    #[test]
2285    fn test_append_ffi_record_compatible() {
2286        let allocation = create_test_allocation(
2287            0x5000,
2288            1024,
2289            Some("CString".to_string()),
2290            Some("c_str".to_string()),
2291        );
2292
2293        let mut buffer = String::new();
2294        BinaryParser::append_ffi_record_compatible(&mut buffer, &allocation);
2295
2296        assert!(buffer.contains("\"base\":{\"ptr\":20480"));
2297        assert!(buffer.contains("\"size\":1024"));
2298        assert!(buffer.contains("\"var_name\":\"c_str\""));
2299        assert!(buffer.contains("\"type_name\":\"CString\""));
2300        assert!(buffer.contains("\"FfiC\""));
2301        assert!(buffer.contains("\"library_name\":\"libc\""));
2302        assert!(buffer.contains("\"function_name\":\"malloc\""));
2303    }
2304
2305    #[test]
2306    fn test_append_complex_record_compatible() {
2307        let allocation = create_test_allocation(
2308            0x6000,
2309            2048,
2310            Some("Box<dyn Trait>".to_string()),
2311            Some("boxed_trait".to_string()),
2312        );
2313
2314        let mut buffer = String::new();
2315        BinaryParser::append_complex_record_compatible(&mut buffer, &allocation);
2316
2317        assert!(buffer.contains("\"ptr\":\"0x6000\""));
2318        assert!(buffer.contains("\"size\":2048"));
2319        assert!(buffer.contains("\"var_name\":\"boxed_trait\""));
2320        assert!(buffer.contains("\"type_name\":\"Box<dyn Trait>\""));
2321        assert!(buffer.contains("\"smart_pointer_info\""));
2322        assert!(buffer.contains("\"memory_layout\""));
2323        assert!(buffer.contains("\"generic_info\""));
2324        assert!(buffer.contains("\"dynamic_type_info\""));
2325    }
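
    // A hedged sketch of an additional check (the test name and values are illustrative):
    // the unified record builder should emit a single JSON object carrying the base fields.
    #[test]
    fn test_append_unified_record_memory_shape() {
        let allocation = create_test_allocation(
            0x7000,
            32,
            Some("u64".to_string()),
            Some("counter".to_string()),
        );

        let mut buffer = String::new();
        BinaryParser::append_unified_record(&mut buffer, &allocation, "memory");

        assert!(buffer.starts_with('{') && buffer.ends_with('}'));
        assert!(buffer.contains("\"ptr\":\"0x7000\""));
        assert!(buffer.contains("\"size\":32"));
        assert!(buffer.contains("\"thread_id\":\"test_thread\""));
        assert!(buffer.contains("\"is_leaked\":false"));
    }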
2326
2327    #[test]
2328    fn test_to_json_conversion() {
2329        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2330        let allocations = vec![
2331            create_test_allocation(
2332                0x1000,
2333                64,
2334                Some("String".to_string()),
2335                Some("var1".to_string()),
2336            ),
2337            create_test_allocation(
2338                0x2000,
2339                128,
2340                Some("Vec<i32>".to_string()),
2341                Some("var2".to_string()),
2342            ),
2343        ];
2344
2345        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2346        let json_path = temp_dir.path().join("output.json");
2347
2348        let result = BinaryParser::to_json(&binary_path, &json_path);
2349        assert!(result.is_ok());
2350
2351        // Verify JSON file was created and contains expected content
2352        assert!(json_path.exists());
2353        let json_content = fs::read_to_string(&json_path).expect("Failed to read JSON file");
2354        assert!(json_content.contains("\"ptr\":4096")); // 0x1000
2355        assert!(json_content.contains("\"ptr\":8192")); // 0x2000
2356        assert!(json_content.contains("\"var1\""));
2357        assert!(json_content.contains("\"var2\""));
2358    }
2359
2360    #[test]
2361    fn test_to_html_conversion() {
2362        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2363        let allocations = vec![create_test_allocation(
2364            0x1000,
2365            64,
2366            Some("String".to_string()),
2367            Some("var1".to_string()),
2368        )];
2369
2370        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2371        let html_path = temp_dir.path().join("output.html");
2372
2373        let result = BinaryParser::to_html(&binary_path, &html_path);
2374        assert!(result.is_ok());
2375
2376        // Verify HTML file was created and contains expected content
2377        assert!(html_path.exists());
2378        let html_content = fs::read_to_string(&html_path).expect("Failed to read HTML file");
2379        assert!(html_content.contains("<!DOCTYPE html>"));
2380        assert!(html_content.contains("<title>Memory Analysis</title>"));
2381        assert!(html_content.contains("Total allocations: 1"));
2382        assert!(html_content.contains("var1"));
2383    }
2384
2385    #[test]
2386    fn test_load_allocations_empty_file() {
2387        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2388        let allocations = vec![];
2389
2390        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2391        let result = BinaryParser::load_allocations(&binary_path);
2392
2393        assert!(result.is_ok());
2394        let loaded_allocations = result.unwrap();
2395        assert_eq!(loaded_allocations.len(), 0);
2396    }
2397
2398    #[test]
2399    fn test_load_allocations_single_allocation() {
2400        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2401        let allocations = vec![create_test_allocation(
2402            0x1000,
2403            64,
2404            Some("String".to_string()),
2405            Some("var1".to_string()),
2406        )];
2407
2408        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2409        let result = BinaryParser::load_allocations(&binary_path);
2410
2411        assert!(result.is_ok());
2412        let loaded_allocations = result.unwrap();
2413        assert_eq!(loaded_allocations.len(), 1);
2414        assert_eq!(loaded_allocations[0].ptr, 0x1000);
2415        assert_eq!(loaded_allocations[0].size, 64);
2416        assert_eq!(loaded_allocations[0].var_name, Some("var1".to_string()));
2417        assert_eq!(loaded_allocations[0].type_name, Some("String".to_string()));
2418    }
2419
2420    #[test]
2421    fn test_load_allocations_multiple_allocations() {
2422        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2423        let allocations = vec![
2424            create_test_allocation(
2425                0x1000,
2426                64,
2427                Some("String".to_string()),
2428                Some("var1".to_string()),
2429            ),
2430            create_test_allocation(
2431                0x2000,
2432                128,
2433                Some("Vec<i32>".to_string()),
2434                Some("var2".to_string()),
2435            ),
2436            create_test_allocation(
2437                0x3000,
2438                256,
2439                Some("HashMap".to_string()),
2440                Some("var3".to_string()),
2441            ),
2442        ];
2443
2444        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2445        let result = BinaryParser::load_allocations(&binary_path);
2446
2447        assert!(result.is_ok());
2448        let loaded_allocations = result.unwrap();
2449        assert_eq!(loaded_allocations.len(), 3);
2450
2451        // Verify each allocation
2452        for (i, allocation) in loaded_allocations.iter().enumerate() {
2453            assert_eq!(allocation.ptr, allocations[i].ptr);
2454            assert_eq!(allocation.size, allocations[i].size);
2455            assert_eq!(allocation.var_name, allocations[i].var_name);
2456            assert_eq!(allocation.type_name, allocations[i].type_name);
2457        }
2458    }
2459
2460    #[test]
2461    fn test_load_allocations_nonexistent_file() {
2462        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2463        let nonexistent_path = temp_dir.path().join("nonexistent.bin");
2464
2465        let result = BinaryParser::load_allocations(&nonexistent_path);
2466        assert!(result.is_err());
2467
2468        match result.unwrap_err() {
2469            BinaryExportError::Io(_) => {} // Expected
2470            other => panic!("Expected IO error, got: {other:?}"),
2471        }
2472    }
2473
2474    #[test]
2475    fn test_buffer_operations_with_special_characters() {
2476        let allocation = create_test_allocation(
2477            0x1000,
2478            64,
2479            Some("String with \"quotes\" and \\backslashes\\".to_string()),
2480            Some("var_with_special_chars".to_string()),
2481        );
2482
2483        let mut buffer = String::new();
2484        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2485
2486        // The function should handle special characters properly
2487        assert!(buffer.contains("var_with_special_chars"));
2488        // Note: The current implementation doesn't escape JSON strings properly,
2489        // but we test what it currently does
2490        assert!(!buffer.is_empty());
2491    }
2492
2493    #[test]
2494    fn test_hex_formatting_edge_cases() {
2495        let mut buffer = String::new();
2496
2497        // Test zero
2498        BinaryParser::append_hex_to_string(&mut buffer, 0);
2499        assert_eq!(buffer, "0");
2500
2501        buffer.clear();
2502
2503        // Test maximum value
2504        BinaryParser::append_hex_to_string(&mut buffer, usize::MAX);
2505        assert_eq!(buffer, format!("{:x}", usize::MAX));
2506
2507        buffer.clear();
2508
2509        // Test specific hex values
2510        BinaryParser::append_hex_to_string(&mut buffer, 0xABCDEF);
2511        assert_eq!(buffer, "abcdef");
2512    }
2513
2514    #[test]
2515    fn test_number_formatting_edge_cases() {
2516        let mut buffer = String::new();
2517
2518        // Test zero
2519        BinaryParser::append_number_to_string(&mut buffer, 0);
2520        assert_eq!(buffer, "0");
2521
2522        buffer.clear();
2523
2524        // Test maximum value
2525        BinaryParser::append_number_to_string(&mut buffer, u64::MAX);
2526        assert_eq!(buffer, u64::MAX.to_string());
2527
2528        buffer.clear();
2529
2530        // Test large number
2531        BinaryParser::append_number_to_string(&mut buffer, 1_000_000_000);
2532        assert_eq!(buffer, "1000000000");
2533    }
2534
2535    #[test]
2536    fn test_record_generation_consistency() {
2537        let allocation = create_test_allocation(
2538            0x1000,
2539            64,
2540            Some("TestType".to_string()),
2541            Some("test_var".to_string()),
2542        );
2543
2544        // Test that all record types can be generated without panicking
2545        let mut buffer = String::new();
2546
2547        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2548        assert!(!buffer.is_empty());
2549
2550        buffer.clear();
2551        BinaryParser::append_lifetime_record_compatible(&mut buffer, &allocation);
2552        assert!(!buffer.is_empty());
2553
2554        buffer.clear();
2555        BinaryParser::append_performance_record_compatible(&mut buffer, &allocation);
2556        assert!(!buffer.is_empty());
2557
2558        buffer.clear();
2559        BinaryParser::append_ffi_record_compatible(&mut buffer, &allocation);
2560        assert!(!buffer.is_empty());
2561
2562        buffer.clear();
2563        BinaryParser::append_complex_record_compatible(&mut buffer, &allocation);
2564        assert!(!buffer.is_empty());
2565    }
2566
2567    #[test]
2568    fn test_to_json_with_large_dataset() {
2569        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2570        let mut allocations = Vec::new();
2571
2572        // Create a large dataset
2573        for i in 0..1000 {
2574            allocations.push(create_test_allocation(
2575                0x1000 + i * 0x100,
2576                64 + i % 100,
2577                Some(format!("Type{}", i % 10)),
2578                Some(format!("var_{}", i)),
2579            ));
2580        }
2581
2582        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2583        let json_path = temp_dir.path().join("large_output.json");
2584
2585        let result = BinaryParser::to_json(&binary_path, &json_path);
2586        assert!(result.is_ok());
2587
2588        // Verify JSON file was created and contains expected content
2589        assert!(json_path.exists());
2590        let json_content = fs::read_to_string(&json_path).expect("Failed to read JSON file");
2591
2592        // Check for some specific allocations
2593        assert!(json_content.contains("var_0"));
2594        assert!(json_content.contains("var_999"));
2595        assert!(json_content.contains("Type0"));
2596        assert!(json_content.contains("Type9"));
2597
2598        // Sanity-check the output size; structural validity is not parsed here
2599        assert!(!json_content.is_empty());
2600        assert!(json_content.len() > 1000); // Should be substantial for 1000 allocations
2601    }
2602
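    // A hedged sketch of a slightly stronger structural check than the size heuristic
    // above. It only verifies that braces and brackets outside of strings are balanced;
    // full validation would be a serde_json round trip, which is not assumed to be
    // available to this test build, so the scan below sticks to the standard library.
    #[test]
    fn test_to_json_output_brackets_balanced_sketch() {
        let temp_dir = TempDir::new().expect("Failed to create temp directory");
        let allocations = vec![create_test_allocation(
            0x1000,
            64,
            Some("String".to_string()),
            Some("balanced_check".to_string()),
        )];

        let binary_path = create_test_binary_file(&temp_dir, &allocations);
        let json_path = temp_dir.path().join("balanced_check.json");
        BinaryParser::to_json(&binary_path, &json_path).expect("conversion should succeed");

        let json_content = fs::read_to_string(&json_path).expect("Failed to read JSON file");
        let (mut depth, mut in_string, mut escaped) = (0i64, false, false);
        for c in json_content.chars() {
            if in_string {
                // Skip string contents, honoring backslash escapes
                if escaped {
                    escaped = false;
                } else if c == '\\' {
                    escaped = true;
                } else if c == '"' {
                    in_string = false;
                }
                continue;
            }
            match c {
                '"' => in_string = true,
                '{' | '[' => depth += 1,
                '}' | ']' => depth -= 1,
                _ => {}
            }
            assert!(depth >= 0, "closing delimiter without a matching opener");
        }
        assert_eq!(depth, 0, "unbalanced braces/brackets in generated JSON");
    }
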
2603    #[test]
2604    fn test_to_html_with_empty_dataset() {
2605        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2606        let allocations = vec![];
2607
2608        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2609        let html_path = temp_dir.path().join("empty_output.html");
2610
2611        let result = BinaryParser::to_html(&binary_path, &html_path);
2612        assert!(result.is_ok());
2613
2614        // Verify HTML file was created and contains expected content
2615        assert!(html_path.exists());
2616        let html_content = fs::read_to_string(&html_path).expect("Failed to read HTML file");
2617        assert!(html_content.contains("<!DOCTYPE html>"));
2618        assert!(html_content.contains("<title>Memory Analysis</title>"));
2619        assert!(html_content.contains("Total allocations: 0"));
2620    }
2621
2622    #[test]
2623    fn test_to_html_with_complex_allocations() {
2624        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2625        let allocations = vec![
2626            create_test_allocation(
2627                0x1000,
2628                64,
2629                Some("std::collections::HashMap<String, Vec<i32>>".to_string()),
2630                Some("complex_map".to_string()),
2631            ),
2632            create_test_allocation(
2633                0x2000,
2634                128,
2635                Some("Box<dyn std::fmt::Display + Send + Sync>".to_string()),
2636                Some("trait_object".to_string()),
2637            ),
2638            create_test_allocation(
2639                0x3000,
2640                256,
2641                Some("Arc<Mutex<Option<RefCell<Vec<String>>>>>".to_string()),
2642                Some("nested_smart_pointers".to_string()),
2643            ),
2644        ];
2645
2646        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2647        let html_path = temp_dir.path().join("complex_output.html");
2648
2649        let result = BinaryParser::to_html(&binary_path, &html_path);
2650        assert!(result.is_ok());
2651
2652        // Verify HTML file was created and contains expected content
2653        assert!(html_path.exists());
2654        let html_content = fs::read_to_string(&html_path).expect("Failed to read HTML file");
2655        assert!(html_content.contains("Total allocations: 3"));
2656        assert!(html_content.contains("complex_map"));
2657        assert!(html_content.contains("trait_object"));
2658        assert!(html_content.contains("nested_smart_pointers"));
2659    }
2660
2661    #[test]
2662    fn test_json_structure_validation() {
2663        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2664        let allocations = vec![create_test_allocation(
2665            0x1000,
2666            64,
2667            Some("String".to_string()),
2668            Some("test_string".to_string()),
2669        )];
2670
2671        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2672        let json_path = temp_dir.path().join("structure_test.json");
2673
2674        let result = BinaryParser::to_json(&binary_path, &json_path);
2675        assert!(result.is_ok());
2676
2677        let json_content = fs::read_to_string(&json_path).expect("Failed to read JSON file");
2678
2679        // Verify JSON structure contains expected content
2680        assert!(!json_content.is_empty());
2681        assert!(json_content.contains("\"test_string\""));
2682        assert!(json_content.contains("\"String\""));
2683        assert!(json_content.contains("4096")); // 0x1000 in decimal
2684    }
2685
2686    #[test]
2687    fn test_html_structure_validation() {
2688        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2689        let allocations = vec![create_test_allocation(
2690            0x1000,
2691            64,
2692            Some("Vec<u8>".to_string()),
2693            Some("byte_vector".to_string()),
2694        )];
2695
2696        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2697        let html_path = temp_dir.path().join("structure_test.html");
2698
2699        let result = BinaryParser::to_html(&binary_path, &html_path);
2700        assert!(result.is_ok());
2701
2702        let html_content = fs::read_to_string(&html_path).expect("Failed to read HTML file");
2703
2704        // Verify HTML structure
2705        assert!(html_content.contains("<!DOCTYPE html>"));
2706        assert!(html_content.contains("<html>"));
2707        assert!(html_content.contains("<head>"));
2708        assert!(html_content.contains("<title>Memory Analysis</title>"));
2709        assert!(html_content.contains("<body>"));
2710        assert!(html_content.contains("</body>"));
2711        assert!(html_content.contains("</html>"));
2712
2713        // Verify content sections
2714        assert!(html_content.contains("Memory Analysis"));
2715        assert!(html_content.contains("1")); // Should contain the count somewhere
2716        assert!(html_content.contains("64")); // Should contain the size somewhere
2717        assert!(html_content.contains("byte_vector"));
2718        // The HTML may or may not escape the type name, so only check fragments common to both forms
2719        assert!(html_content.contains("Vec") && html_content.contains("u8"));
2720    }
2721
2722    #[test]
2723    fn test_error_handling_invalid_binary_file() {
2724        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2725
2726        // Create an invalid binary file
2727        let invalid_path = temp_dir.path().join("invalid.bin");
2728        fs::write(&invalid_path, b"invalid binary data").expect("Failed to write invalid file");
2729
2730        let json_path = temp_dir.path().join("output.json");
2731        let result = BinaryParser::to_json(&invalid_path, &json_path);
2732        assert!(result.is_err());
2733
2734        let html_path = temp_dir.path().join("output.html");
2735        let result = BinaryParser::to_html(&invalid_path, &html_path);
2736        assert!(result.is_err());
2737
2738        let result = BinaryParser::load_allocations(&invalid_path);
2739        assert!(result.is_err());
2740    }
2741
2742    #[test]
2743    fn test_error_handling_missing_output_directory() {
2744        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2745        let allocations = vec![create_test_allocation(
2746            0x1000,
2747            64,
2748            Some("String".to_string()),
2749            Some("test".to_string()),
2750        )];
2751
2752        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2753
2754        // Try to write to a directory that doesn't exist
2755        let invalid_output_path = temp_dir.path().join("nonexistent_dir").join("output.json");
2756        let result = BinaryParser::to_json(&binary_path, &invalid_output_path);
2757        assert!(result.is_err());
2758
2759        let invalid_html_path = temp_dir.path().join("nonexistent_dir").join("output.html");
2760        let result = BinaryParser::to_html(&binary_path, &invalid_html_path);
2761        assert!(result.is_err());
2762    }
2763
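    // A hedged, Unix-only sketch of a genuine permission failure, complementing the
    // missing-output-directory case above. The 0o555 mode removes write access from the
    // directory; note the assertion would not hold when the suite runs as root, so this
    // is an illustrative sketch rather than a portable guarantee.
    #[test]
    #[cfg(unix)]
    fn test_error_handling_readonly_directory_sketch() {
        use std::os::unix::fs::PermissionsExt;

        let temp_dir = TempDir::new().expect("Failed to create temp directory");
        let allocations = vec![create_test_allocation(
            0x1000,
            64,
            Some("String".to_string()),
            Some("test".to_string()),
        )];
        let binary_path = create_test_binary_file(&temp_dir, &allocations);

        let readonly_dir = temp_dir.path().join("readonly_dir");
        fs::create_dir(&readonly_dir).expect("Failed to create directory");
        fs::set_permissions(&readonly_dir, fs::Permissions::from_mode(0o555))
            .expect("Failed to set permissions");

        let output_path = readonly_dir.join("output.json");
        let result = BinaryParser::to_json(&binary_path, &output_path);
        assert!(result.is_err());

        // Restore write access so TempDir cleanup can remove the directory
        fs::set_permissions(&readonly_dir, fs::Permissions::from_mode(0o755))
            .expect("Failed to restore permissions");
    }
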
2764    #[test]
2765    fn test_memory_record_with_extreme_values() {
2766        let allocation = create_test_allocation(
2767            usize::MAX,
2768            usize::MAX,
2769            Some("ExtremeType".to_string()),
2770            Some("extreme_var".to_string()),
2771        );
2772
2773        let mut buffer = String::new();
2774        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2775
2776        // Should handle extreme values without panicking
2777        assert!(!buffer.is_empty());
2778        assert!(buffer.contains("extreme_var"));
2779        assert!(buffer.contains("ExtremeType"));
2780    }
2781
2782    #[test]
2783    fn test_lifetime_record_with_zero_timestamp() {
2784        let mut allocation = create_test_allocation(
2785            0x1000,
2786            64,
2787            Some("String".to_string()),
2788            Some("zero_time".to_string()),
2789        );
2790        allocation.timestamp_alloc = 0;
2791
2792        let mut buffer = String::new();
2793        BinaryParser::append_lifetime_record_compatible(&mut buffer, &allocation);
2794
2795        assert!(!buffer.is_empty());
2796        assert!(buffer.contains("\"timestamp\":0"));
2797        assert!(buffer.contains("zero_time"));
2798    }
2799
2800    #[test]
2801    fn test_performance_record_with_high_borrow_count() {
2802        let mut allocation = create_test_allocation(
2803            0x1000,
2804            64,
2805            Some("RefCell<String>".to_string()),
2806            Some("borrowed_var".to_string()),
2807        );
2808        allocation.borrow_count = 1000;
2809
2810        let mut buffer = String::new();
2811        BinaryParser::append_performance_record_compatible(&mut buffer, &allocation);
2812
2813        assert!(!buffer.is_empty());
2814        assert!(buffer.contains("\"borrow_count\":1000"));
2815        assert!(buffer.contains("borrowed_var"));
2816    }
2817
2818    #[test]
2819    fn test_ffi_record_with_zero_pointer() {
2820        let allocation = create_test_allocation(
2821            0,
2822            64,
2823            Some("*mut c_void".to_string()),
2824            Some("null_ptr".to_string()),
2825        );
2826
2827        let mut buffer = String::new();
2828        BinaryParser::append_ffi_record_compatible(&mut buffer, &allocation);
2829
2830        assert!(!buffer.is_empty());
2831        assert!(buffer.contains("\"ptr\":0"));
2832        assert!(buffer.contains("null_ptr"));
2833    }
2834
2835    #[test]
2836    fn test_complex_record_with_minimal_data() {
2837        let allocation =
2838            create_test_allocation(0x1000, 1, Some("u8".to_string()), Some("byte".to_string()));
2839
2840        let mut buffer = String::new();
2841        BinaryParser::append_complex_record_compatible(&mut buffer, &allocation);
2842
2843        assert!(!buffer.is_empty());
2844        assert!(buffer.contains("\"size\":1"));
2845        assert!(buffer.contains("byte"));
2846        assert!(buffer.contains("u8"));
2847    }
2848
2849    #[test]
2850    fn test_string_formatting_functions() {
2851        // Test append_number_to_string with various numbers
2852        let test_numbers = [0u64, 1, 42, 1000, u64::MAX];
2853        for &num in &test_numbers {
2854            let mut buffer = String::new();
2855            BinaryParser::append_number_to_string(&mut buffer, num);
2856            assert_eq!(buffer, num.to_string());
2857        }
2858
2859        // Test append_hex_to_string with various numbers
2860        let test_hex_numbers = [0usize, 1, 0x10, 0xFF, 0x1000, usize::MAX];
2861        for &num in &test_hex_numbers {
2862            let mut buffer = String::new();
2863            BinaryParser::append_hex_to_string(&mut buffer, num);
2864            assert_eq!(buffer, format!("{:x}", num));
2865        }
2866    }
2867
2868    #[test]
2869    fn test_buffer_reuse() {
2870        let allocation = create_test_allocation(
2871            0x1000,
2872            64,
2873            Some("String".to_string()),
2874            Some("test".to_string()),
2875        );
2876
2877        let mut buffer = String::new();
2878
2879        // Use the same buffer for multiple operations
2880        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2881        let first_length = buffer.len();
2882        assert!(first_length > 0);
2883
2884        buffer.push_str(",\n");
2885        BinaryParser::append_lifetime_record_compatible(&mut buffer, &allocation);
2886        let second_length = buffer.len();
2887        assert!(second_length > first_length);
2888
2889        buffer.push_str(",\n");
2890        BinaryParser::append_performance_record_compatible(&mut buffer, &allocation);
2891        let third_length = buffer.len();
2892        assert!(third_length > second_length);
2893    }
2894
2895    #[test]
2896    fn test_allocation_with_empty_strings() {
2897        let allocation =
2898            create_test_allocation(0x1000, 64, Some(String::new()), Some(String::new()));
2899
2900        let mut buffer = String::new();
2901        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2902
2903        assert!(!buffer.is_empty());
2904        assert!(buffer.contains("\"type_name\":\"\""));
2905        assert!(buffer.contains("\"var_name\":\"\""));
2906    }
2907
2908    #[test]
2909    fn test_allocation_with_long_strings() {
2910        let long_type = "a".repeat(10000);
2911        let long_var = "b".repeat(10000);
2912
2913        let allocation =
2914            create_test_allocation(0x1000, 64, Some(long_type.clone()), Some(long_var.clone()));
2915
2916        let mut buffer = String::new();
2917        BinaryParser::append_memory_record_compatible(&mut buffer, &allocation);
2918
2919        assert!(!buffer.is_empty());
2920        assert!(buffer.contains(&long_type));
2921        assert!(buffer.contains(&long_var));
2922    }
2923
2924    #[test]
2925    fn test_memory_efficiency() {
2926        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2927
2928        // Create a moderately large dataset
2929        let mut allocations = Vec::new();
2930        for i in 0..10000 {
2931            allocations.push(create_test_allocation(
2932                0x1000 + i * 0x10,
2933                64,
2934                Some(format!("Type{}", i % 100)),
2935                Some(format!("var_{}", i)),
2936            ));
2937        }
2938
2939        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2940        let json_path = temp_dir.path().join("efficiency_test.json");
2941
2942        // This should complete without excessive memory usage
2943        let result = BinaryParser::to_json(&binary_path, &json_path);
2944        assert!(result.is_ok());
2945
2946        // Verify the output file exists and has reasonable size
2947        assert!(json_path.exists());
2948        let metadata = fs::metadata(&json_path).expect("Failed to get file metadata");
2949        assert!(metadata.len() > 0);
2950        assert!(metadata.len() < 100_000_000); // Should be less than 100MB for 10k allocations
2951    }
2952
2953    #[test]
2954    fn test_streaming_to_json_memory_fix() {
2955        let temp_dir = TempDir::new().expect("Failed to create temp directory");
2956
2957        // Create a large dataset that would previously cause memory overflow
2958        let mut allocations = Vec::new();
2959        for i in 0..50000 {
2960            // 50k entries: larger than the other dataset tests to exercise the streaming path
2961            allocations.push(create_test_allocation(
2962                0x1000 + i * 0x10,
2963                64 + (i % 1000), // Variable sizes
2964                Some(format!("ComplexType{}", i % 50)),
2965                Some(format!("large_var_{}", i)),
2966            ));
2967        }
2968
2969        let binary_path = create_test_binary_file(&temp_dir, &allocations);
2970        let json_path = temp_dir.path().join("streaming_large_test.json");
2971
2972        // Test the fixed streaming approach
2973        let start = std::time::Instant::now();
2974        let result = BinaryParser::to_json(&binary_path, &json_path);
2975        let elapsed = start.elapsed();
2976
2977        assert!(result.is_ok(), "Streaming conversion should not fail");
2978        assert!(json_path.exists());
2979
2980        // Spot-check the output: outer array delimiters plus a few known substrings
2981        let json_content = fs::read_to_string(&json_path).expect("Failed to read JSON file");
2982        assert!(json_content.starts_with('['));
2983        assert!(json_content.ends_with(']'));
2984        assert!(json_content.contains("large_var_0"));
2985        assert!(json_content.contains("ComplexType"));
2986
2987        // Should complete in reasonable time (streaming is fast)
2988        assert!(
2989            elapsed.as_secs() < 10,
2990            "Streaming conversion took too long: {}s",
2991            elapsed.as_secs()
2992        );
2993
2994        println!(
2995            "✅ Streaming conversion completed in {:?} for 50k allocations",
2996            elapsed
2997        );
2998    }
2999}