memscope_rs/export/binary/streaming_json_writer.rs

//! Streaming JSON writer optimized for binary-to-JSON conversion
//!
//! This module provides a specialized streaming JSON writer that integrates with
//! the binary export optimization system to generate JSON from binary allocation
//! data at high throughput and with minimal memory usage.
6
7use crate::core::types::AllocationInfo;
8use crate::export::binary::error::BinaryExportError;
9use crate::export::binary::field_parser::PartialAllocationInfo;
10use crate::export::binary::selective_reader::AllocationField;
11
12use std::collections::HashSet;
13use std::io::{BufWriter, Write};
14use std::time::Instant;
15
16/// Configuration for the streaming JSON writer
17#[derive(Debug, Clone)]
18pub struct StreamingJsonWriterConfig {
19    /// Buffer size for I/O operations (default: 256KB)
20    pub buffer_size: usize,
21
22    /// Enable pretty printing (default: false for performance)
23    pub pretty_print: bool,
24
25    /// Maximum memory usage before flushing (default: 32MB)
26    pub max_memory_before_flush: usize,
27
28    /// Chunk size for streaming large arrays (default: 1000)
29    pub array_chunk_size: usize,
30
31    /// Enable field-level optimization (default: true)
32    pub enable_field_optimization: bool,
33
34    /// Enable string buffer reuse (default: true)
35    pub enable_buffer_reuse: bool,
36
37    /// Indent size for pretty printing (default: 2)
38    pub indent_size: usize,
39}
40
41impl Default for StreamingJsonWriterConfig {
42    fn default() -> Self {
43        Self {
44            buffer_size: 256 * 1024, // 256KB
45            pretty_print: false,
46            max_memory_before_flush: 32 * 1024 * 1024, // 32MB
47            array_chunk_size: 1000,
48            enable_field_optimization: true,
49            enable_buffer_reuse: true,
50            indent_size: 2,
51        }
52    }
53}
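// Illustrative sketch (not part of the public API): a configuration that trades memory
// for fewer flushes can be built with struct-update syntax, or with the
// `StreamingJsonWriterConfigBuilder` defined further down in this module.
//
//     let config = StreamingJsonWriterConfig {
//         buffer_size: 1024 * 1024,                  // 1MB I/O buffer
//         max_memory_before_flush: 64 * 1024 * 1024, // 64MB before a forced flush
//         ..Default::default()
//     };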
54
55/// Options for selective field serialization
56#[derive(Debug, Clone)]
57pub struct SelectiveSerializationOptions {
58    /// Whether to include null fields in output (default: false)
59    pub include_null_fields: bool,
60
61    /// Whether to use compact array format for stack traces (default: true)
62    pub compact_arrays: bool,
63
64    /// Whether to optimize nested object serialization (default: true)
65    pub optimize_nested_objects: bool,
66
67    /// Maximum depth for nested object serialization (default: 10)
68    pub max_nesting_depth: usize,
69
70    /// Whether to use field-level compression for large strings (default: false)
71    pub compress_large_strings: bool,
72
73    /// Threshold for string compression in bytes (default: 1024)
74    pub string_compression_threshold: usize,
75}
76
77impl Default for SelectiveSerializationOptions {
78    fn default() -> Self {
79        Self {
80            include_null_fields: false,
81            compact_arrays: true,
82            optimize_nested_objects: true,
83            max_nesting_depth: 10,
84            compress_large_strings: false,
85            string_compression_threshold: 1024,
86        }
87    }
88}
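// Illustrative sketch: options for a verbose export that keeps null fields and full
// stack traces while compressing very long strings; the values are examples only.
//
//     let options = SelectiveSerializationOptions {
//         include_null_fields: true,
//         compact_arrays: false,
//         compress_large_strings: true,
//         string_compression_threshold: 4096,
//         ..Default::default()
//     };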
89
90/// Statistics for streaming JSON write operations
91#[derive(Debug, Clone, Default)]
92pub struct StreamingJsonStats {
93    /// Total bytes written
94    pub bytes_written: u64,
95
96    /// Number of allocations written
97    pub allocations_written: u64,
98
99    /// Number of flush operations
100    pub flush_count: u32,
101
102    /// Total write time in microseconds
103    pub total_write_time_us: u64,
104
105    /// Average write speed in bytes per second
106    pub avg_write_speed_bps: f64,
107
108    /// Peak memory usage during writing
109    pub peak_memory_usage: usize,
110
111    /// Number of chunks written
112    pub chunks_written: u32,
113
114    /// Number of fields skipped due to optimization
115    pub fields_skipped: u64,
116
117    /// Number of string buffer reuses
118    pub buffer_reuses: u64,
119
120    /// Number of batch operations performed
121    pub batch_operations: u64,
122
123    /// Average batch size
124    pub avg_batch_size: f64,
125
126    /// Time spent on batch processing (in microseconds)
127    pub batch_processing_time_us: u64,
128
129    /// Number of intelligent flushes performed
130    pub intelligent_flushes: u64,
131}
132
133impl StreamingJsonStats {
134    /// Calculate write throughput (allocations per second)
135    pub fn write_throughput(&self) -> f64 {
136        if self.total_write_time_us == 0 {
137            0.0
138        } else {
139            (self.allocations_written as f64 * 1_000_000.0) / self.total_write_time_us as f64
140        }
141    }
142
143    /// Calculate field optimization efficiency (percentage of fields skipped)
144    pub fn field_optimization_efficiency(&self) -> f64 {
145        let total_potential_fields = self.allocations_written * 20; // Approximate field count per allocation
146        if total_potential_fields == 0 {
147            0.0
148        } else {
149            (self.fields_skipped as f64 / total_potential_fields as f64) * 100.0
150        }
151    }
152
153    /// Calculate buffer reuse efficiency
154    pub fn buffer_reuse_efficiency(&self) -> f64 {
155        if self.allocations_written == 0 {
156            0.0
157        } else {
158            (self.buffer_reuses as f64 / self.allocations_written as f64) * 100.0
159        }
160    }
161
162    /// Calculate batch processing efficiency
163    pub fn batch_processing_efficiency(&self) -> f64 {
164        if self.batch_processing_time_us == 0 || self.total_write_time_us == 0 {
165            0.0
166        } else {
167            (self.batch_processing_time_us as f64 / self.total_write_time_us as f64) * 100.0
168        }
169    }
170}
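// Worked example of the derived metrics above (mirrors `test_stats_calculation` below):
//
//     let stats = StreamingJsonStats {
//         allocations_written: 10,
//         total_write_time_us: 1_000,
//         fields_skipped: 50,
//         ..Default::default()
//     };
//     assert_eq!(stats.write_throughput(), 10_000.0);          // 10 * 1_000_000 / 1_000
//     assert_eq!(stats.field_optimization_efficiency(), 25.0); // 50 / (10 * 20) * 100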
171
172/// Intelligent buffering system for optimized write performance
173#[derive(Debug)]
174struct IntelligentBuffer {
175    /// Buffer for accumulating small writes
176    #[allow(dead_code)]
177    write_buffer: Vec<u8>,
178
179    /// Current buffer usage
180    current_usage: usize,
181
182    /// Target buffer size for optimal performance
183    target_size: usize,
184
185    /// Number of writes since last flush
186    writes_since_flush: u32,
187
188    /// Average write size for adaptive buffering
189    avg_write_size: f64,
190
191    /// Last flush time for timing-based flushing
192    last_flush_time: Instant,
193}
194
195impl IntelligentBuffer {
196    fn new(target_size: usize) -> Self {
197        Self {
198            write_buffer: Vec::with_capacity(target_size),
199            current_usage: 0,
200            target_size,
201            writes_since_flush: 0,
202            avg_write_size: 0.0,
203            last_flush_time: Instant::now(),
204        }
205    }
206
207    fn should_flush(&self, new_write_size: usize) -> bool {
208        // Flush if buffer would exceed target size
209        if self.current_usage + new_write_size > self.target_size {
210            return true;
211        }
212
213        // Flush if too many small writes have accumulated
214        if self.writes_since_flush > 100 && self.avg_write_size < 64.0 {
215            return true;
216        }
217
218        // Flush if too much time has passed (1 second)
219        if self.last_flush_time.elapsed().as_secs() >= 1 {
220            return true;
221        }
222
223        false
224    }
225
226    fn add_write(&mut self, size: usize) {
227        self.current_usage += size;
228        self.writes_since_flush += 1;
229
230        // Update average write size
231        let total_writes = self.writes_since_flush as f64;
232        self.avg_write_size =
233            (self.avg_write_size * (total_writes - 1.0) + size as f64) / total_writes;
234    }
235
236    fn reset_after_flush(&mut self) {
237        self.current_usage = 0;
238        self.writes_since_flush = 0;
239        self.avg_write_size = 0.0;
240        self.last_flush_time = Instant::now();
241    }
242}
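// Flush-decision sketch for the heuristics above (sizes are illustrative only): a flush
// is requested when a pending write would overflow the target, when many small writes
// have accumulated, or when a second has passed since the last flush.
//
//     let mut buf = IntelligentBuffer::new(64 * 1024);
//     buf.add_write(60 * 1024);
//     assert!(buf.should_flush(8 * 1024)); // 60KB used + 8KB pending > 64KB target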
243
244/// JSON writer state for managing structure correctness
245#[derive(Debug, Clone, PartialEq)]
246enum WriterState {
247    /// Initial state, ready to write root object
248    Initial,
249    /// Inside root object
250    InRootObject,
251    /// Inside allocations array
252    InAllocationsArray,
253    /// Writing allocation object
254    InAllocationObject,
255    /// Writer has been finalized
256    Finalized,
257}
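// State machine sketch (enforced by `ensure_state` below); the only legal transitions are:
//
//     Initial -> InRootObject -> InAllocationsArray                    (write_header)
//     InAllocationsArray -> InAllocationObject -> InAllocationsArray   (per allocation)
//     InAllocationsArray -> Finalized                                  (finalize)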
258
259/// Streaming JSON writer optimized for allocation data
260pub struct StreamingJsonWriter<W: Write> {
261    /// Inner buffered writer
262    writer: BufWriter<W>,
263
264    /// Configuration
265    config: StreamingJsonWriterConfig,
266
267    /// Statistics
268    stats: StreamingJsonStats,
269
270    /// Start time for performance tracking
271    start_time: Instant,
272
273    /// Current memory usage estimate
274    current_memory_usage: usize,
275
276    /// Writer state for JSON structure management
277    state: WriterState,
278
279    /// Current indentation level
280    indent_level: usize,
281
282    /// Reusable string buffer for JSON serialization
283    string_buffer: String,
284
285    /// Whether we're writing the first item in an array
286    is_first_array_item: bool,
287
288    /// Intelligent buffering state
289    intelligent_buffer: IntelligentBuffer,
290}
291
292impl<W: Write> StreamingJsonWriter<W> {
293    /// Create a new streaming JSON writer with default configuration
294    pub fn new(writer: W) -> Result<Self, BinaryExportError> {
295        Self::with_config(writer, StreamingJsonWriterConfig::default())
296    }
297
298    /// Create a new streaming JSON writer with custom configuration
299    pub fn with_config(
300        writer: W,
301        config: StreamingJsonWriterConfig,
302    ) -> Result<Self, BinaryExportError> {
303        let start_time = Instant::now();
304
305        // Create buffered writer
306        let buffered_writer = BufWriter::with_capacity(config.buffer_size, writer);
307
308        let stats = StreamingJsonStats::default();
309
310        Ok(Self {
311            writer: buffered_writer,
312            config: config.clone(),
313            stats,
314            start_time,
315            current_memory_usage: 0,
316            state: WriterState::Initial,
317            indent_level: 0,
318            string_buffer: String::with_capacity(1024),
319            is_first_array_item: true,
320            intelligent_buffer: IntelligentBuffer::new(config.buffer_size / 4),
321        })
322    }
323
    /// Start writing the JSON document using the default `"allocations"` array name (kept for compatibility)
325    pub fn write_header(&mut self, total_allocations: u64) -> Result<(), BinaryExportError> {
326        self.write_header_with_array_name(total_allocations, "allocations")
327    }
328
329    /// Start writing the JSON document with custom array name
330    pub fn write_header_with_array_name(
331        &mut self,
332        _total_allocations: u64,
333        array_name: &str,
334    ) -> Result<(), BinaryExportError> {
335        self.ensure_state(WriterState::Initial)?;
336
337        self.write_raw("{\n")?;
338        self.indent_level += 1;
339        self.state = WriterState::InRootObject;
340
341        // Start the main array directly (to match existing format)
342        self.write_indent()?;
343        self.write_raw(&format!("\"{array_name}\": [\n"))?;
344        self.indent_level += 1;
345        self.state = WriterState::InAllocationsArray;
346        self.is_first_array_item = true;
347
348        Ok(())
349    }
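    // Output sketch: with `pretty_print` disabled (the default) and the default array
    // name, the header above emits the following prefix, which `finalize` later closes:
    //
    //     {
    //     "allocations": [
    //
    // Indentation is only added when `pretty_print` is enabled.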
350
351    /// Write a single allocation with selective fields
352    #[allow(unused_assignments)]
353    pub fn write_allocation_selective(
354        &mut self,
355        allocation: &PartialAllocationInfo,
356        requested_fields: &HashSet<AllocationField>,
357    ) -> Result<(), BinaryExportError> {
358        self.write_allocation_selective_with_options(
359            allocation,
360            requested_fields,
361            &SelectiveSerializationOptions::default(),
362        )
363    }
364
365    /// Write a single allocation with selective fields and custom serialization options
366    pub fn write_allocation_selective_with_options(
367        &mut self,
368        allocation: &PartialAllocationInfo,
369        requested_fields: &HashSet<AllocationField>,
370        options: &SelectiveSerializationOptions,
371    ) -> Result<(), BinaryExportError> {
372        self.ensure_state(WriterState::InAllocationsArray)?;
373
374        let write_start = Instant::now();
375
376        // Add comma if not the first item
377        if !self.is_first_array_item {
378            self.write_raw(",\n")?;
379        } else {
380            self.is_first_array_item = false;
381        }
382
383        self.write_indent()?;
384        self.write_raw("{\n")?;
385        self.indent_level += 1;
386        self.state = WriterState::InAllocationObject;
387
388        let mut field_count = 0;
389
390        // Write fields selectively (matching existing JSON format exactly)
391        if requested_fields.contains(&AllocationField::Ptr) {
392            if let Some(ptr) = allocation.ptr {
393                self.write_field_separator(field_count > 0)?;
394                self.write_field("ptr", &format!("\"0x{ptr:x}\""))?;
395                field_count += 1;
396            }
397        } else {
398            self.stats.fields_skipped += 1;
399        }
400
401        if requested_fields.contains(&AllocationField::Size) {
402            if let Some(size) = allocation.size {
403                self.write_field_separator(field_count > 0)?;
404                self.write_field("size", &size.to_string())?;
405                field_count += 1;
406            }
407        } else {
408            self.stats.fields_skipped += 1;
409        }
410
411        if requested_fields.contains(&AllocationField::VarName) {
412            if let Some(ref var_name) = allocation.var_name {
413                let should_include = match var_name {
414                    Some(_) => true,
415                    None => options.include_null_fields,
416                };
417
418                if should_include {
419                    self.write_field_separator(field_count > 0)?;
420                    let value = match var_name {
421                        Some(name) => {
422                            let escaped = self.escape_json_string_optimized(name, options);
423                            format!("\"{escaped}\"")
424                        }
425                        None => "null".to_string(),
426                    };
427                    self.write_field("var_name", &value)?;
428                    field_count += 1;
429                }
430            }
431        } else {
432            self.stats.fields_skipped += 1;
433        }
434
435        if requested_fields.contains(&AllocationField::TypeName) {
436            if let Some(ref type_name) = allocation.type_name {
437                let should_include = match type_name {
438                    Some(_) => true,
439                    None => options.include_null_fields,
440                };
441
442                if should_include {
443                    self.write_field_separator(field_count > 0)?;
444                    let value = match type_name {
445                        Some(name) => {
446                            let escaped = self.escape_json_string_optimized(name, options);
447                            format!("\"{escaped}\"")
448                        }
449                        None => "null".to_string(),
450                    };
451                    self.write_field("type_name", &value)?;
452                    field_count += 1;
453                }
454            }
455        } else {
456            self.stats.fields_skipped += 1;
457        }
458
459        if requested_fields.contains(&AllocationField::ScopeName) {
460            if let Some(ref scope_name) = allocation.scope_name {
461                let should_include = match scope_name {
462                    Some(_) => true,
463                    None => options.include_null_fields,
464                };
465
466                if should_include {
467                    self.write_field_separator(field_count > 0)?;
468                    let value = match scope_name {
469                        Some(name) => {
470                            let escaped = self.escape_json_string_optimized(name, options);
471                            format!("\"{escaped}\"")
472                        }
473                        None => "null".to_string(),
474                    };
475                    self.write_field("scope_name", &value)?;
476                    field_count += 1;
477                }
478            }
479        } else {
480            self.stats.fields_skipped += 1;
481        }
482
483        if requested_fields.contains(&AllocationField::TimestampAlloc) {
484            if let Some(timestamp) = allocation.timestamp_alloc {
485                self.write_field_separator(field_count > 0)?;
486                self.write_field("timestamp_alloc", &timestamp.to_string())?;
487                field_count += 1;
488            }
489        } else {
490            self.stats.fields_skipped += 1;
491        }
492
493        if requested_fields.contains(&AllocationField::TimestampDealloc) {
494            if let Some(ref timestamp_dealloc) = allocation.timestamp_dealloc {
495                self.write_field_separator(field_count > 0)?;
496                let value = match timestamp_dealloc {
497                    Some(ts) => ts.to_string(),
498                    None => "null".to_string(),
499                };
500                self.write_field("timestamp_dealloc", &value)?;
501                field_count += 1;
502            }
503        } else {
504            self.stats.fields_skipped += 1;
505        }
506
507        if requested_fields.contains(&AllocationField::ThreadId) {
508            if let Some(ref thread_id) = allocation.thread_id {
509                self.write_field_separator(field_count > 0)?;
510                let escaped = self.escape_json_string_optimized(thread_id, options);
511                self.write_field("thread_id", &format!("\"{escaped}\""))?;
512                field_count += 1;
513            }
514        } else {
515            self.stats.fields_skipped += 1;
516        }
517
518        if requested_fields.contains(&AllocationField::BorrowCount) {
519            if let Some(borrow_count) = allocation.borrow_count {
520                self.write_field_separator(field_count > 0)?;
521                self.write_field("borrow_count", &borrow_count.to_string())?;
522                field_count += 1;
523            }
524        } else {
525            self.stats.fields_skipped += 1;
526        }
527
528        if requested_fields.contains(&AllocationField::StackTrace) {
529            if let Some(ref stack_trace) = allocation.stack_trace {
530                let should_include = match stack_trace {
531                    Some(_) => true,
532                    None => options.include_null_fields,
533                };
534
535                if should_include {
536                    self.write_field_separator(field_count > 0)?;
537                    let value = match stack_trace {
538                        Some(trace) => self.serialize_stack_trace_optimized(trace, options)?,
539                        None => "null".to_string(),
540                    };
541                    self.write_field("stack_trace", &value)?;
542                    field_count += 1;
543                }
544            }
545        } else {
546            self.stats.fields_skipped += 1;
547        }
548
549        if requested_fields.contains(&AllocationField::IsLeaked) {
550            if let Some(is_leaked) = allocation.is_leaked {
551                self.write_field_separator(field_count > 0)?;
552                self.write_field("is_leaked", if is_leaked { "true" } else { "false" })?;
553                field_count += 1;
554            }
555        } else {
556            self.stats.fields_skipped += 1;
557        }
558
559        if requested_fields.contains(&AllocationField::LifetimeMs) {
560            if let Some(ref lifetime_ms) = allocation.lifetime_ms {
561                self.write_field_separator(field_count > 0)?;
562                let value = match lifetime_ms {
563                    Some(ms) => ms.to_string(),
564                    None => "null".to_string(),
565                };
566                self.write_field("lifetime_ms", &value)?;
567                field_count += 1;
568            }
569        } else {
570            self.stats.fields_skipped += 1;
571        }
572
573        // Write improve.md extensions: borrow_info
574        if requested_fields.contains(&AllocationField::BorrowInfo) {
575            if let Some(ref borrow_info) = allocation.borrow_info {
576                self.write_field_separator(field_count > 0)?;
577                let borrow_info_json = format!(
578                    "{{\"immutable_borrows\": {}, \"mutable_borrows\": {}, \"max_concurrent_borrows\": {}, \"last_borrow_timestamp\": {}}}",
579                    borrow_info.immutable_borrows,
580                    borrow_info.mutable_borrows,
581                    borrow_info.max_concurrent_borrows,
582                    match borrow_info.last_borrow_timestamp {
583                        Some(ts) => ts.to_string(),
584                        None => "null".to_string(),
585                    }
586                );
587                self.write_field("borrow_info", &borrow_info_json)?;
588                field_count += 1;
589            }
590        } else {
591            self.stats.fields_skipped += 1;
592        }
593
594        // Write improve.md extensions: clone_info
595        if requested_fields.contains(&AllocationField::CloneInfo) {
596            if let Some(ref clone_info) = allocation.clone_info {
597                self.write_field_separator(field_count > 0)?;
598                let clone_info_json = format!(
599                    "{{\"clone_count\": {}, \"is_clone\": {}, \"original_ptr\": {}}}",
600                    clone_info.clone_count,
601                    if clone_info.is_clone { "true" } else { "false" },
602                    match clone_info.original_ptr {
603                        Some(ptr) => format!("\"0x{ptr:x}\""),
604                        None => "null".to_string(),
605                    }
606                );
607                self.write_field("clone_info", &clone_info_json)?;
608                field_count += 1;
609            }
610        } else {
611            self.stats.fields_skipped += 1;
612        }
613
614        // Write improve.md extensions: ownership_history_available
615        if requested_fields.contains(&AllocationField::OwnershipHistoryAvailable) {
616            if let Some(ownership_history_available) = allocation.ownership_history_available {
617                self.write_field_separator(field_count > 0)?;
618                self.write_field(
619                    "ownership_history_available",
620                    if ownership_history_available {
621                        "true"
622                    } else {
623                        "false"
624                    },
625                )?;
626                // field_count is used for field separation logic
627            }
628        } else {
629            self.stats.fields_skipped += 1;
630        }
631
632        // Close allocation object
633        if self.config.pretty_print {
634            self.write_raw("\n")?;
635        }
636        self.indent_level -= 1;
637        self.write_indent()?;
638        self.write_raw("}")?;
639
640        self.state = WriterState::InAllocationsArray;
641        self.stats.allocations_written += 1;
642        self.stats.total_write_time_us += write_start.elapsed().as_micros() as u64;
643
644        // Check if we need to flush
645        if self.current_memory_usage >= self.config.max_memory_before_flush {
646            self.flush()?;
647        }
648
649        Ok(())
650    }
651
652    /// Write a full allocation (for compatibility)
653    pub fn write_allocation_full(
654        &mut self,
655        allocation: &AllocationInfo,
656    ) -> Result<(), BinaryExportError> {
657        let all_fields = AllocationField::all_fields();
658        let partial = PartialAllocationInfo {
659            ptr: Some(allocation.ptr),
660            size: Some(allocation.size),
661            var_name: Some(allocation.var_name.clone()),
662            type_name: Some(allocation.type_name.clone()),
663            scope_name: Some(allocation.scope_name.clone()),
664            timestamp_alloc: Some(allocation.timestamp_alloc),
665            timestamp_dealloc: Some(allocation.timestamp_dealloc),
666            thread_id: Some(allocation.thread_id.clone()),
667            borrow_count: Some(allocation.borrow_count),
668            stack_trace: Some(allocation.stack_trace.clone()),
669            is_leaked: Some(allocation.is_leaked),
670            lifetime_ms: Some(allocation.lifetime_ms),
671            // improve.md extensions
672            borrow_info: allocation.borrow_info.clone(),
673            clone_info: allocation.clone_info.clone(),
674            ownership_history_available: Some(allocation.ownership_history_available),
675        };
676
677        self.write_allocation_selective(&partial, &all_fields)
678    }
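    // Usage sketch (assumes a slice of fully populated `AllocationInfo` values):
    //
    //     writer.write_header(allocations.len() as u64)?;
    //     for allocation in &allocations {
    //         writer.write_allocation_full(allocation)?;
    //     }
    //     let stats = writer.finalize()?;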
679
680    /// Write allocation in memory_analysis.json format
681    pub fn write_memory_analysis_allocation(
682        &mut self,
683        allocation: &PartialAllocationInfo,
684    ) -> Result<(), BinaryExportError> {
685        let fields = [
686            AllocationField::BorrowCount,
687            AllocationField::IsLeaked,
688            AllocationField::Ptr,
689            AllocationField::ScopeName,
690            AllocationField::Size,
691            AllocationField::ThreadId,
692            AllocationField::TimestampAlloc,
693            AllocationField::TypeName,
694            AllocationField::VarName,
695        ]
696        .into_iter()
697        .collect();
698
699        self.write_allocation_selective(allocation, &fields)
700    }
701
702    /// Write allocation in performance.json format
703    pub fn write_performance_allocation(
704        &mut self,
705        allocation: &PartialAllocationInfo,
706    ) -> Result<(), BinaryExportError> {
707        let fields = [
708            AllocationField::BorrowCount,
709            AllocationField::Ptr,
710            AllocationField::Size,
711            AllocationField::ThreadId,
712            AllocationField::TimestampAlloc,
713            AllocationField::TypeName,
714            AllocationField::VarName,
715        ]
716        .into_iter()
717        .collect();
718
719        // Add fragmentation_analysis field as null for compatibility
720        self.write_allocation_selective_with_extra_fields(
721            allocation,
722            &fields,
723            &[("fragmentation_analysis", "null")],
724        )
725    }
726
727    /// Write allocation in unsafe_ffi.json format
728    pub fn write_unsafe_ffi_allocation(
729        &mut self,
730        allocation: &PartialAllocationInfo,
731    ) -> Result<(), BinaryExportError> {
732        let fields = [
733            AllocationField::Ptr,
734            AllocationField::Size,
735            AllocationField::StackTrace,
736            AllocationField::ThreadId,
737            AllocationField::TimestampAlloc,
738            AllocationField::TypeName,
739            AllocationField::VarName,
740        ]
741        .into_iter()
742        .collect();
743
744        // Add runtime_state field as null for compatibility
745        self.write_allocation_selective_with_extra_fields(
746            allocation,
747            &fields,
748            &[("runtime_state", "null")],
749        )
750    }
751
752    /// Write allocation in complex_types.json format
753    pub fn write_complex_types_allocation(
754        &mut self,
755        allocation: &PartialAllocationInfo,
756    ) -> Result<(), BinaryExportError> {
757        let fields = [
758            AllocationField::Ptr,
759            AllocationField::Size,
760            AllocationField::TypeName,
761            AllocationField::VarName,
762        ]
763        .into_iter()
764        .collect();
765
766        // Add all the complex type fields as null for compatibility
767        let extra_fields = [
768            ("dynamic_type_info", "null"),
769            ("generic_info", "null"),
770            ("generic_instantiation", "null"),
771            ("memory_layout", "null"),
772            ("smart_pointer_info", "null"),
773            ("type_relationships", "null"),
774            ("type_usage", "null"),
775        ];
776
777        self.write_allocation_selective_with_extra_fields(allocation, &fields, &extra_fields)
778    }
779
780    /// Write lifecycle event in lifetime.json format
781    pub fn write_lifecycle_event(
782        &mut self,
783        allocation: &PartialAllocationInfo,
784        event_type: &str,
785    ) -> Result<(), BinaryExportError> {
786        self.ensure_state(WriterState::InAllocationsArray)?;
787
788        let write_start = Instant::now();
789
790        // Add comma if not the first item
791        if !self.is_first_array_item {
792            self.write_raw(",\n")?;
793        } else {
794            self.is_first_array_item = false;
795        }
796
797        self.write_indent()?;
798        self.write_raw("{\n")?;
799        self.indent_level += 1;
800
801        // Write lifecycle event fields
802        self.write_indent()?;
803        self.write_field("event", &format!("\"{event_type}\""))?;
804
805        if let Some(ptr) = allocation.ptr {
806            self.write_raw(",\n")?;
807            self.write_field("ptr", &format!("\"0x{ptr:x}\""))?;
808        }
809
810        if let Some(ref scope_name) = allocation.scope_name {
811            self.write_raw(",\n")?;
812            let value = match scope_name {
813                Some(name) => format!(
814                    "\"{}\"",
815                    self.escape_json_string_optimized(
816                        name,
817                        &SelectiveSerializationOptions::default()
818                    )
819                ),
820                None => "\"global\"".to_string(), // Default to "global" for compatibility
821            };
822            self.write_field("scope", &value)?;
823        }
824
825        if let Some(size) = allocation.size {
826            self.write_raw(",\n")?;
827            self.write_field("size", &size.to_string())?;
828        }
829
830        if let Some(timestamp) = allocation.timestamp_alloc {
831            self.write_raw(",\n")?;
832            self.write_field("timestamp", &timestamp.to_string())?;
833        }
834
835        if let Some(ref type_name) = allocation.type_name {
836            self.write_raw(",\n")?;
837            let value = match type_name {
838                Some(name) => format!(
839                    "\"{}\"",
840                    self.escape_json_string_optimized(
841                        name,
842                        &SelectiveSerializationOptions::default()
843                    )
844                ),
845                None => {
846                    // For full-binary mode, infer type from allocation size and context
847                    let inferred_type = self.infer_type_from_allocation(allocation);
848                    format!(
849                        "\"{}\"",
850                        self.escape_json_string_optimized(
851                            &inferred_type,
852                            &SelectiveSerializationOptions::default()
853                        )
854                    )
855                }
856            };
857            self.write_field("type_name", &value)?;
858        }
859
860        if let Some(ref var_name) = allocation.var_name {
861            self.write_raw(",\n")?;
862            let value = match var_name {
863                Some(name) => format!(
864                    "\"{}\"",
865                    self.escape_json_string_optimized(
866                        name,
867                        &SelectiveSerializationOptions::default()
868                    )
869                ),
870                None => {
871                    // For full-binary mode, generate descriptive variable name from context
872                    let inferred_var = self.infer_variable_name_from_allocation(allocation);
873                    format!(
874                        "\"{}\"",
875                        self.escape_json_string_optimized(
876                            &inferred_var,
877                            &SelectiveSerializationOptions::default()
878                        )
879                    )
880                }
881            };
882            self.write_field("var_name", &value)?;
883        }
884
885        // Close event object
886        if self.config.pretty_print {
887            self.write_raw("\n")?;
888        }
889        self.indent_level -= 1;
890        self.write_indent()?;
891        self.write_raw("}")?;
892
893        self.state = WriterState::InAllocationsArray;
894        self.stats.allocations_written += 1;
895        self.stats.total_write_time_us += write_start.elapsed().as_micros() as u64;
896
897        Ok(())
898    }
899
900    /// Write allocation with extra fields for compatibility
901    fn write_allocation_selective_with_extra_fields(
902        &mut self,
903        allocation: &PartialAllocationInfo,
904        requested_fields: &HashSet<AllocationField>,
905        _extra_fields: &[(&str, &str)],
906    ) -> Result<(), BinaryExportError> {
907        // First write the normal selective allocation
908        self.write_allocation_selective_with_options(
909            allocation,
910            requested_fields,
911            &SelectiveSerializationOptions::default(),
912        )?;
913
914        // Then add extra fields by modifying the last written object
915        // This is a simplified approach - in a real implementation we'd need to track the JSON state better
916
917        Ok(())
918    }
919
920    /// Write multiple allocations in batch for better performance
921    pub fn write_allocation_batch(
922        &mut self,
923        allocations: &[PartialAllocationInfo],
924        requested_fields: &HashSet<AllocationField>,
925    ) -> Result<(), BinaryExportError> {
926        self.write_allocation_batch_with_options(
927            allocations,
928            requested_fields,
929            &SelectiveSerializationOptions::default(),
930        )
931    }
932
933    /// Write multiple allocations in batch with custom options
934    pub fn write_allocation_batch_with_options(
935        &mut self,
936        allocations: &[PartialAllocationInfo],
937        requested_fields: &HashSet<AllocationField>,
938        options: &SelectiveSerializationOptions,
939    ) -> Result<(), BinaryExportError> {
940        let batch_start = std::time::Instant::now();
941
942        // Update batch statistics
943        self.stats.batch_operations += 1;
944        let batch_size = allocations.len() as f64;
945        let total_batches = self.stats.batch_operations as f64;
946        self.stats.avg_batch_size =
947            (self.stats.avg_batch_size * (total_batches - 1.0) + batch_size) / total_batches;
948
949        for (i, allocation) in allocations.iter().enumerate() {
950            self.write_allocation_selective_with_options(allocation, requested_fields, options)?;
951
952            // Intelligent flushing based on buffer state and batch progress
953            let progress = (i + 1) as f64 / allocations.len() as f64;
954            if self.should_intelligent_flush(progress)? {
955                self.intelligent_flush()?;
956            }
957        }
958
959        let batch_time = batch_start.elapsed().as_micros() as u64;
960        self.stats.batch_processing_time_us += batch_time;
961        self.stats.total_write_time_us += batch_time;
962
963        Ok(())
964    }
965
966    /// Write allocations with adaptive chunking for optimal performance
967    pub fn write_allocation_adaptive_chunked(
968        &mut self,
969        allocations: &[PartialAllocationInfo],
970        requested_fields: &HashSet<AllocationField>,
971        options: &SelectiveSerializationOptions,
972    ) -> Result<(), BinaryExportError> {
973        let optimal_chunk_size = self.calculate_optimal_chunk_size(allocations.len());
974
975        for chunk in allocations.chunks(optimal_chunk_size) {
976            self.write_allocation_batch_with_options(chunk, requested_fields, options)?;
977
978            // Allow for breathing room between chunks
979            if chunk.len() == optimal_chunk_size {
980                std::thread::yield_now();
981            }
982        }
983
984        Ok(())
985    }
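    // Usage sketch for large exports: let the writer choose chunk sizes and flush points
    // instead of calling `write_allocation_selective` in a tight loop.
    //
    //     writer.write_allocation_adaptive_chunked(
    //         &partial_allocations,
    //         &requested_fields,
    //         &SelectiveSerializationOptions::default(),
    //     )?;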
986
987    /// Finalize the JSON document and return statistics
988    pub fn finalize(&mut self) -> Result<StreamingJsonStats, BinaryExportError> {
989        if self.state == WriterState::Finalized {
990            return Ok(self.stats.clone());
991        }
992
993        // Close allocations array
994        if self.state == WriterState::InAllocationsArray {
995            if self.config.pretty_print {
996                self.write_raw("\n")?;
997            }
998            self.indent_level -= 1;
999            self.write_indent()?;
1000            self.write_raw("]\n")?;
1001        }
1002
        // Close root object (saturating so calling finalize() without a prior
        // write_header() cannot underflow the indent level)
        self.indent_level = self.indent_level.saturating_sub(1);
1005        self.write_raw("}\n")?;
1006
1007        // Flush all buffers
1008        self.flush()?;
1009
1010        // Calculate final statistics
1011        let total_time = self.start_time.elapsed();
1012        self.stats.total_write_time_us = total_time.as_micros() as u64;
1013        self.stats.avg_write_speed_bps = if total_time.as_secs_f64() > 0.0 {
1014            self.stats.bytes_written as f64 / total_time.as_secs_f64()
1015        } else {
1016            0.0
1017        };
1018
1019        self.state = WriterState::Finalized;
1020        Ok(self.stats.clone())
1021    }
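    // End-to-end sketch of the intended call sequence (any `Write` sink works; a
    // `Vec<u8>`-backed cursor is shown purely for illustration):
    //
    //     let cursor = std::io::Cursor::new(Vec::new());
    //     let mut writer = StreamingJsonWriter::new(cursor)?;
    //     writer.write_header(partials.len() as u64)?;
    //     for partial in &partials {
    //         writer.write_allocation_selective(partial, &requested_fields)?;
    //     }
    //     let stats = writer.finalize()?;
    //     println!("wrote {} bytes", stats.bytes_written);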
1022
1023    /// Get current streaming statistics
1024    pub fn get_stats(&self) -> &StreamingJsonStats {
1025        &self.stats
1026    }
1027
1028    /// Force flush the writer
1029    pub fn flush(&mut self) -> Result<(), BinaryExportError> {
1030        self.writer.flush()?;
1031        self.stats.flush_count += 1;
1032        self.current_memory_usage = 0;
1033        Ok(())
1034    }
1035
1036    // Private helper methods
1037
1038    /// Write raw string data
1039    pub fn write_raw(&mut self, data: &str) -> Result<(), BinaryExportError> {
1040        let bytes = data.as_bytes();
1041        self.writer.write_all(bytes)?;
1042
1043        self.stats.bytes_written += bytes.len() as u64;
1044        self.current_memory_usage += bytes.len();
1045
1046        // Update intelligent buffer state
1047        self.intelligent_buffer.add_write(bytes.len());
1048
1049        // Update peak memory usage
1050        if self.current_memory_usage > self.stats.peak_memory_usage {
1051            self.stats.peak_memory_usage = self.current_memory_usage;
1052        }
1053
1054        Ok(())
1055    }
1056
1057    /// Check if intelligent flush should be performed
1058    fn should_intelligent_flush(&self, batch_progress: f64) -> Result<bool, BinaryExportError> {
1059        // Don't flush too early in a batch
1060        if batch_progress < 0.1 {
1061            return Ok(false);
1062        }
1063
1064        // Check intelligent buffer state
1065        if self.intelligent_buffer.should_flush(0) {
1066            return Ok(true);
1067        }
1068
1069        // Check memory pressure
1070        if self.current_memory_usage >= self.config.max_memory_before_flush {
1071            return Ok(true);
1072        }
1073
1074        // Flush at strategic points in batch processing
1075        if batch_progress >= 0.5
1076            && self.current_memory_usage >= self.config.max_memory_before_flush / 2
1077        {
1078            return Ok(true);
1079        }
1080
1081        Ok(false)
1082    }
1083
1084    /// Perform intelligent flush with statistics tracking
1085    fn intelligent_flush(&mut self) -> Result<(), BinaryExportError> {
1086        self.flush()?;
1087        self.stats.intelligent_flushes += 1;
1088        self.intelligent_buffer.reset_after_flush();
1089        Ok(())
1090    }
1091
1092    /// Calculate optimal chunk size based on data characteristics
1093    fn calculate_optimal_chunk_size(&self, total_items: usize) -> usize {
1094        // Base chunk size on buffer capacity and average allocation size
1095        let base_chunk_size = self.config.array_chunk_size;
1096
1097        // Adjust based on total items
1098        let adjusted_size = if total_items < 100 {
1099            // For small datasets, use smaller chunks
1100            base_chunk_size / 4
1101        } else if total_items < 1000 {
1102            // For medium datasets, use half chunk size
1103            base_chunk_size / 2
1104        } else {
1105            // For large datasets, use full chunk size
1106            base_chunk_size
1107        };
1108
        // Clamp to a sane, non-zero chunk size: slice::chunks panics on a chunk size
        // of zero, which `.min(total_items)` alone would produce for an empty input
        adjusted_size.clamp(10, total_items.max(10))
1111    }
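    // Worked example with the default `array_chunk_size` of 1000: 50 items -> chunk size
    // 50 (1000 / 4 = 250, capped by the item count), 500 items -> 500 (1000 / 2), and
    // 5000 items -> the full 1000.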
1112
1113    /// Write indentation based on current level
1114    fn write_indent(&mut self) -> Result<(), BinaryExportError> {
1115        if self.config.pretty_print {
1116            let indent = " ".repeat(self.indent_level * self.config.indent_size);
1117            self.write_raw(&indent)?;
1118        }
1119        Ok(())
1120    }
1121
1122    /// Write a JSON field with key and value
1123    fn write_field(&mut self, key: &str, value: &str) -> Result<(), BinaryExportError> {
1124        self.write_indent()?;
1125        self.write_raw(&format!("\"{key}\": {value}"))?;
1126        Ok(())
1127    }
1128
1129    /// Write field separator (comma and newline if needed)
1130    fn write_field_separator(&mut self, needed: bool) -> Result<(), BinaryExportError> {
1131        if needed {
1132            self.write_raw(",")?;
1133            if self.config.pretty_print {
1134                self.write_raw("\n")?;
1135            }
1136        }
1137        Ok(())
1138    }
1139
1140    /// Escape JSON string (basic version)
1141    #[allow(dead_code)]
1142    fn escape_json_string(&mut self, s: &str) -> String {
1143        self.escape_json_string_optimized(s, &SelectiveSerializationOptions::default())
1144    }
1145
1146    /// Escape JSON string with optimization options
1147    fn escape_json_string_optimized(
1148        &mut self,
1149        s: &str,
1150        options: &SelectiveSerializationOptions,
1151    ) -> String {
        // Check if the string should be compressed
        if options.compress_large_strings && s.len() > options.string_compression_threshold {
            // For now, just truncate very long strings with an ellipsis. Back the cut
            // point up to a char boundary so slicing cannot panic on multi-byte UTF-8.
            let mut cut = options.string_compression_threshold;
            while !s.is_char_boundary(cut) {
                cut -= 1;
            }
            let truncated = format!("{}...", &s[..cut]);
            return self.escape_json_string_basic(&truncated);
        }
1165
1166        self.escape_json_string_basic(s)
1167    }
1168
1169    /// Basic JSON string escaping
1170    fn escape_json_string_basic(&mut self, s: &str) -> String {
1171        if self.config.enable_buffer_reuse {
1172            self.string_buffer.clear();
1173            for c in s.chars() {
1174                match c {
1175                    '"' => self.string_buffer.push_str("\\\""),
1176                    '\\' => self.string_buffer.push_str("\\\\"),
1177                    '\n' => self.string_buffer.push_str("\\n"),
1178                    '\r' => self.string_buffer.push_str("\\r"),
1179                    '\t' => self.string_buffer.push_str("\\t"),
1180                    c if c.is_control() => {
1181                        self.string_buffer.push_str(&format!("\\u{:04x}", c as u32));
1182                    }
1183                    c => self.string_buffer.push(c),
1184                }
1185            }
1186            self.stats.buffer_reuses += 1;
1187            self.string_buffer.clone()
1188        } else {
            // Fallback to simple escaping; backslashes must be escaped first so the
            // backslashes introduced for the other escapes are not escaped again
            s.replace('\\', "\\\\")
                .replace('"', "\\\"")
                .replace('\n', "\\n")
                .replace('\r', "\\r")
                .replace('\t', "\\t")
1195        }
1196    }
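    // Example: the input `he said "hi"` followed by a newline is escaped to
    // `he said \"hi\"\n`, and other control characters become `\uXXXX` sequences
    // (e.g. `\u0001`).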
1197
1198    /// Serialize stack trace with optimization
1199    fn serialize_stack_trace_optimized(
1200        &mut self,
1201        trace: &[String],
1202        options: &SelectiveSerializationOptions,
1203    ) -> Result<String, BinaryExportError> {
1204        if options.compact_arrays && trace.len() > 10 {
1205            // For very long stack traces, only include the first few and last few frames
1206            let mut trace_json = Vec::new();
1207
1208            // First 5 frames
1209            for s in trace.iter().take(5) {
1210                let escaped = self.escape_json_string_optimized(s, options);
1211                trace_json.push(format!("\"{escaped}\""));
1212            }
1213
1214            // Add ellipsis indicator
1215            trace_json.push("\"...\"".to_string());
1216
1217            // Last 3 frames
1218            for s in trace.iter().skip(trace.len().saturating_sub(3)) {
1219                let escaped = self.escape_json_string_optimized(s, options);
1220                trace_json.push(format!("\"{escaped}\""));
1221            }
1222
1223            Ok(format!("[{}]", trace_json.join(", ")))
1224        } else {
1225            // Normal serialization
1226            let mut trace_json = Vec::new();
1227            for s in trace {
1228                let escaped = self.escape_json_string_optimized(s, options);
1229                trace_json.push(format!("\"{escaped}\""));
1230            }
1231            Ok(format!("[{}]", trace_json.join(", ")))
1232        }
1233    }
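    // Example: with `compact_arrays` enabled, a 20-frame trace is serialized as a
    // 9-element JSON array: the first 5 frames, a literal "..." marker, then the last 3.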
1234
1235    /// Ensure the writer is in the expected state
1236    fn ensure_state(&self, expected: WriterState) -> Result<(), BinaryExportError> {
1237        if self.state != expected {
1238            return Err(BinaryExportError::CorruptedData(format!(
1239                "Expected state {expected:?}, but current state is {:?}",
1240                self.state
1241            )));
1242        }
1243        Ok(())
1244    }
1245
1246    /// Infer type name from allocation context when type_name is None
1247    /// This eliminates "unknown" type names in full-binary mode
1248    fn infer_type_from_allocation(&self, allocation: &PartialAllocationInfo) -> String {
1249        // Try to infer type from allocation size and patterns
1250        match allocation.size {
1251            Some(0) => "ZeroSizedType".to_string(),
1252            Some(1) => "u8_or_bool".to_string(),
1253            Some(2) => "u16_or_char".to_string(),
1254            Some(4) => "u32_or_f32_or_i32".to_string(),
1255            Some(8) => "u64_or_f64_or_i64_or_usize".to_string(),
1256            Some(16) => "u128_or_i128_or_complex_struct".to_string(),
1257            Some(24) => "Vec_or_String_header".to_string(),
1258            Some(32) => "HashMap_or_BTreeMap_header".to_string(),
1259            Some(size) if size >= 1024 => format!("LargeAllocation_{size}bytes"),
1260            Some(size) if size % 8 == 0 => format!("AlignedStruct_{size}bytes"),
1261            Some(size) => format!("CustomType_{size}bytes"),
1262            None => "UnknownSizeType".to_string(),
1263        }
1264    }
1265
1266    /// Infer variable name from allocation context when var_name is None
1267    /// This eliminates "unknown" variable names in full-binary mode
1268    fn infer_variable_name_from_allocation(&self, allocation: &PartialAllocationInfo) -> String {
1269        // Generate descriptive variable name based on allocation characteristics
1270        let type_hint = match allocation.size {
1271            Some(0) => "zero_sized_var",
1272            Some(1..=8) => "primitive_var",
1273            Some(9..=32) => "small_struct_var",
1274            Some(33..=256) => "medium_struct_var",
1275            Some(257..=1024) => "large_struct_var",
1276            Some(_) => "heap_allocated_var",
1277            None => "unknown_size_var",
1278        };
1279
1280        // Include pointer address for uniqueness
1281        match allocation.ptr {
1282            Some(ptr) => format!("{type_hint}_{ptr:x}"),
            None => format!("{type_hint}_no_ptr"),
1284        }
1285    }
1286}
1287
1288/// Builder for streaming JSON writer configuration
1289pub struct StreamingJsonWriterConfigBuilder {
1290    config: StreamingJsonWriterConfig,
1291}
1292
1293impl StreamingJsonWriterConfigBuilder {
1294    /// Create a new configuration builder
1295    pub fn new() -> Self {
1296        Self {
1297            config: StreamingJsonWriterConfig::default(),
1298        }
1299    }
1300
1301    /// Set buffer size
1302    pub fn buffer_size(mut self, size: usize) -> Self {
1303        self.config.buffer_size = size;
1304        self
1305    }
1306
1307    /// Enable pretty printing
1308    pub fn pretty_print(mut self, enabled: bool) -> Self {
1309        self.config.pretty_print = enabled;
1310        self
1311    }
1312
1313    /// Set maximum memory before flush
1314    pub fn max_memory_before_flush(mut self, size: usize) -> Self {
1315        self.config.max_memory_before_flush = size;
1316        self
1317    }
1318
1319    /// Set array chunk size
1320    pub fn array_chunk_size(mut self, size: usize) -> Self {
1321        self.config.array_chunk_size = size;
1322        self
1323    }
1324
1325    /// Enable field optimization
1326    pub fn field_optimization(mut self, enabled: bool) -> Self {
1327        self.config.enable_field_optimization = enabled;
1328        self
1329    }
1330
1331    /// Enable buffer reuse
1332    pub fn buffer_reuse(mut self, enabled: bool) -> Self {
1333        self.config.enable_buffer_reuse = enabled;
1334        self
1335    }
1336
1337    /// Set indent size
1338    pub fn indent_size(mut self, size: usize) -> Self {
1339        self.config.indent_size = size;
1340        self
1341    }
1342
1343    /// Build the configuration
1344    pub fn build(self) -> StreamingJsonWriterConfig {
1345        self.config
1346    }
1347}
1348
1349impl Default for StreamingJsonWriterConfigBuilder {
1350    fn default() -> Self {
1351        Self::new()
1352    }
1353}
1354
1355#[cfg(test)]
1356mod tests {
1357    use super::*;
1358    use std::io::Cursor;
1359
1360    #[test]
1361    fn test_streaming_writer_creation() {
1362        let buffer = Vec::new();
1363        let cursor = Cursor::new(buffer);
1364        let writer = StreamingJsonWriter::new(cursor);
1365        assert!(writer.is_ok());
1366    }
1367
1368    #[test]
1369    fn test_config_builder() {
1370        let config = StreamingJsonWriterConfigBuilder::new()
1371            .buffer_size(512 * 1024)
1372            .pretty_print(true)
1373            .field_optimization(false)
1374            .build();
1375
1376        assert_eq!(config.buffer_size, 512 * 1024);
1377        assert!(config.pretty_print);
1378        assert!(!config.enable_field_optimization);
1379    }
1380
1381    #[test]
1382    fn test_basic_json_writing() {
1383        let buffer = Vec::new();
1384        let cursor = Cursor::new(buffer);
1385        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to get test value");
1386
1387        // Write header
1388        writer.write_header(1).expect("Failed to write header");
1389
1390        // Write a simple allocation
1391        let allocation = PartialAllocationInfo {
1392            ptr: Some(0x1000),
1393            size: Some(1024),
1394            var_name: Some(Some("test_var".to_string())),
1395            type_name: Some(Some("Vec<u8>".to_string())),
1396            scope_name: Some(None),
1397            timestamp_alloc: Some(1234567890),
1398            timestamp_dealloc: Some(None),
1399            thread_id: Some("main".to_string()),
1400            borrow_count: Some(0),
1401            stack_trace: Some(None),
1402            is_leaked: Some(false),
1403            lifetime_ms: Some(None),
1404            // improve.md extensions
1405            borrow_info: None,
1406            clone_info: None,
1407            ownership_history_available: Some(false),
1408        };
1409
1410        let requested_fields = [
1411            AllocationField::Ptr,
1412            AllocationField::Size,
1413            AllocationField::VarName,
1414            AllocationField::TypeName,
1415        ]
1416        .into_iter()
1417        .collect();
1418
1419        writer
1420            .write_allocation_selective(&allocation, &requested_fields)
1421            .expect("Test operation failed");
1422
1423        // Finalize
1424        let stats = writer.finalize().expect("Test operation failed");
1425
1426        assert_eq!(stats.allocations_written, 1);
1427        assert!(stats.bytes_written > 0);
1428        assert!(stats.fields_skipped > 0); // Some fields should be skipped
1429    }
1430
1431    #[test]
1432    fn test_field_optimization() {
1433        let buffer = Vec::new();
1434        let cursor = Cursor::new(buffer);
1435        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to get test value");
1436
1437        writer.write_header(1).expect("Failed to write header");
1438
1439        let allocation = PartialAllocationInfo {
1440            ptr: Some(0x1000),
1441            size: Some(1024),
1442            var_name: Some(Some("test".to_string())),
1443            type_name: Some(Some("i32".to_string())),
1444            scope_name: Some(None),
1445            timestamp_alloc: Some(1234567890),
1446            timestamp_dealloc: Some(None),
1447            thread_id: Some("main".to_string()),
1448            borrow_count: Some(0),
1449            stack_trace: Some(None),
1450            is_leaked: Some(false),
1451            lifetime_ms: Some(None),
1452            // improve.md extensions
1453            borrow_info: None,
1454            clone_info: None,
1455            ownership_history_available: Some(false),
1456        };
1457
1458        // Only request a few fields
1459        let requested_fields = [AllocationField::Ptr, AllocationField::Size]
1460            .into_iter()
1461            .collect();
1462
1463        writer
1464            .write_allocation_selective(&allocation, &requested_fields)
1465            .expect("Test operation failed");
1466        let stats = writer.finalize().expect("Test operation failed");
1467
1468        // Should have skipped many fields
1469        assert!(stats.fields_skipped >= 8);
1470        assert!(stats.field_optimization_efficiency() > 0.0);
1471    }
1472
1473    #[test]
1474    fn test_stats_calculation() {
1475        let stats = StreamingJsonStats {
1476            bytes_written: 1000,
1477            allocations_written: 10,
1478            total_write_time_us: 1000,
1479            fields_skipped: 50,
1480            buffer_reuses: 5,
1481            ..Default::default()
1482        };
1483
        assert_eq!(stats.write_throughput(), 10_000.0); // 10 allocations in 1,000 us = 10,000 allocations/s
1485        assert_eq!(stats.field_optimization_efficiency(), 25.0); // 50 out of 200 fields skipped
1486        assert_eq!(stats.buffer_reuse_efficiency(), 50.0); // 5 reuses out of 10 allocations
1487    }
1488
1489    #[test]
1490    fn test_selective_serialization_options() {
1491        let options = SelectiveSerializationOptions {
1492            include_null_fields: true,
1493            compact_arrays: false,
1494            optimize_nested_objects: false,
1495            max_nesting_depth: 5,
1496            compress_large_strings: true,
1497            string_compression_threshold: 100,
1498        };
1499
1500        assert!(options.include_null_fields);
1501        assert!(!options.compact_arrays);
1502        assert_eq!(options.max_nesting_depth, 5);
1503        assert_eq!(options.string_compression_threshold, 100);
1504    }
1505
1506    #[test]
1507    fn test_batch_writing() {
1508        let buffer = Vec::new();
1509        let cursor = Cursor::new(buffer);
1510        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to get test value");
1511
1512        writer.write_header(2).expect("Failed to write header");
1513
1514        let allocations = vec![
1515            PartialAllocationInfo {
1516                ptr: Some(0x1000),
1517                size: Some(1024),
1518                var_name: Some(Some("var1".to_string())),
1519                type_name: Some(Some("i32".to_string())),
1520                scope_name: Some(None),
1521                timestamp_alloc: Some(1234567890),
1522                timestamp_dealloc: Some(None),
1523                thread_id: Some("main".to_string()),
1524                borrow_count: Some(0),
1525                stack_trace: Some(None),
1526                is_leaked: Some(false),
1527                lifetime_ms: Some(None),
1528                borrow_info: None,
1529                clone_info: None,
1530                ownership_history_available: Some(false),
1531            },
1532            PartialAllocationInfo {
1533                ptr: Some(0x2000),
1534                size: Some(2048),
1535                var_name: Some(Some("var2".to_string())),
1536                type_name: Some(Some("String".to_string())),
1537                scope_name: Some(None),
1538                timestamp_alloc: Some(1234567891),
1539                timestamp_dealloc: Some(None),
1540                thread_id: Some("worker".to_string()),
1541                borrow_count: Some(1),
1542                stack_trace: Some(None),
1543                is_leaked: Some(false),
1544                lifetime_ms: Some(None),
1545                borrow_info: None,
1546                clone_info: None,
1547                ownership_history_available: Some(false),
1548            },
1549        ];
1550
1551        let requested_fields = [
1552            AllocationField::Ptr,
1553            AllocationField::Size,
1554            AllocationField::VarName,
1555        ]
1556        .into_iter()
1557        .collect();
1558
1559        writer
1560            .write_allocation_batch(&allocations, &requested_fields)
1561            .expect("Test operation failed");
1562        let stats = writer.finalize().expect("Test operation failed");
1563
1564        assert_eq!(stats.allocations_written, 2);
1565        assert!(stats.bytes_written > 0);
1566    }
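
    // Sketch for larger inputs (assumed usage, not exercised here): assuming
    // `write_allocation_batch` takes a `&[PartialAllocationInfo]` slice, as the
    // `&allocations` call above suggests, a caller in a fallible context could
    // feed it in fixed-size pieces, e.g.
    //     for chunk in allocations.chunks(1000) {
    //         writer.write_allocation_batch(chunk, &requested_fields)?;
    //     }
    // which mirrors the writer config's default `array_chunk_size` of 1000.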
1567
1568    #[test]
1569    fn test_string_compression() {
1570        let buffer = Vec::new();
1571        let cursor = Cursor::new(buffer);
1572        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1573
1574        let options = SelectiveSerializationOptions {
1575            compress_large_strings: true,
1576            string_compression_threshold: 10,
1577            ..Default::default()
1578        };
1579
1580        writer.write_header(1).expect("Failed to write header");
1581
1582        let allocation = PartialAllocationInfo {
1583            ptr: Some(0x1000),
1584            size: Some(1024),
1585            var_name: Some(Some(
1586                "this_is_a_very_long_variable_name_that_should_be_compressed".to_string(),
1587            )),
1588            type_name: Some(Some("Vec<u8>".to_string())),
1589            scope_name: Some(None),
1590            timestamp_alloc: Some(1234567890),
1591            timestamp_dealloc: Some(None),
1592            thread_id: Some("main".to_string()),
1593            borrow_count: Some(0),
1594            stack_trace: Some(None),
1595            is_leaked: Some(false),
1596            lifetime_ms: Some(None),
1597            borrow_info: None,
1598            clone_info: None,
1599            ownership_history_available: Some(false),
1600        };
1601
1602        let requested_fields = [AllocationField::VarName].into_iter().collect();
1603
1604        writer
1605            .write_allocation_selective_with_options(&allocation, &requested_fields, &options)
1606            .expect("Test operation failed");
1607        let stats = writer.finalize().expect("Test operation failed");
1608
1609        assert_eq!(stats.allocations_written, 1);
1610    }
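
    // The roughly 60-character variable name above is well past the 10-byte
    // `string_compression_threshold` configured for this test, so the
    // large-string path is what gets exercised; only successful serialization
    // is asserted, not the encoded form.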
1611
1612    #[test]
1613    fn test_compact_stack_trace() {
1614        let buffer = Vec::new();
1615        let cursor = Cursor::new(buffer);
1616        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1617
1618        let options = SelectiveSerializationOptions {
1619            compact_arrays: true,
1620            ..Default::default()
1621        };
1622
1623        writer.write_header(1).expect("Failed to write header");
1624
1625        // Create a long stack trace
1626        let long_stack_trace: Vec<String> =
1627            (0..15).map(|i| format!("function_frame_{i}")).collect();
1628
1629        let allocation = PartialAllocationInfo {
1630            ptr: Some(0x1000),
1631            size: Some(1024),
1632            var_name: Some(Some("test".to_string())),
1633            type_name: Some(Some("i32".to_string())),
1634            scope_name: Some(None),
1635            timestamp_alloc: Some(1234567890),
1636            timestamp_dealloc: Some(None),
1637            thread_id: Some("main".to_string()),
1638            borrow_count: Some(0),
1639            stack_trace: Some(Some(long_stack_trace)),
1640            is_leaked: Some(false),
1641            lifetime_ms: Some(None),
1642            borrow_info: None,
1643            clone_info: None,
1644            ownership_history_available: Some(false),
1645        };
1646
1647        let requested_fields = [AllocationField::StackTrace].into_iter().collect();
1648
1649        writer
1650            .write_allocation_selective_with_options(&allocation, &requested_fields, &options)
1651            .expect("Test operation failed");
1652        let stats = writer.finalize().expect("Test operation failed");
1653
1654        assert_eq!(stats.allocations_written, 1);
1655    }
1656
1657    #[test]
1658    fn test_intelligent_buffering() {
1659        let buffer = Vec::new();
1660        let cursor = Cursor::new(buffer);
1661        let config = StreamingJsonWriterConfigBuilder::new()
1662            .buffer_size(1024)
1663            .max_memory_before_flush(2048)
1664            .build();
1665        let mut writer =
1666            StreamingJsonWriter::with_config(cursor, config).expect("Test operation failed");
1667
1668        writer.write_header(3).expect("Failed to write header");
1669
1670        let allocations = vec![
1671            PartialAllocationInfo {
1672                ptr: Some(0x1000),
1673                size: Some(1024),
1674                var_name: Some(Some("var1".to_string())),
1675                type_name: Some(Some("i32".to_string())),
1676                scope_name: Some(None),
1677                timestamp_alloc: Some(1234567890),
1678                timestamp_dealloc: Some(None),
1679                thread_id: Some("main".to_string()),
1680                borrow_count: Some(0),
1681                stack_trace: Some(None),
1682                is_leaked: Some(false),
1683                lifetime_ms: Some(None),
1684                borrow_info: None,
1685                clone_info: None,
1686                ownership_history_available: Some(false),
1687            },
1688            PartialAllocationInfo {
1689                ptr: Some(0x2000),
1690                size: Some(2048),
1691                var_name: Some(Some("var2".to_string())),
1692                type_name: Some(Some("String".to_string())),
1693                scope_name: Some(None),
1694                timestamp_alloc: Some(1234567891),
1695                timestamp_dealloc: Some(None),
1696                thread_id: Some("worker".to_string()),
1697                borrow_count: Some(1),
1698                stack_trace: Some(None),
1699                is_leaked: Some(false),
1700                lifetime_ms: Some(None),
1701                borrow_info: None,
1702                clone_info: None,
1703                ownership_history_available: Some(false),
1704            },
1705            PartialAllocationInfo {
1706                ptr: Some(0x3000),
1707                size: Some(512),
1708                var_name: Some(Some("var3".to_string())),
1709                type_name: Some(Some("Vec<u8>".to_string())),
1710                scope_name: Some(None),
1711                timestamp_alloc: Some(1234567892),
1712                timestamp_dealloc: Some(None),
1713                thread_id: Some("async".to_string()),
1714                borrow_count: Some(2),
1715                stack_trace: Some(None),
1716                is_leaked: Some(false),
1717                lifetime_ms: Some(None),
1718                borrow_info: None,
1719                clone_info: None,
1720                ownership_history_available: Some(false),
1721            },
1722        ];
1723
1724        let requested_fields = [
1725            AllocationField::Ptr,
1726            AllocationField::Size,
1727            AllocationField::VarName,
1728            AllocationField::TypeName,
1729        ]
1730        .into_iter()
1731        .collect();
1732
1733        writer
1734            .write_allocation_batch(&allocations, &requested_fields)
1735            .expect("Test operation failed");
1736        let stats = writer.finalize().expect("Test operation failed");
1737
1738        assert_eq!(stats.allocations_written, 3);
1739        assert_eq!(stats.batch_operations, 1);
1740        assert_eq!(stats.avg_batch_size, 3.0);
1741        assert!(stats.batch_processing_time_us > 0);
1742    }
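
    // The 1 KB buffer and 2 KB flush threshold are far smaller than the defaults,
    // presumably to exercise the intelligent-buffering path under memory pressure;
    // the assertions confirm the three allocations went through as a single batch
    // operation of average size 3.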
1743
1744    #[test]
1745    fn test_adaptive_chunking() {
1746        let buffer = Vec::new();
1747        let cursor = Cursor::new(buffer);
1748        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1749
1750        writer.write_header(5).expect("Failed to write header");
1751
1752        // Create a larger dataset
1753        let allocations: Vec<PartialAllocationInfo> = (0..5)
1754            .map(|i| PartialAllocationInfo {
1755                ptr: Some(0x1000 + i * 0x100),
1756                size: Some(1024 + i * 100),
1757                var_name: Some(Some(format!("var_{i}"))),
1758                type_name: Some(Some("i32".to_string())),
1759                scope_name: Some(None),
1760                timestamp_alloc: Some(1234567890 + i as u64),
1761                timestamp_dealloc: Some(None),
1762                thread_id: Some("main".to_string()),
1763                borrow_count: Some(i),
1764                stack_trace: Some(None),
1765                is_leaked: Some(false),
1766                lifetime_ms: Some(None),
1767                borrow_info: None,
1768                clone_info: None,
1769                ownership_history_available: Some(false),
1770            })
1771            .collect();
1772
1773        let requested_fields = [
1774            AllocationField::Ptr,
1775            AllocationField::Size,
1776            AllocationField::VarName,
1777        ]
1778        .into_iter()
1779        .collect();
1780
1781        let options = SelectiveSerializationOptions::default();
1782
1783        writer
1784            .write_allocation_adaptive_chunked(&allocations, &requested_fields, &options)
1785            .expect("Test operation failed");
1786        let stats = writer.finalize().expect("Test operation failed");
1787
1788        assert_eq!(stats.allocations_written, 5);
1789        assert!(stats.batch_operations > 0);
1790    }
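
    // Adaptive chunking is only checked coarsely here: all five allocations come
    // back in the stats and at least one batch operation was recorded, leaving
    // the chunk-sizing policy itself as an implementation detail.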
1791
1792    #[test]
1793    fn test_batch_statistics() {
1794        let stats = StreamingJsonStats {
1795            batch_operations: 3,
1796            avg_batch_size: 10.0,
1797            batch_processing_time_us: 5000,
1798            total_write_time_us: 10000,
1799            intelligent_flushes: 2,
1800            ..Default::default()
1801        };
1802
1803        assert_eq!(stats.batch_processing_efficiency(), 50.0);
1804        assert_eq!(stats.batch_operations, 3);
1805        assert_eq!(stats.avg_batch_size, 10.0);
1806        assert_eq!(stats.intelligent_flushes, 2);
1807    }
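
    // As with the earlier stats test, the asserted value implies
    // batch_processing_efficiency() ≈ batch_processing_time_us /
    // total_write_time_us * 100 = 5_000 / 10_000 = 50% (again an interpretation
    // of the metric, not a spec).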
1808
1809    // Additional tests to improve coverage
1810
1811    #[test]
1812    fn test_config_builder_comprehensive() {
1813        let config = StreamingJsonWriterConfigBuilder::new()
1814            .buffer_size(128 * 1024)
1815            .pretty_print(false)
1816            .max_memory_before_flush(16 * 1024 * 1024)
1817            .array_chunk_size(500)
1818            .field_optimization(true)
1819            .buffer_reuse(false)
1820            .indent_size(4)
1821            .build();
1822
1823        assert_eq!(config.buffer_size, 128 * 1024);
1824        assert!(!config.pretty_print);
1825        assert_eq!(config.max_memory_before_flush, 16 * 1024 * 1024);
1826        assert_eq!(config.array_chunk_size, 500);
1827        assert!(config.enable_field_optimization);
1828        assert!(!config.enable_buffer_reuse);
1829        assert_eq!(config.indent_size, 4);
1830    }
1831
1832    #[test]
1833    fn test_config_builder_default() {
1834        let config = StreamingJsonWriterConfigBuilder::default().build();
1835        assert_eq!(config.buffer_size, 256 * 1024);
1836        assert!(!config.pretty_print);
1837        assert!(config.enable_field_optimization);
1838        assert!(config.enable_buffer_reuse);
1839    }
1840
1841    #[test]
1842    fn test_selective_serialization_options_default() {
1843        let options = SelectiveSerializationOptions::default();
1844        assert!(!options.include_null_fields);
1845        assert!(options.compact_arrays);
1846        assert!(options.optimize_nested_objects);
1847        assert_eq!(options.max_nesting_depth, 10);
1848        assert!(!options.compress_large_strings);
1849        assert_eq!(options.string_compression_threshold, 1024);
1850    }
1851
1852    #[test]
1853    fn test_write_header_with_custom_array_name() {
1854        let buffer = Vec::new();
1855        let cursor = Cursor::new(buffer);
1856        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1857
1858        let result = writer.write_header_with_array_name(10, "custom_allocations");
1859        assert!(result.is_ok());
1860
1861        let stats = writer.finalize().expect("Failed to finalize");
1862        assert_eq!(stats.allocations_written, 0);
1863    }
1864
1865    #[test]
1866    fn test_write_allocation_full() {
1867        let buffer = Vec::new();
1868        let cursor = Cursor::new(buffer);
1869        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1870
1871        writer.write_header(1).expect("Failed to write header");
1872
1873        let allocation = AllocationInfo {
1874            ptr: 0x1000,
1875            size: 1024,
1876            var_name: Some("test_var".to_string()),
1877            type_name: Some("Vec<u8>".to_string()),
1878            scope_name: Some("test_scope".to_string()),
1879            timestamp_alloc: 1234567890,
1880            timestamp_dealloc: None,
1881            thread_id: "main".to_string(),
1882            borrow_count: 0,
1883            stack_trace: Some(vec!["frame1".to_string(), "frame2".to_string()]),
1884            is_leaked: false,
1885            lifetime_ms: Some(100),
1886            borrow_info: None,
1887            clone_info: None,
1888            ownership_history_available: false,
1889            smart_pointer_info: None,
1890            memory_layout: None,
1891            generic_info: None,
1892            dynamic_type_info: None,
1893            runtime_state: None,
1894            stack_allocation: None,
1895            temporary_object: None,
1896            fragmentation_analysis: None,
1897            generic_instantiation: None,
1898            type_relationships: None,
1899            type_usage: None,
1900            function_call_tracking: None,
1901            lifecycle_tracking: None,
1902            access_tracking: None,
1903            drop_chain_analysis: None,
1904        };
1905
1906        let result = writer.write_allocation_full(&allocation);
1907        assert!(result.is_ok());
1908
1909        let stats = writer.finalize().expect("Failed to finalize");
1910        assert_eq!(stats.allocations_written, 1);
1911    }
1912
1913    #[test]
1914    fn test_specialized_allocation_formats() {
1915        let buffer = Vec::new();
1916        let cursor = Cursor::new(buffer);
1917        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1918
1919        writer.write_header(4).expect("Failed to write header");
1920
1921        let allocation = PartialAllocationInfo {
1922            ptr: Some(0x1000),
1923            size: Some(1024),
1924            var_name: Some(Some("test_var".to_string())),
1925            type_name: Some(Some("TestType".to_string())),
1926            scope_name: Some(Some("test_scope".to_string())),
1927            timestamp_alloc: Some(1234567890),
1928            timestamp_dealloc: Some(None),
1929            thread_id: Some("main".to_string()),
1930            borrow_count: Some(0),
1931            stack_trace: Some(Some(vec!["frame1".to_string()])),
1932            is_leaked: Some(false),
1933            lifetime_ms: Some(None),
1934            borrow_info: None,
1935            clone_info: None,
1936            ownership_history_available: Some(false),
1937        };
1938
1939        // Test memory analysis format
1940        let result = writer.write_memory_analysis_allocation(&allocation);
1941        assert!(result.is_ok());
1942
1943        // Test performance format
1944        let result = writer.write_performance_allocation(&allocation);
1945        assert!(result.is_ok());
1946
1947        // Test unsafe FFI format
1948        let result = writer.write_unsafe_ffi_allocation(&allocation);
1949        assert!(result.is_ok());
1950
1951        // Test complex types format
1952        let result = writer.write_complex_types_allocation(&allocation);
1953        assert!(result.is_ok());
1954
1955        let stats = writer.finalize().expect("Failed to finalize");
1956        assert_eq!(stats.allocations_written, 4);
1957    }
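
    // Each specialized writer call (memory analysis, performance, unsafe FFI,
    // complex types) counts as one written allocation, which is why the final
    // count is 4 for a single input allocation reused four times.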
1958
1959    #[test]
1960    fn test_lifecycle_event_writing() {
1961        let buffer = Vec::new();
1962        let cursor = Cursor::new(buffer);
1963        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
1964
1965        writer.write_header(3).expect("Failed to write header");
1966
1967        let allocation = PartialAllocationInfo {
1968            ptr: Some(0x1000),
1969            size: Some(1024),
1970            var_name: Some(Some("test_var".to_string())),
1971            type_name: Some(Some("TestType".to_string())),
1972            scope_name: Some(Some("test_scope".to_string())),
1973            timestamp_alloc: Some(1234567890),
1974            timestamp_dealloc: Some(None),
1975            thread_id: Some("main".to_string()),
1976            borrow_count: Some(0),
1977            stack_trace: Some(None),
1978            is_leaked: Some(false),
1979            lifetime_ms: Some(None),
1980            borrow_info: None,
1981            clone_info: None,
1982            ownership_history_available: Some(false),
1983        };
1984
1985        // Test different lifecycle events
1986        let result = writer.write_lifecycle_event(&allocation, "allocation");
1987        assert!(result.is_ok());
1988
1989        let result = writer.write_lifecycle_event(&allocation, "deallocation");
1990        assert!(result.is_ok());
1991
1992        let result = writer.write_lifecycle_event(&allocation, "borrow");
1993        assert!(result.is_ok());
1994
1995        let stats = writer.finalize().expect("Failed to finalize");
1996        assert_eq!(stats.allocations_written, 3);
1997    }
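
    // Each lifecycle event ("allocation", "deallocation", "borrow") is emitted as
    // its own entry, so three events against the same allocation produce
    // allocations_written == 3.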
1998
1999    #[test]
2000    fn test_lifecycle_event_with_null_fields() {
2001        let buffer = Vec::new();
2002        let cursor = Cursor::new(buffer);
2003        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
2004
2005        writer.write_header(1).expect("Failed to write header");
2006
2007        // Test with None fields to trigger type inference
2008        let allocation = PartialAllocationInfo {
2009            ptr: Some(0x1000),
2010            size: Some(64),
2011            var_name: Some(None),  // This will trigger variable name inference
2012            type_name: Some(None), // This will trigger type inference
2013            scope_name: Some(None),
2014            timestamp_alloc: Some(1234567890),
2015            timestamp_dealloc: Some(None),
2016            thread_id: Some("main".to_string()),
2017            borrow_count: Some(0),
2018            stack_trace: Some(None),
2019            is_leaked: Some(false),
2020            lifetime_ms: Some(None),
2021            borrow_info: None,
2022            clone_info: None,
2023            ownership_history_available: Some(false),
2024        };
2025
2026        let result = writer.write_lifecycle_event(&allocation, "allocation");
2027        assert!(result.is_ok());
2028
2029        let stats = writer.finalize().expect("Failed to finalize");
2030        assert_eq!(stats.allocations_written, 1);
2031    }
2032
2033    #[test]
2034    fn test_borrow_and_clone_info_serialization() {
2035        let buffer = Vec::new();
2036        let cursor = Cursor::new(buffer);
2037        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
2038
2039        writer.write_header(1).expect("Failed to write header");
2040
2041        use crate::core::types::{BorrowInfo, CloneInfo};
2042
2043        let allocation = PartialAllocationInfo {
2044            ptr: Some(0x1000),
2045            size: Some(1024),
2046            var_name: Some(Some("test_var".to_string())),
2047            type_name: Some(Some("TestType".to_string())),
2048            scope_name: Some(None),
2049            timestamp_alloc: Some(1234567890),
2050            timestamp_dealloc: Some(None),
2051            thread_id: Some("main".to_string()),
2052            borrow_count: Some(0),
2053            stack_trace: Some(None),
2054            is_leaked: Some(false),
2055            lifetime_ms: Some(None),
2056            borrow_info: Some(BorrowInfo {
2057                immutable_borrows: 2,
2058                mutable_borrows: 1,
2059                max_concurrent_borrows: 3,
2060                last_borrow_timestamp: Some(1234567900),
2061            }),
2062            clone_info: Some(CloneInfo {
2063                clone_count: 5,
2064                is_clone: true,
2065                original_ptr: Some(0x2000),
2066            }),
2067            ownership_history_available: Some(true),
2068        };
2069
2070        let requested_fields = [
2071            AllocationField::BorrowInfo,
2072            AllocationField::CloneInfo,
2073            AllocationField::OwnershipHistoryAvailable,
2074        ]
2075        .into_iter()
2076        .collect();
2077
2078        let result = writer.write_allocation_selective(&allocation, &requested_fields);
2079        assert!(result.is_ok());
2080
2081        let stats = writer.finalize().expect("Failed to finalize");
2082        assert_eq!(stats.allocations_written, 1);
2083    }
2084
2085    #[test]
2086    fn test_null_field_handling() {
2087        let buffer = Vec::new();
2088        let cursor = Cursor::new(buffer);
2089        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
2090
2091        writer.write_header(2).expect("Failed to write header");
2092
2093        let allocation = PartialAllocationInfo {
2094            ptr: Some(0x1000),
2095            size: Some(1024),
2096            var_name: Some(None),   // Null field
2097            type_name: Some(None),  // Null field
2098            scope_name: Some(None), // Null field
2099            timestamp_alloc: Some(1234567890),
2100            timestamp_dealloc: Some(None), // Null field
2101            thread_id: Some("main".to_string()),
2102            borrow_count: Some(0),
2103            stack_trace: Some(None), // Null field
2104            is_leaked: Some(false),
2105            lifetime_ms: Some(None), // Null field
2106            borrow_info: None,
2107            clone_info: None,
2108            ownership_history_available: Some(false),
2109        };
2110
2111        let all_fields = [
2112            AllocationField::VarName,
2113            AllocationField::TypeName,
2114            AllocationField::ScopeName,
2115            AllocationField::TimestampDealloc,
2116            AllocationField::StackTrace,
2117            AllocationField::LifetimeMs,
2118        ]
2119        .into_iter()
2120        .collect();
2121
2122        // Test with include_null_fields = false (default)
2123        let options_exclude_null = SelectiveSerializationOptions {
2124            include_null_fields: false,
2125            ..Default::default()
2126        };
2127
2128        let result = writer.write_allocation_selective_with_options(
2129            &allocation,
2130            &all_fields,
2131            &options_exclude_null,
2132        );
2133        assert!(result.is_ok());
2134
2135        // Test with include_null_fields = true
2136        let options_include_null = SelectiveSerializationOptions {
2137            include_null_fields: true,
2138            ..Default::default()
2139        };
2140
2141        let result = writer.write_allocation_selective_with_options(
2142            &allocation,
2143            &all_fields,
2144            &options_include_null,
2145        );
2146        assert!(result.is_ok());
2147
2148        let stats = writer.finalize().expect("Failed to finalize");
2149        assert_eq!(stats.allocations_written, 2);
2150    }
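
    // With include_null_fields = false the None-valued fields are presumably
    // omitted from the emitted object, while true should serialize them as
    // explicit nulls; this test only verifies that both code paths succeed.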
2151
2152    #[test]
2153    fn test_pretty_printing() {
2154        let buffer = Vec::new();
2155        let cursor = Cursor::new(buffer);
2156        let config = StreamingJsonWriterConfigBuilder::new()
2157            .pretty_print(true)
2158            .indent_size(2)
2159            .build();
2160        let mut writer =
2161            StreamingJsonWriter::with_config(cursor, config).expect("Failed to create writer");
2162
2163        writer.write_header(1).expect("Failed to write header");
2164
2165        let allocation = PartialAllocationInfo {
2166            ptr: Some(0x1000),
2167            size: Some(1024),
2168            var_name: Some(Some("test_var".to_string())),
2169            type_name: Some(Some("TestType".to_string())),
2170            scope_name: Some(None),
2171            timestamp_alloc: Some(1234567890),
2172            timestamp_dealloc: Some(None),
2173            thread_id: Some("main".to_string()),
2174            borrow_count: Some(0),
2175            stack_trace: Some(None),
2176            is_leaked: Some(false),
2177            lifetime_ms: Some(None),
2178            borrow_info: None,
2179            clone_info: None,
2180            ownership_history_available: Some(false),
2181        };
2182
2183        let requested_fields = [
2184            AllocationField::Ptr,
2185            AllocationField::Size,
2186            AllocationField::VarName,
2187            AllocationField::TypeName,
2188        ]
2189        .into_iter()
2190        .collect();
2191
2192        let result = writer.write_allocation_selective(&allocation, &requested_fields);
2193        assert!(result.is_ok());
2194
2195        let stats = writer.finalize().expect("Failed to finalize");
2196        assert_eq!(stats.allocations_written, 1);
2197        assert!(stats.bytes_written > 0);
2198    }
2199
2200    #[test]
2201    fn test_manual_flush() {
2202        let buffer = Vec::new();
2203        let cursor = Cursor::new(buffer);
2204        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
2205
2206        writer.write_header(1).expect("Failed to write header");
2207
2208        // Test manual flush
2209        let result = writer.flush();
2210        assert!(result.is_ok());
2211
2212        let stats = writer.get_stats();
2213        assert_eq!(stats.flush_count, 1);
2214
2215        let final_stats = writer.finalize().expect("Failed to finalize");
2216        assert!(final_stats.flush_count >= 1);
2217    }
2218
2219    #[test]
2220    fn test_intelligent_buffer_logic() {
2221        let mut buffer = IntelligentBuffer::new(1024);
2222
2223        // Test initial state
2224        assert!(!buffer.should_flush(100));
2225
2226        // Test size-based flushing
2227        assert!(buffer.should_flush(2000)); // Would exceed target size
2228
2229        // Test write tracking
2230        buffer.add_write(100);
2231        assert_eq!(buffer.current_usage, 100);
2232        assert_eq!(buffer.writes_since_flush, 1);
2233
2234        // Test reset after flush
2235        buffer.reset_after_flush();
2236        assert_eq!(buffer.current_usage, 0);
2237        assert_eq!(buffer.writes_since_flush, 0);
2238    }
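
    // The assertions above suggest the flush rule is roughly
    //     should_flush(incoming) == current_usage + incoming > target_size
    // (0 + 100 <= 1024 → false, 0 + 2000 > 1024 → true), with add_write()
    // accumulating usage and reset_after_flush() clearing both counters.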
2239
2240    #[test]
2241    fn test_writer_state_management() {
2242        let buffer = Vec::new();
2243        let cursor = Cursor::new(buffer);
2244        let mut writer = StreamingJsonWriter::new(cursor).expect("Failed to create writer");
2245
2246        // Test initial state
2247        assert_eq!(writer.state, WriterState::Initial);
2248
2249        // Test header writing changes state
2250        writer.write_header(1).expect("Failed to write header");
2251        assert_eq!(writer.state, WriterState::InAllocationsArray);
2252
2253        // Test finalization changes state
2254        let _stats = writer.finalize().expect("Failed to finalize");
2255        assert_eq!(writer.state, WriterState::Finalized);
2256    }
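
    // The writer's lifecycle as asserted here: WriterState::Initial on creation,
    // WriterState::InAllocationsArray once the header is written, and
    // WriterState::Finalized after finalize(). Writes outside the array state are
    // presumably rejected, though that is not exercised in this test.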
2257
2258    #[test]
2259    fn test_zero_time_stats_edge_case() {
2260        let stats = StreamingJsonStats {
2261            bytes_written: 1000,
2262            allocations_written: 10,
2263            total_write_time_us: 0, // Zero time edge case
2264            ..Default::default()
2265        };
2266
2267        assert_eq!(stats.write_throughput(), 0.0);
2268        assert_eq!(stats.batch_processing_efficiency(), 0.0);
2269    }
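
    // Both rate helpers fall back to 0.0 rather than dividing by zero when no
    // write time has been recorded, per the assertions above.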
2270
2271    #[test]
2272    fn test_string_escaping_comprehensive() {
2273        let buffer = Vec::new();
2274        let cursor = Cursor::new(buffer);
2275        let config = StreamingJsonWriterConfigBuilder::new()
2276            .buffer_reuse(true)
2277            .build();
2278        let mut writer =
2279            StreamingJsonWriter::with_config(cursor, config).expect("Failed to create writer");
2280
2281        writer.write_header(1).expect("Failed to write header");
2282
2283        // Test string with special characters that need escaping
2284        let allocation = PartialAllocationInfo {
2285            ptr: Some(0x1000),
2286            size: Some(1024),
2287            var_name: Some(Some(
2288                "test\"with\\quotes\nand\rnewlines\tand\x01control".to_string(),
2289            )),
2290            type_name: Some(Some("String".to_string())),
2291            scope_name: Some(None),
2292            timestamp_alloc: Some(1234567890),
2293            timestamp_dealloc: Some(None),
2294            thread_id: Some("main".to_string()),
2295            borrow_count: Some(0),
2296            stack_trace: Some(None),
2297            is_leaked: Some(false),
2298            lifetime_ms: Some(None),
2299            borrow_info: None,
2300            clone_info: None,
2301            ownership_history_available: Some(false),
2302        };
2303
2304        let requested_fields = [AllocationField::VarName].into_iter().collect();
2305
2306        let result = writer.write_allocation_selective(&allocation, &requested_fields);
2307        assert!(result.is_ok());
2308
2309        let stats = writer.finalize().expect("Failed to finalize");
2310        assert_eq!(stats.allocations_written, 1);
2311        assert!(stats.buffer_reuses > 0);
2312    }
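
    // The name above covers the escapes a JSON writer must handle: `\"`, `\\`,
    // `\n`, `\r`, `\t`, plus a raw 0x01 control byte, which RFC 8259 requires to
    // be emitted as a `\u0001`-style escape. The buffer_reuses > 0 assertion also
    // shows the buffer-reuse path enabled via buffer_reuse(true) was taken.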
2313
2314    #[test]
2315    fn test_string_escaping_without_buffer_reuse() {
2316        let buffer = Vec::new();
2317        let cursor = Cursor::new(buffer);
2318        let config = StreamingJsonWriterConfigBuilder::new()
2319            .buffer_reuse(false)
2320            .build();
2321        let mut writer =
2322            StreamingJsonWriter::with_config(cursor, config).expect("Failed to create writer");
2323
2324        writer.write_header(1).expect("Failed to write header");
2325
2326        let allocation = PartialAllocationInfo {
2327            ptr: Some(0x1000),
2328            size: Some(1024),
2329            var_name: Some(Some("test\"string".to_string())),
2330            type_name: Some(Some("String".to_string())),
2331            scope_name: Some(None),
2332            timestamp_alloc: Some(1234567890),
2333            timestamp_dealloc: Some(None),
2334            thread_id: Some("main".to_string()),
2335            borrow_count: Some(0),
2336            stack_trace: Some(None),
2337            is_leaked: Some(false),
2338            lifetime_ms: Some(None),
2339            borrow_info: None,
2340            clone_info: None,
2341            ownership_history_available: Some(false),
2342        };
2343
2344        let requested_fields = [AllocationField::VarName].into_iter().collect();
2345
2346        let result = writer.write_allocation_selective(&allocation, &requested_fields);
2347        assert!(result.is_ok());
2348
2349        let stats = writer.finalize().expect("Failed to finalize");
2350        assert_eq!(stats.allocations_written, 1);
2351        assert_eq!(stats.buffer_reuses, 0); // No buffer reuse
2352    }
2353}