1use tracing::{debug, warn};
7
8use crate::analysis::is_virtual_pointer;
9use crate::analysis::memory_passport_tracker::MemoryPassportTracker;
10use crate::analysis::node_id::NodeId;
11use crate::analysis::ownership_graph::{EdgeKind, OwnershipGraph, OwnershipOp};
12use crate::capture::platform::memory_info::PlatformMemoryInfo;
13use crate::core::{MemScopeError, MemScopeResult};
14use crate::render_engine::dashboard::{rebuild_allocations_from_events, DashboardRenderer};
15use crate::snapshot::{ActiveAllocation, MemorySnapshot, ThreadMemoryStats};
16use crate::tracker::Tracker;
17use rayon::prelude::*;
18use serde_json::json;
19use std::{
20 collections::HashMap,
21 fs::File,
22 io::{BufWriter, Write},
23 path::Path,
24 sync::Arc,
25};
26
/// Effort level for export-time optimizations.
///
/// NOTE(review): not referenced by any export path visible in this file;
/// presumably consumed elsewhere in the crate — confirm variant semantics
/// against callers before relying on them.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum OptimizationLevel {
    Low,
    /// Default level.
    #[default]
    Medium,
    High,
    Maximum,
}
40
/// Validates the top-level shape of export JSON before it is written.
#[derive(Debug, Clone, Default)]
pub struct SchemaValidator {
    // When true, `validate` additionally requires the standard top-level
    // keys ("timestamp", "allocations", "stats") to be present.
    strict_mode: bool,
}
46
47impl SchemaValidator {
48 pub fn new() -> Self {
49 Self { strict_mode: false }
50 }
51
52 pub fn with_strict_mode(mut self, strict: bool) -> Self {
53 self.strict_mode = strict;
54 self
55 }
56
57 pub fn validate(&self, data: &serde_json::Value) -> Result<(), String> {
58 if !data.is_object() {
59 return Err("Export data must be a JSON object".to_string());
60 }
61
62 let obj = data.as_object().ok_or("Invalid JSON object")?;
63
64 if self.strict_mode {
65 let required_fields = ["timestamp", "allocations", "stats"];
66 for field in &required_fields {
67 if !obj.contains_key(*field) {
68 return Err(format!("Missing required field: {}", field));
69 }
70 }
71 }
72
73 Ok(())
74 }
75}
76
/// Tuning knobs for the JSON export pipeline.
///
/// Only some fields are consumed by the exporters visible in this file
/// (`parallel_processing`, `batch_size`, `buffer_size`, `use_compact_format`,
/// `streaming_writer`); the rest are presumably read by other export paths
/// in the crate — confirm against callers.
#[derive(Debug, Clone)]
pub struct ExportJsonOptions {
    /// Process allocations with rayon when their count exceeds `batch_size`.
    pub parallel_processing: bool,
    /// Capacity of the `BufWriter` used by the streaming output path.
    pub buffer_size: usize,
    /// `Some(true)` forces compact JSON, `Some(false)` forces pretty;
    /// `None` lets the writer decide from the estimated payload size.
    pub use_compact_format: Option<bool>,
    // NOTE(review): not read by the exporters in this file.
    pub enable_type_cache: bool,
    /// Minimum allocation count before parallel processing kicks in.
    pub batch_size: usize,
    /// Stream large payloads through a `BufWriter` instead of building a `String`.
    pub streaming_writer: bool,
    // NOTE(review): fields below are not read by the exporters in this file;
    // documented by name only — verify semantics against their consumers.
    pub schema_validation: bool,
    pub adaptive_optimization: bool,
    pub max_cache_size: usize,
    pub security_analysis: bool,
    pub include_low_severity: bool,
    pub integrity_hashes: bool,
    pub fast_export_mode: bool,
    pub auto_fast_export_threshold: Option<usize>,
    pub thread_count: Option<usize>,
}
95
impl Default for ExportJsonOptions {
    /// Defaults: parallel batching over 1000 allocations, 256 KiB write
    /// buffer, streaming writer enabled, format auto-selected, and the
    /// optional analyses (security, integrity hashes, low-severity
    /// findings, fast export) switched off.
    fn default() -> Self {
        Self {
            parallel_processing: true,
            buffer_size: 256 * 1024, // 256 KiB
            use_compact_format: None, // auto: decided from estimated size
            enable_type_cache: true,
            batch_size: 1000,
            streaming_writer: true,
            schema_validation: false,
            adaptive_optimization: true,
            max_cache_size: 10_000,
            security_analysis: false,
            include_low_severity: false,
            integrity_hashes: false,
            fast_export_mode: false,
            auto_fast_export_threshold: Some(10_000),
            thread_count: None,
        }
    }
}
117
impl ExportJsonOptions {
    /// Builder setter for [`ExportJsonOptions::fast_export_mode`].
    pub fn fast_export_mode(mut self, enabled: bool) -> Self {
        self.fast_export_mode = enabled;
        self
    }

    /// Builder setter for [`ExportJsonOptions::security_analysis`].
    pub fn security_analysis(mut self, enabled: bool) -> Self {
        self.security_analysis = enabled;
        self
    }

    /// Builder setter for [`ExportJsonOptions::streaming_writer`].
    pub fn streaming_writer(mut self, enabled: bool) -> Self {
        self.streaming_writer = enabled;
        self
    }

    /// Builder setter for [`ExportJsonOptions::schema_validation`].
    pub fn schema_validation(mut self, enabled: bool) -> Self {
        self.schema_validation = enabled;
        self
    }

    /// Builder setter for [`ExportJsonOptions::integrity_hashes`].
    pub fn integrity_hashes(mut self, enabled: bool) -> Self {
        self.integrity_hashes = enabled;
        self
    }

    /// Builder setter for [`ExportJsonOptions::batch_size`].
    pub fn batch_size(mut self, size: usize) -> Self {
        self.batch_size = size;
        self
    }

    /// Builder setter for [`ExportJsonOptions::adaptive_optimization`].
    pub fn adaptive_optimization(mut self, enabled: bool) -> Self {
        self.adaptive_optimization = enabled;
        self
    }

    /// Builder setter for [`ExportJsonOptions::max_cache_size`].
    pub fn max_cache_size(mut self, size: usize) -> Self {
        self.max_cache_size = size;
        self
    }

    /// Builder setter for [`ExportJsonOptions::include_low_severity`].
    pub fn include_low_severity(mut self, include: bool) -> Self {
        self.include_low_severity = include;
        self
    }

    /// Builder setter for [`ExportJsonOptions::thread_count`].
    pub fn thread_count(mut self, count: Option<usize>) -> Self {
        self.thread_count = count;
        self
    }
}
169
170pub fn export_snapshot_to_json(
171 snapshot: &MemorySnapshot,
172 output_path: &Path,
173 options: &ExportJsonOptions,
174) -> Result<(), Box<dyn std::error::Error>> {
175 if let Some(parent) = output_path.parent() {
177 if !parent.as_os_str().is_empty() {
178 std::fs::create_dir_all(parent)?;
179 }
180 }
181
182 let allocations: Vec<&ActiveAllocation> = snapshot.active_allocations.values().collect();
183 let processed = process_allocations(&allocations, options)?;
184
185 let output_dir = if output_path.extension().is_some() {
187 output_path.parent().unwrap_or(Path::new("."))
189 } else {
190 output_path
191 };
192
193 generate_memory_analysis_json(output_dir, &processed, options)?;
194 generate_lifetime_json(output_dir, &processed, options)?;
195 generate_thread_analysis_json(output_dir, &snapshot.thread_stats, options)?;
196
197 Ok(())
198}
199
200fn process_allocations(
201 allocations: &[&ActiveAllocation],
202 options: &ExportJsonOptions,
203) -> Result<Vec<serde_json::Value>, Box<dyn std::error::Error>> {
204 if options.parallel_processing && allocations.len() > options.batch_size {
205 let chunk_size = (allocations.len() / num_cpus::get()).max(1);
206 Ok(allocations
207 .par_chunks(chunk_size)
208 .flat_map(process_allocation_batch)
209 .collect())
210 } else {
211 Ok(process_allocation_batch(allocations))
212 }
213}
214
215fn process_allocation_batch(allocations: &[&ActiveAllocation]) -> Vec<serde_json::Value> {
216 let current_time = std::time::SystemTime::now()
217 .duration_since(std::time::UNIX_EPOCH)
218 .map(|d| d.as_nanos() as u64)
219 .unwrap_or(0);
220
221 allocations
222 .iter()
223 .map(|alloc| {
224 let type_info = get_or_compute_type_info(
225 alloc.type_name.as_deref().unwrap_or("unknown"),
226 alloc.size,
227 );
228
229 let lifetime_ms = if alloc.allocated_at > 0 {
230 (current_time.saturating_sub(alloc.allocated_at)) / 1_000_000
231 } else {
232 0
233 };
234
235 let address = match alloc.ptr {
236 Some(ptr) => format!("0x{:x}", ptr),
237 None => "N/A".to_string(),
238 };
239
240 let mut entry = json!({
241 "address": address,
242 "size": alloc.size,
243 "type": type_info,
244 "timestamp": alloc.allocated_at,
245 "thread_id": alloc.thread_id,
246 "lifetime_ms": lifetime_ms,
247 });
248
249 if let Some(ref var_name) = alloc.var_name {
250 entry["var_name"] = serde_json::json!(var_name);
251 }
252
253 if let Some(ref type_name) = alloc.type_name {
254 entry["type_name"] = serde_json::json!(type_name);
255 }
256
257 entry
258 })
259 .collect()
260}
261
/// Maps a Rust type name (plus allocation size) onto a coarse category
/// used by the JSON reports: `dynamic_array`, `string`, `smart_pointer`,
/// `byte_array`, `large_buffer` or `custom`.
///
/// Matching is substring-based and checked in priority order, so e.g.
/// `Vec<String>` classifies as a dynamic array, not a string.
fn get_or_compute_type_info(type_name: &str, size: usize) -> String {
    let is_vec = (type_name.contains("Vec<") || type_name.contains("vec::Vec<"))
        && !type_name.contains("VecDeque");
    let is_string = type_name == "str"
        || type_name == "String"
        || type_name.contains("&str")
        || type_name.contains("alloc::string::String");
    let is_smart_ptr =
        type_name.contains("Box") || type_name.contains("Rc") || type_name.contains("Arc");
    let is_byte_array = type_name.contains("[") && type_name.contains("u8");

    let category = if is_vec {
        "dynamic_array"
    } else if is_string {
        "string"
    } else if is_smart_ptr {
        "smart_pointer"
    } else if is_byte_array {
        "byte_array"
    } else if size > 1024 * 1024 {
        // Unrecognized shapes over 1 MiB count as large buffers.
        "large_buffer"
    } else {
        "custom"
    };
    category.to_string()
}
284
285fn generate_memory_analysis_json<P: AsRef<Path>>(
286 output_path: P,
287 allocations: &[serde_json::Value],
288 options: &ExportJsonOptions,
289) -> Result<(), Box<dyn std::error::Error>> {
290 let total_size: usize = allocations
291 .iter()
292 .filter_map(|a| a.get("size").and_then(|s| s.as_u64()))
293 .map(|s| s as usize)
294 .sum();
295
296 let type_distribution: HashMap<String, usize> = {
297 let mut dist = HashMap::new();
298 for alloc in allocations {
299 if let Some(t) = alloc.get("type").and_then(|t| t.as_str()) {
300 *dist.entry(t.to_string()).or_insert(0) += 1;
301 }
302 }
303 dist
304 };
305
306 let data = json!({
307 "metadata": {
308 "export_version": "2.0",
309 "export_timestamp": chrono::Utc::now().to_rfc3339(),
310 "specification": "memscope-rs memory analysis",
311 "total_allocations": allocations.len(),
312 "total_size_bytes": total_size
313 },
314 "allocations": allocations,
315 "statistics": {
316 "total_allocations": allocations.len(),
317 "total_size_bytes": total_size,
318 "average_size_bytes": if allocations.is_empty() { 0 } else { total_size / allocations.len() }
319 },
320 "type_distribution": type_distribution
321 });
322
323 let path = output_path.as_ref().join("memory_analysis.json");
324 write_json_optimized(path, &data, options)?;
325 Ok(())
326}
327
328fn generate_lifetime_json<P: AsRef<Path>>(
329 output_path: P,
330 allocations: &[serde_json::Value],
331 options: &ExportJsonOptions,
332) -> Result<(), Box<dyn std::error::Error>> {
333 let ownership_histories: Vec<serde_json::Value> = allocations
334 .iter()
335 .map(|alloc| {
336 json!({
337 "address": alloc.get("address"),
338 "var_name": alloc.get("var_name"),
339 "type_name": alloc.get("type_name"),
340 "size": alloc.get("size"),
341 "timestamp_alloc": alloc.get("timestamp"),
342 "timestamp_dealloc": null,
343 "lifetime_ms": alloc.get("lifetime_ms"),
344 "events": [
345 {
346 "event_type": "Created",
347 "timestamp": alloc.get("timestamp"),
348 "context": "initial_allocation"
349 }
350 ]
351 })
352 })
353 .collect();
354
355 let lifetime_data = json!({
356 "metadata": {
357 "export_version": "2.0",
358 "export_timestamp": chrono::Utc::now().to_rfc3339(),
359 "specification": "memscope-rs lifetime tracking",
360 "total_tracked_allocations": ownership_histories.len()
361 },
362 "ownership_histories": ownership_histories
363 });
364
365 let lifetime_path = output_path.as_ref().join("lifetime.json");
366 write_json_optimized(lifetime_path, &lifetime_data, options)?;
367 Ok(())
368}
369
370fn generate_thread_analysis_json<P: AsRef<Path>>(
371 output_path: P,
372 thread_stats: &HashMap<u64, ThreadMemoryStats>,
373 options: &ExportJsonOptions,
374) -> Result<(), Box<dyn std::error::Error>> {
375 let thread_analysis: Vec<serde_json::Value> = thread_stats
376 .values()
377 .map(|stats| {
378 json!({
379 "thread_id": stats.thread_id,
380 "allocation_count": stats.allocation_count,
381 "total_allocated": stats.total_allocated,
382 "current_memory": stats.current_memory,
383 "peak_memory": stats.peak_memory,
384 })
385 })
386 .collect();
387
388 let data = json!({
389 "metadata": {
390 "export_version": "2.0",
391 "export_timestamp": chrono::Utc::now().to_rfc3339(),
392 "specification": "thread analysis",
393 "total_threads": thread_analysis.len()
394 },
395 "thread_analysis": thread_analysis
396 });
397
398 let path = output_path.as_ref().join("thread_analysis.json");
399 write_json_optimized(path, &data, options)?;
400 Ok(())
401}
402
403fn write_json_optimized<P: AsRef<Path>>(
404 path: P,
405 data: &serde_json::Value,
406 options: &ExportJsonOptions,
407) -> Result<(), Box<dyn std::error::Error>> {
408 let path = path.as_ref();
409
410 let estimated_size = estimate_json_size(data);
411 let use_compact = options
412 .use_compact_format
413 .unwrap_or(estimated_size > 1_000_000);
414
415 if options.streaming_writer && estimated_size > 500_000 {
416 let file = File::create(path)?;
417 let mut writer = BufWriter::with_capacity(options.buffer_size, file);
418
419 if use_compact {
420 serde_json::to_writer(&mut writer, data)?;
421 } else {
422 serde_json::to_writer_pretty(&mut writer, data)?;
423 }
424
425 writer.flush()?;
426 } else {
427 let json_string = if use_compact {
428 serde_json::to_string(data)?
429 } else {
430 serde_json::to_string_pretty(data)?
431 };
432 std::fs::write(path, json_string)?;
433 }
434
435 Ok(())
436}
437
438fn estimate_json_size(data: &serde_json::Value) -> usize {
439 match data {
440 serde_json::Value::Object(map) => {
441 map.values().map(estimate_json_size).sum::<usize>() + map.len() * 20
442 }
443 serde_json::Value::Array(arr) => {
444 arr.iter().map(estimate_json_size).sum::<usize>() + arr.len() * 10
445 }
446 serde_json::Value::String(s) => s.len(),
447 serde_json::Value::Number(n) => n.to_string().len(),
448 _ => 10,
449 }
450}
451
/// Errors produced by the JSON export pipeline.
///
/// NOTE(review): not referenced by the exporters visible in this file
/// (they use `MemScopeError` / boxed errors); presumably consumed
/// elsewhere in the crate — confirm before removing.
#[derive(Debug, thiserror::Error)]
pub enum ExportError {
    /// Underlying filesystem failure (create/write).
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    /// Serialization failure from `serde_json`.
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::Error),

    /// Catch-all for export-specific failures with a free-form message.
    #[error("Export failed: {0}")]
    ExportFailed(String),
}
463
464pub fn export_all_json<P: AsRef<Path>>(
465 path: P,
466 tracker: &Tracker,
467 passport_tracker: &Arc<MemoryPassportTracker>,
468 async_tracker: &Arc<crate::capture::backends::async_tracker::AsyncTracker>,
469) -> MemScopeResult<()> {
470 let path_ref = path.as_ref();
471
472 let events = tracker.event_store().snapshot();
474 let allocations = rebuild_allocations_from_events(&events);
475 let snapshot = MemorySnapshot::from_allocation_infos(allocations.clone());
476 let options = ExportJsonOptions::default();
477
478 std::fs::create_dir_all(path_ref)
479 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
480
481 debug!("Starting export_snapshot_to_json");
482
483 export_snapshot_to_json(&snapshot, path_ref, &options)
484 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
485
486 debug!("Completed export_snapshot_to_json");
487
488 debug!("Starting export_memory_passports_json");
489
490 export_memory_passports_json(path_ref, passport_tracker)
491 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
492
493 debug!("Completed export_memory_passports_json");
494
495 debug!("Starting export_leak_detection_json");
496
497 export_leak_detection_json(path_ref, passport_tracker)
498 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
499
500 debug!("Completed export_leak_detection_json");
501
502 debug!("Starting export_unsafe_ffi_json");
503
504 export_unsafe_ffi_json(path_ref, passport_tracker)
505 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
506
507 debug!("Completed export_unsafe_ffi_json");
508
509 debug!("Starting export_system_resources_json");
510
511 export_system_resources_json(path_ref)
512 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
513
514 debug!("Completed export_system_resources_json");
515
516 debug!("Starting export_async_analysis_json");
517
518 export_async_analysis_json(path_ref, async_tracker)
519 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
520
521 debug!("Completed export_async_analysis_json");
522
523 debug!("Starting export_ownership_graph_json");
524
525 let typed_allocations: Vec<crate::capture::types::AllocationInfo> =
526 allocations.clone().into_iter().map(|a| a.into()).collect();
527
528 debug!(
529 allocations = typed_allocations.len(),
530 "Converted allocations to typed format"
531 );
532
533 export_ownership_graph_json(path_ref, &typed_allocations, tracker.event_store())
534 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
535
536 debug!("Completed export_ownership_graph_json");
537
538 debug!("All exports completed successfully");
539
540 Ok(())
541}
542
543pub fn export_async_analysis_json<P: AsRef<Path>>(
545 path: P,
546 async_tracker: &Arc<crate::capture::backends::async_tracker::AsyncTracker>,
547) -> MemScopeResult<()> {
548 let path_ref = path.as_ref();
549 let stats = async_tracker.get_stats();
550 let profiles = async_tracker.get_all_profiles();
551 let snapshot = async_tracker.snapshot();
552
553 let async_data = json!({
554 "summary": {
555 "total_tasks": stats.total_tasks,
556 "active_tasks": stats.active_tasks,
557 "total_allocations": stats.total_allocations,
558 "total_memory_bytes": stats.total_memory,
559 "active_memory_bytes": stats.active_memory,
560 "peak_memory_bytes": stats.peak_memory,
561 },
562 "task_profiles": profiles.iter().map(|p| json!({
563 "task_id": p.task_id,
564 "task_name": p.task_name,
565 "task_type": format!("{:?}", p.task_type),
566 "created_at_ms": p.created_at_ms,
567 "completed_at_ms": p.completed_at_ms,
568 "total_bytes": p.total_bytes,
569 "current_memory": p.current_memory,
570 "peak_memory": p.peak_memory,
571 "total_allocations": p.total_allocations,
572 "total_deallocations": p.total_deallocations,
573 "duration_ns": p.duration_ns,
574 "allocation_rate": p.allocation_rate,
575 "efficiency_score": p.efficiency_score,
576 "average_allocation_size": p.average_allocation_size,
577 "is_completed": p.is_completed(),
578 "has_potential_leak": p.has_potential_leak(),
579 })).collect::<Vec<_>>(),
580 "allocations": snapshot.allocations.iter().map(|a| json!({
581 "ptr": format!("0x{:x}", a.ptr),
582 "size": a.size,
583 "timestamp": a.timestamp,
584 "task_id": a.task_id,
585 "var_name": a.var_name,
586 "type_name": a.type_name,
587 })).collect::<Vec<_>>(),
588 });
589
590 let async_path = path_ref.join("async_analysis.json");
591 let file = File::create(async_path)
592 .map_err(|e| MemScopeError::error("export", "export_async_analysis_json", e.to_string()))?;
593 let mut writer = BufWriter::new(file);
594 serde_json::to_writer_pretty(&mut writer, &async_data)
595 .map_err(|e| MemScopeError::error("export", "export_async_analysis_json", e.to_string()))?;
596 writer
597 .flush()
598 .map_err(|e| MemScopeError::error("export", "export_async_analysis_json", e.to_string()))?;
599
600 Ok(())
601}
602
/// Selects which HTML dashboard template is rendered on export.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum DashboardTemplate {
    /// The default template.
    #[default]
    Unified,
    Final,
}

impl std::fmt::Display for DashboardTemplate {
    /// Renders the template's file-name stem, used when building the
    /// output file name (e.g. `dashboard_unified_dashboard.html`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let stem = match self {
            DashboardTemplate::Unified => "dashboard_unified",
            DashboardTemplate::Final => "dashboard_final",
        };
        f.write_str(stem)
    }
}
622
/// Convenience wrapper: exports the default (unified) dashboard HTML
/// without async task data.
///
/// See [`export_dashboard_html_with_template`] for the full behavior.
pub fn export_dashboard_html<P: AsRef<Path>>(
    path: P,
    tracker: &Tracker,
    passport_tracker: &Arc<MemoryPassportTracker>,
) -> MemScopeResult<()> {
    export_dashboard_html_with_template(
        path,
        tracker,
        passport_tracker,
        DashboardTemplate::default(),
        None,
    )
}
641
/// Convenience wrapper: exports the default (unified) dashboard HTML,
/// including async task data from `async_tracker`.
///
/// See [`export_dashboard_html_with_template`] for the full behavior.
pub fn export_dashboard_html_with_async<P: AsRef<Path>>(
    path: P,
    tracker: &Tracker,
    passport_tracker: &Arc<MemoryPassportTracker>,
    async_tracker: &Arc<crate::capture::backends::async_tracker::AsyncTracker>,
) -> MemScopeResult<()> {
    export_dashboard_html_with_template(
        path,
        tracker,
        passport_tracker,
        DashboardTemplate::default(),
        Some(async_tracker),
    )
}
657
658pub fn export_dashboard_html_with_template<P: AsRef<Path>>(
663 path: P,
664 tracker: &Tracker,
665 passport_tracker: &Arc<MemoryPassportTracker>,
666 template: DashboardTemplate,
667 async_tracker: Option<&Arc<crate::capture::backends::async_tracker::AsyncTracker>>,
668) -> MemScopeResult<()> {
669 let path_ref = path.as_ref();
670
671 std::fs::create_dir_all(path_ref).map_err(|e| {
673 MemScopeError::error(
674 "export",
675 "export_dashboard_html_with_template",
676 format!("Failed to create output directory: {}", e),
677 )
678 })?;
679
680 let renderer = DashboardRenderer::new().map_err(|e| {
682 MemScopeError::error(
683 "export",
684 "export_dashboard_html_with_template",
685 format!("Failed to create dashboard renderer: {}", e),
686 )
687 })?;
688
689 let context = renderer
691 .build_context_from_tracker_with_async(tracker, passport_tracker, async_tracker)
692 .map_err(|e| {
693 MemScopeError::error(
694 "export",
695 "export_dashboard_html_with_template",
696 format!("Failed to build context: {}", e),
697 )
698 })?;
699
700 let html_content = match template {
701 DashboardTemplate::Final => renderer.render_final_dashboard(&context).map_err(|e| {
702 MemScopeError::error(
703 "export",
704 "export_dashboard_html_with_template",
705 format!("Failed to render final dashboard: {}", e),
706 )
707 })?,
708 DashboardTemplate::Unified => renderer.render_unified_dashboard(&context).map_err(|e| {
709 MemScopeError::error(
710 "export",
711 "export_dashboard_html_with_template",
712 format!("Failed to render dashboard: {}", e),
713 )
714 })?,
715 };
716
717 let output_file = path_ref.join(format!("{}_dashboard.html", template));
719 std::fs::write(&output_file, html_content).map_err(|e| {
720 MemScopeError::error(
721 "export",
722 "export_dashboard_html_with_template",
723 format!("Failed to write HTML file: {}", e),
724 )
725 })?;
726
727 tracing::info!("✅ Dashboard HTML exported to: {:?}", output_file);
728
729 Ok(())
730}
731
732pub fn export_memory_passports_json<P: AsRef<Path>>(
733 base_path: P,
734 passport_tracker: &Arc<MemoryPassportTracker>,
735) -> MemScopeResult<()> {
736 let base_path = base_path.as_ref();
737 let passports = passport_tracker.get_all_passports();
738
739 let passport_data: Vec<_> = passports
740 .values()
741 .map(|p| {
742 serde_json::json!({
743 "passport_id": p.passport_id,
744 "allocation_ptr": format!("0x{:x}", p.allocation_ptr),
745 "size_bytes": p.size_bytes,
746 "created_at": p.created_at,
747 "lifecycle_events": p.lifecycle_events.len(),
748 "status": format!("{:?}", p.status_at_shutdown),
749 })
750 })
751 .collect();
752
753 let json_data = serde_json::json!({
754 "metadata": {
755 "export_version": "2.0",
756 "specification": "memory passport tracking",
757 "total_passports": passports.len()
758 },
759 "memory_passports": passport_data,
760 });
761
762 let file_path = base_path.join("memory_passports.json");
763 let json_string = serde_json::to_string_pretty(&json_data).map_err(|e| {
764 MemScopeError::error("export", "export_memory_passports_json", e.to_string())
765 })?;
766 std::fs::write(&file_path, json_string).map_err(|e| {
767 MemScopeError::error("export", "export_memory_passports_json", e.to_string())
768 })?;
769
770 Ok(())
771}
772
773pub fn export_leak_detection_json<P: AsRef<Path>>(
774 base_path: P,
775 passport_tracker: &Arc<MemoryPassportTracker>,
776) -> MemScopeResult<()> {
777 let base_path = base_path.as_ref();
778 let leak_result = passport_tracker.detect_leaks_at_shutdown();
779
780 let leak_details: Vec<_> = leak_result
781 .leak_details
782 .iter()
783 .map(|detail| {
784 serde_json::json!({
785 "passport_id": detail.passport_id,
786 "memory_address": format!("0x{:x}", detail.memory_address),
787 "size_bytes": detail.size_bytes,
788 "lifecycle_summary": detail.lifecycle_summary,
789 })
790 })
791 .collect();
792
793 let json_data = serde_json::json!({
794 "metadata": {
795 "export_version": "2.0",
796 "specification": "leak detection",
797 "leaks_detected": leak_result.total_leaks
798 },
799 "leak_detection": {
800 "total_leaks": leak_result.total_leaks,
801 "leak_details": leak_details
802 }
803 });
804
805 let file_path = base_path.join("leak_detection.json");
806 let json_string = serde_json::to_string_pretty(&json_data)
807 .map_err(|e| MemScopeError::error("export", "export_leak_detection_json", e.to_string()))?;
808 std::fs::write(&file_path, json_string)
809 .map_err(|e| MemScopeError::error("export", "export_leak_detection_json", e.to_string()))?;
810
811 Ok(())
812}
813
814pub fn export_unsafe_ffi_json<P: AsRef<Path>>(
815 base_path: P,
816 passport_tracker: &Arc<MemoryPassportTracker>,
817) -> MemScopeResult<()> {
818 use crate::analysis::memory_passport_tracker::PassportStatus;
819
820 let base_path = base_path.as_ref();
821 let passports = passport_tracker.get_all_passports();
822
823 let ffi_reports: Vec<_> = passports
824 .values()
825 .filter(|p| {
826 matches!(
827 p.status_at_shutdown,
828 PassportStatus::HandoverToFfi
829 | PassportStatus::InForeignCustody
830 | PassportStatus::FreedByForeign
831 )
832 })
833 .map(|p| {
834 serde_json::json!({
835 "passport_id": p.passport_id,
836 "allocation_ptr": format!("0x{:x}", p.allocation_ptr),
837 "size_bytes": p.size_bytes,
838 "status": format!("{:?}", p.status_at_shutdown),
839 "created_at": p.created_at,
840 "boundary_events": p.lifecycle_events.iter().map(|e| {
841 serde_json::json!({
842 "timestamp": e.timestamp,
843 "event_type": format!("{:?}", e.event_type),
844 "context": e.context,
845 })
846 }).collect::<Vec<_>>(),
847 })
848 })
849 .collect();
850
851 let json_data = serde_json::json!({
852 "metadata": {
853 "export_version": "2.0",
854 "specification": "unsafe FFI tracking",
855 "total_ffi_reports": ffi_reports.len(),
856 "total_memory_passports": passports.len()
857 },
858 "unsafe_reports": ffi_reports,
859 "memory_passports": passports.len()
860 });
861
862 let file_path = base_path.join("unsafe_ffi.json");
863 let json_string = serde_json::to_string_pretty(&json_data)
864 .map_err(|e| MemScopeError::error("export", "export_unsafe_ffi_json", e.to_string()))?;
865 std::fs::write(&file_path, json_string)
866 .map_err(|e| MemScopeError::error("export", "export_unsafe_ffi_json", e.to_string()))?;
867
868 Ok(())
869}
870
/// Writes `system_resources.json`: platform-level system information
/// (OS, CPU cache, MMU) and memory statistics (virtual/physical/process/
/// system memory plus pressure indicators) collected via
/// `PlatformMemoryInfo`.
///
/// # Errors
/// Returns a `MemScopeError` when stats or system-info collection fails,
/// or when the output cannot be serialized or written.
pub fn export_system_resources_json<P: AsRef<Path>>(base_path: P) -> MemScopeResult<()> {
    let base_path = base_path.as_ref();

    let mut memory_info = PlatformMemoryInfo::new();
    // Best-effort init: the error is deliberately ignored; the collect
    // calls below report their own failures.
    let _ = memory_info.initialize();

    let memory_stats = match memory_info.collect_stats() {
        Ok(stats) => stats,
        Err(e) => {
            // Log before converting so the original error reaches the trace.
            warn!(error = %e, "Failed to collect memory stats");
            return Err(MemScopeError::error(
                "export",
                "export_system_resources_json",
                e.to_string(),
            ));
        }
    };

    let system_info = match memory_info.get_system_info() {
        Ok(info) => info,
        Err(e) => {
            warn!(error = %e, "Failed to collect system info");
            return Err(MemScopeError::error(
                "export",
                "export_system_resources_json",
                e.to_string(),
            ));
        }
    };

    // Flatten the collected structs into the report's fixed JSON schema.
    let json_data = serde_json::json!({
        "metadata": {
            "export_version": "2.0",
            "specification": "system resource monitoring",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "system_info": {
            "os_name": system_info.os_name,
            "os_version": system_info.os_version,
            "architecture": system_info.architecture,
            "cpu_cores": system_info.cpu_cores,
            "page_size": system_info.page_size,
            "large_page_size": system_info.large_page_size,
            "cpu_cache": {
                "l1_cache_size": system_info.cpu_cache.l1_cache_size,
                "l2_cache_size": system_info.cpu_cache.l2_cache_size,
                "l3_cache_size": system_info.cpu_cache.l3_cache_size,
                "cache_line_size": system_info.cpu_cache.cache_line_size
            },
            "mmu_info": {
                "virtual_address_bits": system_info.mmu_info.virtual_address_bits,
                "physical_address_bits": system_info.mmu_info.physical_address_bits,
                "aslr_enabled": system_info.mmu_info.aslr_enabled,
                "nx_bit_supported": system_info.mmu_info.nx_bit_supported
            }
        },
        "memory_stats": {
            "virtual_memory": {
                "total_virtual": memory_stats.virtual_memory.total_virtual,
                "available_virtual": memory_stats.virtual_memory.available_virtual,
                "used_virtual": memory_stats.virtual_memory.used_virtual,
                "reserved": memory_stats.virtual_memory.reserved,
                "committed": memory_stats.virtual_memory.committed
            },
            "physical_memory": {
                "total_physical": memory_stats.physical_memory.total_physical,
                "available_physical": memory_stats.physical_memory.available_physical,
                "used_physical": memory_stats.physical_memory.used_physical,
                "cached": memory_stats.physical_memory.cached,
                "buffers": memory_stats.physical_memory.buffers,
                "swap": {
                    "total_swap": memory_stats.physical_memory.swap.total_swap,
                    "used_swap": memory_stats.physical_memory.swap.used_swap,
                    "available_swap": memory_stats.physical_memory.swap.available_swap,
                    "swap_in_rate": memory_stats.physical_memory.swap.swap_in_rate,
                    "swap_out_rate": memory_stats.physical_memory.swap.swap_out_rate
                }
            },
            "process_memory": {
                "virtual_size": memory_stats.process_memory.virtual_size,
                "resident_size": memory_stats.process_memory.resident_size,
                "shared_size": memory_stats.process_memory.shared_size,
                "private_size": memory_stats.process_memory.private_size,
                "heap_size": memory_stats.process_memory.heap_size,
                "stack_size": memory_stats.process_memory.stack_size,
                "mapped_files": memory_stats.process_memory.mapped_files,
                "peak_usage": memory_stats.process_memory.peak_usage
            },
            "system_memory": {
                "allocation_count": memory_stats.system_memory.allocation_count,
                "deallocation_count": memory_stats.system_memory.deallocation_count,
                "active_allocations": memory_stats.system_memory.active_allocations,
                "total_allocated": memory_stats.system_memory.total_allocated,
                "total_deallocated": memory_stats.system_memory.total_deallocated,
                "fragmentation_level": memory_stats.system_memory.fragmentation_level,
                "large_pages": {
                    "supported": memory_stats.system_memory.large_pages.supported,
                    "total_large_pages": memory_stats.system_memory.large_pages.total_large_pages,
                    "used_large_pages": memory_stats.system_memory.large_pages.used_large_pages,
                    "page_size": memory_stats.system_memory.large_pages.page_size
                }
            },
            "pressure_indicators": {
                "pressure_level": format!("{:?}", memory_stats.pressure_indicators.pressure_level),
                "low_memory": memory_stats.pressure_indicators.low_memory,
                "swapping_active": memory_stats.pressure_indicators.swapping_active,
                "allocation_failure_rate": memory_stats.pressure_indicators.allocation_failure_rate,
                "gc_pressure": memory_stats.pressure_indicators.gc_pressure
            }
        }
    });

    let file_path = base_path.join("system_resources.json");
    let json_string = serde_json::to_string_pretty(&json_data).map_err(|e| {
        MemScopeError::error("export", "export_system_resources_json", e.to_string())
    })?;
    std::fs::write(&file_path, json_string).map_err(|e| {
        MemScopeError::error("export", "export_system_resources_json", e.to_string())
    })?;

    Ok(())
}
1005
/// Writes `ownership_graph.json`: the ownership graph built from the
/// given allocations and event store — nodes, edges, detected reference
/// cycles, diagnostic issues (Rc cycles, Arc clone storms) and, when one
/// can be determined, the root cause of memory growth.
///
/// Node/edge identifiers are serialized as hex pointer strings (`0x…`).
///
/// # Errors
/// Returns a `MemScopeError` when serialization or the file write fails.
pub fn export_ownership_graph_json<P: AsRef<Path>>(
    base_path: P,
    allocations: &[crate::capture::types::AllocationInfo],
    event_store: &crate::event_store::EventStore,
) -> MemScopeResult<()> {
    let base_path = base_path.as_ref();

    let graph = build_ownership_graph_from_allocations(allocations, event_store);

    // 50: presumably the Arc clone-count threshold reported in
    // ArcCloneStorm issues below — TODO confirm against `diagnostics`.
    let diagnostics = graph.diagnostics(50);

    // Graph nodes: hex id, type name and allocation size.
    let nodes_json: Vec<_> = graph
        .nodes
        .iter()
        .map(|node| {
            json!({
                "id": format!("0x{:x}", node.id.0),
                "type_name": node.type_name,
                "size": node.size,
            })
        })
        .collect();

    // Graph edges: endpoints plus the ownership relation kind.
    let edges_json: Vec<_> = graph
        .edges
        .iter()
        .map(|edge| {
            json!({
                "from": format!("0x{:x}", edge.from.0),
                "to": format!("0x{:x}", edge.to.0),
                "kind": match edge.op {
                    EdgeKind::Owns => "Owns",
                    EdgeKind::Contains => "Contains",
                    EdgeKind::Borrows => "Borrows",
                    EdgeKind::RcClone => "RcClone",
                    EdgeKind::ArcClone => "ArcClone",
                },
            })
        })
        .collect();

    // Detected reference cycles as lists of member node ids.
    let cycles_json: Vec<_> = graph
        .cycles
        .iter()
        .map(|cycle| {
            let nodes: Vec<_> = cycle.iter().map(|id| format!("0x{:x}", id.0)).collect();
            json!({
                "nodes": nodes,
            })
        })
        .collect();

    // Diagnostic issues: Rc cycles are errors, Arc clone storms warnings.
    let issues_json: Vec<_> = diagnostics
        .issues
        .iter()
        .map(|issue| match issue {
            crate::analysis::ownership_graph::DiagnosticIssue::RcCycle { nodes, cycle_type } => {
                json!({
                    "type": "RcCycle",
                    "cycle_type": format!("{:?}", cycle_type),
                    "nodes": nodes.iter().map(|id| format!("0x{:x}", id.0)).collect::<Vec<_>>(),
                    "severity": "error",
                })
            }
            crate::analysis::ownership_graph::DiagnosticIssue::ArcCloneStorm {
                clone_count,
                threshold,
            } => {
                json!({
                    "type": "ArcCloneStorm",
                    "clone_count": clone_count,
                    "threshold": threshold,
                    "severity": "warning",
                })
            }
        })
        .collect();

    // Optional root-cause verdict; serialized as null when absent.
    let root_cause_json = graph.find_root_cause().map(|rc| {
        json!({
            "cause": match rc.root_cause {
                crate::analysis::ownership_graph::RootCause::ArcCloneStorm => "ArcCloneStorm",
                crate::analysis::ownership_graph::RootCause::RcCycle => "RcCycle",
            },
            "description": rc.description,
            "impact": rc.impact,
        })
    });

    let json_data = json!({
        "metadata": {
            "export_version": "2.0",
            "specification": "ownership graph analysis",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "summary": {
            "total_nodes": graph.nodes.len(),
            "total_edges": graph.edges.len(),
            "total_cycles": graph.cycles.len(),
            "rc_clone_count": diagnostics.rc_clone_count,
            "arc_clone_count": diagnostics.arc_clone_count,
            "has_issues": diagnostics.has_issues(),
        },
        "nodes": nodes_json,
        "edges": edges_json,
        "cycles": cycles_json,
        "diagnostics": {
            "issues": issues_json,
            "root_cause": root_cause_json,
        },
    });

    let file_path = base_path.join("ownership_graph.json");
    let json_string = serde_json::to_string_pretty(&json_data).map_err(|e| {
        MemScopeError::error("export", "export_ownership_graph_json", e.to_string())
    })?;
    std::fs::write(&file_path, json_string).map_err(|e| {
        MemScopeError::error("export", "export_ownership_graph_json", e.to_string())
    })?;

    Ok(())
}
1145
/// Builds an [`OwnershipGraph`] from raw allocation records plus container
/// metadata events recorded in the event store.
///
/// Pipeline, as implemented below:
/// 1. Convert every allocation into a "passport" (node id, type name, size,
///    single `Create` event) and build the initial graph from them.
/// 2. Collect the still-live, real-pointer allocations as `HeapOwner` entries.
/// 3. Pull container-typed `Metadata` events (HashMap/BTreeMap/...) from the
///    event store and synthesize virtual allocations for them.
/// 4. Run container detection and relation inference over those sets, then
///    translate the resulting index-based edges into graph edges.
///
/// NOTE(review): this function never fails; inconsistent inputs are silently
/// skipped via the `continue` guards in the final loop.
fn build_ownership_graph_from_allocations(
    allocations: &[crate::capture::types::AllocationInfo],
    event_store: &crate::event_store::EventStore,
) -> OwnershipGraph {
    debug!(
        allocations = allocations.len(),
        "Starting build_ownership_graph"
    );
    use crate::analysis::relation_inference::{detect_containers, Relation, RelationGraphBuilder};
    use crate::event_store::MemoryEventType;

    // Step 1: one passport per allocation, in input order. Allocations with a
    // null pointer get a synthetic pointer (VIRTUAL_PTR_BASE + index) so every
    // passport still has a unique NodeId.
    debug!("Converting allocations to passports");
    let passports: Vec<(
        NodeId,
        String,
        usize,
        Vec<crate::analysis::ownership_graph::OwnershipEvent>,
    )> = allocations
        .iter()
        .enumerate()
        .map(|(idx, alloc)| {
            let unique_ptr = if alloc.ptr == 0 {
                crate::analysis::VIRTUAL_PTR_BASE + idx
            } else {
                alloc.ptr
            };
            let id = NodeId::from_ptr(unique_ptr);
            let type_name = alloc
                .type_name
                .clone()
                .unwrap_or_else(|| "unknown".to_string());
            let size = alloc.size;

            // Every passport starts with exactly one Create event at the
            // allocation timestamp; no Drop/Move events are synthesized here.
            let events = vec![crate::analysis::ownership_graph::OwnershipEvent::new(
                alloc.timestamp_alloc,
                OwnershipOp::Create,
                id,
                None,
            )];

            // NOTE(review): dead code — this branch is empty. It looks like a
            // placeholder for Rc/Arc-specific event synthesis that was removed
            // or never written; confirm intent and either implement or delete.
            if type_name.contains("Arc<") || type_name.contains("Rc<") {
            }

            (id, type_name, size, events)
        })
        .collect();
    debug!(passports = passports.len(), "Created passports");

    debug!("Building initial ownership graph");
    let mut graph = OwnershipGraph::build(&passports);
    debug!(
        nodes = graph.nodes.len(),
        edges = graph.edges.len(),
        "Initial graph built"
    );

    // NOTE(review): computed but unused (leading underscore). It appears
    // intended to map filtered heap-owner indices back to original allocation
    // indices, but it applies only the liveness filter — not the null/virtual
    // pointer filter applied to `heap_owner_allocations` below — so even if
    // wired up it would be misaligned. Verify and either fix or remove.
    let _heap_owner_original_indices: Vec<usize> = allocations
        .iter()
        .enumerate()
        .filter(|(_, a)| a.timestamp_dealloc.is_none())
        .map(|(i, _)| i)
        .collect();

    // Step 2: live allocations with a real heap pointer become HeapOwner
    // entries for relation inference. Deallocated, null-ptr, and virtual-ptr
    // records are excluded — this is a *subset* of `allocations`.
    let heap_owner_allocations: Vec<ActiveAllocation> = allocations
        .iter()
        .enumerate()
        .filter(|(_, a)| a.timestamp_dealloc.is_none())
        .filter_map(|(_idx, a)| {
            if a.ptr == 0 || is_virtual_pointer(a.ptr) {
                return None;
            }

            Some(ActiveAllocation {
                ptr: Some(a.ptr),
                kind: crate::core::types::TrackKind::HeapOwner {
                    ptr: a.ptr,
                    size: a.size,
                },
                size: a.size,
                allocated_at: a.timestamp_alloc,
                var_name: a.var_name.clone(),
                type_name: a.type_name.clone(),
                thread_id: a.thread_id_u64,
                call_stack_hash: None,
            })
        })
        .collect();

    // Only consider container events from threads that actually own live heap
    // allocations; events from other threads are irrelevant to this graph.
    let valid_thread_ids: std::collections::HashSet<u64> =
        heap_owner_allocations.iter().map(|a| a.thread_id).collect();

    // Step 3: container candidates = Metadata events whose type name matches
    // one of the known container families (substring match on the type name).
    let container_events: Vec<_> = event_store
        .snapshot()
        .into_iter()
        .filter(|e| e.event_type == MemoryEventType::Metadata)
        .filter(|e| valid_thread_ids.contains(&e.thread_id))
        .filter_map(|e| {
            let type_name = e.type_name.clone().unwrap_or_default();
            let var_name = e.var_name.clone().unwrap_or_default();
            let is_container = type_name.contains("HashMap")
                || type_name.contains("BTreeMap")
                || type_name.contains("VecDeque")
                || type_name.contains("RefCell")
                || type_name.contains("RwLock");
            if is_container {
                return Some((e, type_name, var_name));
            }
            None
        })
        .collect();

    // Give each container a synthetic virtual pointer so it can participate
    // in relation inference alongside the real heap allocations.
    let container_allocations: Vec<ActiveAllocation> = container_events
        .iter()
        .enumerate()
        .map(|(idx, (e, type_name, var_name))| {
            // NOTE(review): magic base 0x300000000 duplicates the
            // CONTAINER_PTR_BASE const declared further down — keep them in
            // sync (ideally a single shared const). Also, `as usize` truncates
            // on 32-bit targets; `usize::try_from` would fail loudly instead.
            let virtual_ptr = 0x300000000u64 as usize + idx;
            ActiveAllocation {
                ptr: Some(virtual_ptr),
                kind: crate::core::types::TrackKind::Container,
                size: e.size.max(1), // clamp to 1 so zero-size containers are still visible
                allocated_at: e.timestamp,
                var_name: Some(var_name.clone()),
                type_name: Some(type_name.clone()),
                thread_id: e.thread_id,
                call_stack_hash: None,
            }
        })
        .collect();

    // Combined vector layout for container detection:
    // [0 .. heap_owner_allocations.len())  -> heap owners
    // [heap_owner_allocations.len() .. )   -> containers
    let mut all_for_relation: Vec<ActiveAllocation> = Vec::new();
    all_for_relation.extend(heap_owner_allocations.clone());
    all_for_relation.extend(container_allocations.clone());

    debug!("Running container detection");
    // Heuristic tuning: 10ms pairing window, generous size ratio, 10-element
    // lookahead. NOTE(review): values are hard-coded — confirm they match the
    // defaults documented in relation_inference.
    let container_config = crate::analysis::relation_inference::ContainerConfig {
        time_window_ns: 10_000_000, size_ratio: 10000, lookahead: 10, };

    let container_edges = detect_containers(&all_for_relation, Some(container_config));
    debug!(
        container_edges = container_edges.len(),
        "Container detection completed"
    );

    debug!("Running RelationGraphBuilder");
    // Relation inference runs on heap owners only; its edges carry indices
    // into `heap_owner_allocations`.
    let relation_graph = RelationGraphBuilder::build(&heap_owner_allocations, None);
    debug!(
        edges = relation_graph.edges.len(),
        "RelationGraphBuilder completed"
    );

    const CONTAINER_PTR_BASE: usize = 0x300000000;
    // Snapshot the node count BEFORE container nodes are appended: this is
    // used below as both the container-section offset and the heap-section
    // bound. NOTE(review): graph.nodes was built from ALL passports (every
    // allocation), while the index spaces of `relation_graph.edges` and
    // `container_edges` are based on the FILTERED `heap_owner_allocations`.
    // These only line up when every allocation is live with a real pointer;
    // otherwise the index arithmetic below maps edges to the wrong nodes (or
    // relies on the `continue` guards to drop them). The unused
    // `_heap_owner_original_indices` above suggests a remapping was planned
    // but never wired in — verify against OwnershipGraph::build's node order.
    let heap_owner_count = graph.nodes.len();
    for (idx, (e, type_name, _var_name)) in container_events.iter().enumerate() {
        let node_id = NodeId::from_ptr(CONTAINER_PTR_BASE + idx);
        graph.nodes.push(crate::analysis::ownership_graph::Node {
            id: node_id,
            type_name: type_name.clone(),
            size: e.size,
        });
    }

    // Translate relation-inference edges into graph edges.
    // NOTE(review): `edge.from`/`edge.to` index `heap_owner_allocations`, yet
    // are used directly as indices into `graph.nodes` — see the index-space
    // caveat above. Indexing will panic only if an index exceeds the total
    // node count; misalignment short of that is silent.
    for edge in &relation_graph.edges {
        let from_id = graph.nodes[edge.from].id;
        let to_id = graph.nodes[edge.to].id;

        let edge_kind = match edge.relation {
            Relation::Owns => EdgeKind::Owns,
            Relation::Contains => EdgeKind::Contains,
            Relation::Slice => EdgeKind::Borrows,
            Relation::Clone => EdgeKind::RcClone,
            Relation::Shares => EdgeKind::ArcClone,
            // Evolution has no dedicated EdgeKind; folded into Contains.
            Relation::Evolution => EdgeKind::Contains,
        };

        graph.edges.push(crate::analysis::ownership_graph::Edge {
            from: from_id,
            to: to_id,
            op: edge_kind,
        });
    }

    // Keep only container -> heap-owner containment edges; everything else
    // from the detector is ignored.
    for edge in &container_edges {
        let from_all_idx = edge.from;
        let to_all_idx = edge.to;

        // Require: `from` in the container section, `to` in the heap section
        // of `all_for_relation`. NOTE(review): `heap_owner_count` is
        // graph.nodes.len() (all allocations), but the section boundary in
        // `all_for_relation` is heap_owner_allocations.len() — these differ
        // whenever any allocation was filtered out (see caveat above).
        if from_all_idx < heap_owner_count || to_all_idx >= heap_owner_count {
            continue; }

        let container_graph_idx = from_all_idx - heap_owner_count;
        // Defensive bound: drop edges pointing past the known container list.
        if container_graph_idx >= container_events.len() {
            continue; }

        // Container nodes were appended right after the first
        // `heap_owner_count` nodes, so this offset addresses them directly.
        let from_id = graph.nodes[heap_owner_count + container_graph_idx].id;
        let to_id = graph.nodes[to_all_idx].id;

        graph.edges.push(crate::analysis::ownership_graph::Edge {
            from: from_id,
            to: to_id,
            op: EdgeKind::Contains,
        });
    }

    debug!(
        nodes = graph.nodes.len(),
        edges = graph.edges.len(),
        "Final ownership graph built"
    );
    graph
}
1387
#[cfg(test)]
mod tests {
    use super::*;

    /// All four optimization levels remain constructible.
    #[test]
    fn test_optimization_level_variants() {
        let _ = (
            OptimizationLevel::Low,
            OptimizationLevel::Medium,
            OptimizationLevel::High,
            OptimizationLevel::Maximum,
        );
    }

    /// `Default` must resolve to the `Medium` level.
    #[test]
    fn test_optimization_level_default() {
        assert!(
            matches!(OptimizationLevel::default(), OptimizationLevel::Medium),
            "Default should be Medium"
        );
    }

    /// A freshly-constructed (non-strict) validator accepts any JSON object.
    #[test]
    fn test_schema_validator_new() {
        let payload = serde_json::json!({"test": "value"});
        assert!(
            SchemaValidator::new().validate(&payload).is_ok(),
            "Validation should pass for any object"
        );
    }

    /// Strict mode rejects objects missing required fields and accepts
    /// objects that carry all of them.
    #[test]
    fn test_schema_validator_strict_mode() {
        let strict = SchemaValidator::new().with_strict_mode(true);

        let incomplete = serde_json::json!({"other": "data"});
        assert!(
            strict.validate(&incomplete).is_err(),
            "Should fail with missing required fields"
        );

        let complete = serde_json::json!({
            "timestamp": 123,
            "allocations": [],
            "stats": {}
        });
        assert!(
            strict.validate(&complete).is_ok(),
            "Should pass with all required fields"
        );
    }

    /// Non-object JSON values are always rejected.
    #[test]
    fn test_schema_validator_non_object() {
        let payload = serde_json::json!("not an object");
        assert!(
            SchemaValidator::new().validate(&payload).is_err(),
            "Should reject non-object data"
        );
    }

    /// Spot-check the documented defaults of `ExportJsonOptions`.
    #[test]
    fn test_export_json_options_default() {
        let opts = ExportJsonOptions::default();
        assert!(
            opts.parallel_processing,
            "parallel_processing should be true by default"
        );
        assert!(
            opts.streaming_writer,
            "streaming_writer should be true by default"
        );
        assert!(
            opts.enable_type_cache,
            "enable_type_cache should be true by default"
        );
        assert!(
            opts.adaptive_optimization,
            "adaptive_optimization should be true by default"
        );
        assert!(
            !opts.schema_validation,
            "schema_validation should be false by default"
        );
        assert!(
            !opts.security_analysis,
            "security_analysis should be false by default"
        );
    }

    /// Every builder setter must land on the corresponding field.
    #[test]
    fn test_export_json_options_builders() {
        let opts = ExportJsonOptions::default()
            .fast_export_mode(true)
            .security_analysis(true)
            .streaming_writer(false)
            .schema_validation(true)
            .integrity_hashes(true)
            .batch_size(500)
            .adaptive_optimization(false)
            .max_cache_size(5000)
            .include_low_severity(true)
            .thread_count(Some(4));

        assert!(opts.fast_export_mode, "fast_export_mode should be true");
        assert!(opts.security_analysis, "security_analysis should be true");
        assert!(!opts.streaming_writer, "streaming_writer should be false");
        assert!(opts.schema_validation, "schema_validation should be true");
        assert!(opts.integrity_hashes, "integrity_hashes should be true");
        assert_eq!(opts.batch_size, 500, "batch_size should be 500");
        assert!(
            !opts.adaptive_optimization,
            "adaptive_optimization should be false"
        );
        assert_eq!(opts.max_cache_size, 5000, "max_cache_size should be 5000");
        assert!(
            opts.include_low_severity,
            "include_low_severity should be true"
        );
        assert_eq!(opts.thread_count, Some(4), "thread_count should be Some(4)");
    }

    /// Each `ExportError` variant must render its distinguishing prefix.
    #[test]
    fn test_export_error_variants() {
        let from_io = ExportError::Io(std::io::Error::new(std::io::ErrorKind::NotFound, "test"));
        let from_json = ExportError::Json(serde_json::from_str::<i32>("invalid").unwrap_err());
        let from_export = ExportError::ExportFailed("test error".to_string());

        assert!(
            from_io.to_string().contains("IO error"),
            "Should contain IO error"
        );
        assert!(
            from_json.to_string().contains("JSON error"),
            "Should contain JSON error"
        );
        assert!(
            from_export.to_string().contains("Export failed"),
            "Should contain Export failed"
        );
    }

    /// Size estimation must be positive for every JSON value category.
    #[test]
    fn test_estimate_json_size() {
        assert!(
            estimate_json_size(&serde_json::json!("hello world")) > 0,
            "String size should be positive"
        );
        assert!(
            estimate_json_size(&serde_json::json!(42)) > 0,
            "Number size should be positive"
        );
        assert!(
            estimate_json_size(&serde_json::json!([1, 2, 3])) > 0,
            "Array size should be positive"
        );
        assert!(
            estimate_json_size(&serde_json::json!({"key": "value"})) > 0,
            "Object size should be positive"
        );
    }

    /// Table-driven check of the type-name -> category classifier.
    #[test]
    fn test_get_or_compute_type_info() {
        let cases: [(&str, usize, &str, &str); 8] = [
            ("Vec<i32>", 100, "dynamic_array", "Vec should be dynamic_array"),
            ("String", 24, "string", "String should be string"),
            ("Box<i32>", 8, "smart_pointer", "Box should be smart_pointer"),
            ("Rc<String>", 8, "smart_pointer", "Rc should be smart_pointer"),
            ("Arc<i32>", 16, "smart_pointer", "Arc should be smart_pointer"),
            ("[u8; 100]", 100, "byte_array", "u8 array should be byte_array"),
            (
                "CustomType",
                1024 * 1024 * 2,
                "large_buffer",
                "Large allocation should be large_buffer",
            ),
            ("MyType", 100, "custom", "Unknown type should be custom"),
        ];

        for (type_name, size, expected, msg) in cases {
            assert_eq!(get_or_compute_type_info(type_name, size), expected, "{}", msg);
        }
    }
}