1use tracing::{debug, warn};
7
8use crate::analysis::is_virtual_pointer;
9use crate::analysis::memory_passport_tracker::MemoryPassportTracker;
10use crate::analysis::node_id::NodeId;
11use crate::analysis::ownership_graph::{EdgeKind, OwnershipGraph, OwnershipOp};
12use crate::capture::platform::memory_info::PlatformMemoryInfo;
13use crate::core::{MemScopeError, MemScopeResult};
14use crate::render_engine::dashboard::{rebuild_allocations_from_events, DashboardRenderer};
15use crate::snapshot::{ActiveAllocation, MemorySnapshot, ThreadMemoryStats};
16use crate::tracker::Tracker;
17use rayon::prelude::*;
18use serde_json::json;
19use std::{
20 collections::HashMap,
21 fs::File,
22 io::{BufWriter, Write},
23 path::Path,
24 sync::Arc,
25};
26
/// Coarse optimization level hint for export processing.
///
/// NOTE(review): not referenced anywhere in the visible portion of this
/// file — presumably consumed by callers elsewhere; confirm before removing.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum OptimizationLevel {
    Low,
    // Balanced default.
    #[default]
    Medium,
    High,
    Maximum,
}
40
/// Validates the shape of export JSON payloads before they are written.
#[derive(Debug, Clone, Default)]
pub struct SchemaValidator {
    // When true, `validate` also requires the top-level keys
    // "timestamp", "allocations" and "stats" to be present.
    strict_mode: bool,
}
46
47impl SchemaValidator {
48 pub fn new() -> Self {
49 Self { strict_mode: false }
50 }
51
52 pub fn with_strict_mode(mut self, strict: bool) -> Self {
53 self.strict_mode = strict;
54 self
55 }
56
57 pub fn validate(&self, data: &serde_json::Value) -> Result<(), String> {
58 if !data.is_object() {
59 return Err("Export data must be a JSON object".to_string());
60 }
61
62 let obj = data.as_object().ok_or("Invalid JSON object")?;
63
64 if self.strict_mode {
65 let required_fields = ["timestamp", "allocations", "stats"];
66 for field in &required_fields {
67 if !obj.contains_key(*field) {
68 return Err(format!("Missing required field: {}", field));
69 }
70 }
71 }
72
73 Ok(())
74 }
75}
76
/// Tuning knobs for JSON export.
///
/// NOTE(review): only `parallel_processing`, `batch_size`, `buffer_size`,
/// `use_compact_format` and `streaming_writer` are consulted by the code
/// visible in this file; the remaining fields are presumably read elsewhere —
/// confirm before relying on them.
#[derive(Debug, Clone)]
pub struct ExportJsonOptions {
    // Process allocation batches on multiple rayon threads when the
    // allocation count exceeds `batch_size`.
    pub parallel_processing: bool,
    // Capacity (bytes) of the BufWriter used by the streaming write path.
    pub buffer_size: usize,
    // None = auto: compact output when the estimated payload exceeds ~1 MB.
    pub use_compact_format: Option<bool>,
    pub enable_type_cache: bool,
    // Threshold above which parallel processing kicks in.
    pub batch_size: usize,
    // Stream large payloads through a BufWriter instead of building a String.
    pub streaming_writer: bool,
    pub schema_validation: bool,
    pub adaptive_optimization: bool,
    pub max_cache_size: usize,
    pub security_analysis: bool,
    pub include_low_severity: bool,
    pub integrity_hashes: bool,
    pub fast_export_mode: bool,
    pub auto_fast_export_threshold: Option<usize>,
    pub thread_count: Option<usize>,
}
95
impl Default for ExportJsonOptions {
    /// Defaults favor throughput: parallel batching, a 256 KiB write buffer,
    /// streaming writes, and most optional analyses turned off.
    fn default() -> Self {
        Self {
            parallel_processing: true,
            buffer_size: 256 * 1024, // 256 KiB BufWriter capacity
            use_compact_format: None, // auto-select based on estimated size
            enable_type_cache: true,
            batch_size: 1000,
            streaming_writer: true,
            schema_validation: false,
            adaptive_optimization: true,
            max_cache_size: 10_000,
            security_analysis: false,
            include_low_severity: false,
            integrity_hashes: false,
            fast_export_mode: false,
            auto_fast_export_threshold: Some(10_000),
            thread_count: None, // None = let the runtime decide
        }
    }
}
117
impl ExportJsonOptions {
    /// Enables or disables fast export mode (builder-style).
    pub fn fast_export_mode(mut self, enabled: bool) -> Self {
        self.fast_export_mode = enabled;
        self
    }

    /// Enables or disables security analysis during export.
    pub fn security_analysis(mut self, enabled: bool) -> Self {
        self.security_analysis = enabled;
        self
    }

    /// Enables or disables the streaming BufWriter path for large payloads.
    pub fn streaming_writer(mut self, enabled: bool) -> Self {
        self.streaming_writer = enabled;
        self
    }

    /// Enables or disables schema validation of the export payload.
    pub fn schema_validation(mut self, enabled: bool) -> Self {
        self.schema_validation = enabled;
        self
    }

    /// Enables or disables integrity hashes in the output.
    pub fn integrity_hashes(mut self, enabled: bool) -> Self {
        self.integrity_hashes = enabled;
        self
    }

    /// Sets the allocation-count threshold above which batches are
    /// processed in parallel.
    pub fn batch_size(mut self, size: usize) -> Self {
        self.batch_size = size;
        self
    }

    /// Enables or disables adaptive optimization.
    pub fn adaptive_optimization(mut self, enabled: bool) -> Self {
        self.adaptive_optimization = enabled;
        self
    }

    /// Sets the maximum number of cached entries.
    pub fn max_cache_size(mut self, size: usize) -> Self {
        self.max_cache_size = size;
        self
    }

    /// Includes or excludes low-severity findings in the output.
    pub fn include_low_severity(mut self, include: bool) -> Self {
        self.include_low_severity = include;
        self
    }

    /// Sets the worker thread count; `None` lets the runtime decide.
    pub fn thread_count(mut self, count: Option<usize>) -> Self {
        self.thread_count = count;
        self
    }
}
169
170pub fn export_snapshot_to_json(
171 snapshot: &MemorySnapshot,
172 output_path: &Path,
173 options: &ExportJsonOptions,
174) -> Result<(), Box<dyn std::error::Error>> {
175 if let Some(parent) = output_path.parent() {
177 if !parent.as_os_str().is_empty() {
178 std::fs::create_dir_all(parent)?;
179 }
180 }
181
182 let allocations: Vec<&ActiveAllocation> = snapshot.active_allocations.values().collect();
183 let processed = process_allocations(&allocations, options)?;
184
185 let output_dir = if output_path.extension().is_some() {
187 output_path.parent().unwrap_or(Path::new("."))
189 } else {
190 output_path
191 };
192
193 generate_memory_analysis_json(output_dir, &processed, options)?;
194 generate_lifetime_json(output_dir, &processed, options)?;
195 generate_thread_analysis_json(output_dir, &snapshot.thread_stats, options)?;
196
197 Ok(())
198}
199
200fn process_allocations(
201 allocations: &[&ActiveAllocation],
202 options: &ExportJsonOptions,
203) -> Result<Vec<serde_json::Value>, Box<dyn std::error::Error>> {
204 if options.parallel_processing && allocations.len() > options.batch_size {
205 let chunk_size = (allocations.len() / num_cpus::get()).max(1);
206 Ok(allocations
207 .par_chunks(chunk_size)
208 .flat_map(process_allocation_batch)
209 .collect())
210 } else {
211 Ok(process_allocation_batch(allocations))
212 }
213}
214
215fn process_allocation_batch(allocations: &[&ActiveAllocation]) -> Vec<serde_json::Value> {
216 let current_time = std::time::SystemTime::now()
217 .duration_since(std::time::UNIX_EPOCH)
218 .map(|d| d.as_nanos() as u64)
219 .unwrap_or(0);
220
221 allocations
222 .iter()
223 .map(|alloc| {
224 let type_info = get_or_compute_type_info(
225 alloc.type_name.as_deref().unwrap_or("unknown"),
226 alloc.size,
227 );
228
229 let lifetime_ms = if alloc.allocated_at > 0 {
230 (current_time.saturating_sub(alloc.allocated_at)) / 1_000_000
231 } else {
232 0
233 };
234
235 let address = match alloc.ptr {
236 Some(ptr) => format!("0x{:x}", ptr),
237 None => "N/A".to_string(),
238 };
239
240 let mut entry = json!({
241 "address": address,
242 "size": alloc.size,
243 "type": type_info,
244 "timestamp": alloc.allocated_at,
245 "thread_id": alloc.thread_id,
246 "lifetime_ms": lifetime_ms,
247 });
248
249 if let Some(ref var_name) = alloc.var_name {
250 entry["var_name"] = serde_json::json!(var_name);
251 }
252
253 if let Some(ref type_name) = alloc.type_name {
254 entry["type_name"] = serde_json::json!(type_name);
255 }
256
257 if let Some(ref module_path) = alloc.module_path {
258 if !is_library_module_path(module_path) {
260 entry["module_path"] = serde_json::json!(module_path);
261 }
262 }
263
264 entry
265 })
266 .collect()
267}
268
/// Returns `true` when `module_path` belongs to the Rust standard library,
/// to this crate's own instrumentation (`memscope_rs`), or to a well-known
/// third-party crate — such paths are hidden from user-facing export output.
fn is_library_module_path(module_path: &str) -> bool {
    // One flat prefix list replaces the previous three separate checks
    // (std/core/alloc, memscope_rs, ecosystem crates); a single `any` pass
    // replaces the hand-rolled early-return loop.
    const LIBRARY_PREFIXES: &[&str] = &[
        // Rust standard library
        "std::",
        "core::",
        "alloc::",
        // this crate's own instrumentation
        "memscope_rs::",
        // common ecosystem crates
        "tokio::",
        "serde::",
        "async_trait::",
        "futures::",
        "log::",
        "tracing::",
        "chrono::",
        "indexmap::",
        "rustc_hash::",
        "parking_lot::",
        "crossbeam::",
        "rayon::",
        "dashmap::",
        "ahash::",
        "hashbrown::",
    ];

    LIBRARY_PREFIXES
        .iter()
        .any(|prefix| module_path.starts_with(prefix))
}
311
/// Classifies a type name (plus allocation size) into a coarse category
/// string for the export output.
///
/// NOTE(review): despite the "get_or_compute" name, no caching happens in
/// this function — confirm whether a cache was intended elsewhere.
fn get_or_compute_type_info(type_name: &str, size: usize) -> String {
    // Vec-like, but explicitly not VecDeque (e.g. "Vec<VecDeque<_>>" is excluded).
    let looks_like_vec = (type_name.contains("Vec<") || type_name.contains("vec::Vec<"))
        && !type_name.contains("VecDeque");

    let category = if looks_like_vec {
        "dynamic_array"
    } else if matches!(type_name, "str" | "String")
        || type_name.contains("&str")
        || type_name.contains("alloc::string::String")
    {
        "string"
    } else if ["Box", "Rc", "Arc"].iter().any(|p| type_name.contains(p)) {
        "smart_pointer"
    } else if type_name.contains('[') && type_name.contains("u8") {
        "byte_array"
    } else if size > 1024 * 1024 {
        // Anything over 1 MiB with no recognized type name.
        "large_buffer"
    } else {
        "custom"
    };

    category.to_string()
}
334
335fn generate_memory_analysis_json<P: AsRef<Path>>(
336 output_path: P,
337 allocations: &[serde_json::Value],
338 options: &ExportJsonOptions,
339) -> Result<(), Box<dyn std::error::Error>> {
340 let total_size: usize = allocations
341 .iter()
342 .filter_map(|a| a.get("size").and_then(|s| s.as_u64()))
343 .map(|s| s as usize)
344 .sum();
345
346 let type_distribution: HashMap<String, usize> = {
347 let mut dist = HashMap::new();
348 for alloc in allocations {
349 if let Some(t) = alloc.get("type").and_then(|t| t.as_str()) {
350 *dist.entry(t.to_string()).or_insert(0) += 1;
351 }
352 }
353 dist
354 };
355
356 let data = json!({
357 "metadata": {
358 "export_version": "2.0",
359 "export_timestamp": chrono::Utc::now().to_rfc3339(),
360 "specification": "memscope-rs memory analysis",
361 "total_allocations": allocations.len(),
362 "total_size_bytes": total_size
363 },
364 "allocations": allocations,
365 "statistics": {
366 "total_allocations": allocations.len(),
367 "total_size_bytes": total_size,
368 "average_size_bytes": if allocations.is_empty() { 0 } else { total_size / allocations.len() }
369 },
370 "type_distribution": type_distribution
371 });
372
373 let path = output_path.as_ref().join("memory_analysis.json");
374 write_json_optimized(path, &data, options)?;
375 Ok(())
376}
377
378fn generate_lifetime_json<P: AsRef<Path>>(
379 output_path: P,
380 allocations: &[serde_json::Value],
381 options: &ExportJsonOptions,
382) -> Result<(), Box<dyn std::error::Error>> {
383 let ownership_histories: Vec<serde_json::Value> = allocations
384 .iter()
385 .map(|alloc| {
386 json!({
387 "address": alloc.get("address"),
388 "var_name": alloc.get("var_name"),
389 "type_name": alloc.get("type_name"),
390 "size": alloc.get("size"),
391 "timestamp_alloc": alloc.get("timestamp"),
392 "timestamp_dealloc": null,
393 "lifetime_ms": alloc.get("lifetime_ms"),
394 "events": [
395 {
396 "event_type": "Created",
397 "timestamp": alloc.get("timestamp"),
398 "context": "initial_allocation"
399 }
400 ]
401 })
402 })
403 .collect();
404
405 let lifetime_data = json!({
406 "metadata": {
407 "export_version": "2.0",
408 "export_timestamp": chrono::Utc::now().to_rfc3339(),
409 "specification": "memscope-rs lifetime tracking",
410 "total_tracked_allocations": ownership_histories.len()
411 },
412 "ownership_histories": ownership_histories
413 });
414
415 let lifetime_path = output_path.as_ref().join("lifetime.json");
416 write_json_optimized(lifetime_path, &lifetime_data, options)?;
417 Ok(())
418}
419
420fn generate_thread_analysis_json<P: AsRef<Path>>(
421 output_path: P,
422 thread_stats: &HashMap<u64, ThreadMemoryStats>,
423 options: &ExportJsonOptions,
424) -> Result<(), Box<dyn std::error::Error>> {
425 let thread_analysis: Vec<serde_json::Value> = thread_stats
426 .values()
427 .map(|stats| {
428 json!({
429 "thread_id": stats.thread_id,
430 "allocation_count": stats.allocation_count,
431 "total_allocated": stats.total_allocated,
432 "current_memory": stats.current_memory,
433 "peak_memory": stats.peak_memory,
434 })
435 })
436 .collect();
437
438 let data = json!({
439 "metadata": {
440 "export_version": "2.0",
441 "export_timestamp": chrono::Utc::now().to_rfc3339(),
442 "specification": "thread analysis",
443 "total_threads": thread_analysis.len()
444 },
445 "thread_analysis": thread_analysis
446 });
447
448 let path = output_path.as_ref().join("thread_analysis.json");
449 write_json_optimized(path, &data, options)?;
450 Ok(())
451}
452
453fn write_json_optimized<P: AsRef<Path>>(
454 path: P,
455 data: &serde_json::Value,
456 options: &ExportJsonOptions,
457) -> Result<(), Box<dyn std::error::Error>> {
458 let path = path.as_ref();
459
460 let estimated_size = estimate_json_size(data);
461 let use_compact = options
462 .use_compact_format
463 .unwrap_or(estimated_size > 1_000_000);
464
465 if options.streaming_writer && estimated_size > 500_000 {
466 let file = File::create(path)?;
467 let mut writer = BufWriter::with_capacity(options.buffer_size, file);
468
469 if use_compact {
470 serde_json::to_writer(&mut writer, data)?;
471 } else {
472 serde_json::to_writer_pretty(&mut writer, data)?;
473 }
474
475 writer.flush()?;
476 } else {
477 let json_string = if use_compact {
478 serde_json::to_string(data)?
479 } else {
480 serde_json::to_string_pretty(data)?
481 };
482 std::fs::write(path, json_string)?;
483 }
484
485 Ok(())
486}
487
488fn estimate_json_size(data: &serde_json::Value) -> usize {
489 match data {
490 serde_json::Value::Object(map) => {
491 map.values().map(estimate_json_size).sum::<usize>() + map.len() * 20
492 }
493 serde_json::Value::Array(arr) => {
494 arr.iter().map(estimate_json_size).sum::<usize>() + arr.len() * 10
495 }
496 serde_json::Value::String(s) => s.len(),
497 serde_json::Value::Number(n) => n.to_string().len(),
498 _ => 10,
499 }
500}
501
/// Errors produced by export operations.
///
/// NOTE(review): the functions visible in this file return
/// `Box<dyn std::error::Error>` or `MemScopeResult` rather than this enum —
/// confirm it is still consumed elsewhere.
#[derive(Debug, thiserror::Error)]
pub enum ExportError {
    /// Underlying filesystem/IO failure.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    /// JSON (de)serialization failure.
    #[error("JSON error: {0}")]
    Json(#[from] serde_json::Error),

    /// Catch-all with a human-readable reason.
    #[error("Export failed: {0}")]
    ExportFailed(String),
}
513
514pub fn export_all_json<P: AsRef<Path>>(
515 path: P,
516 tracker: &Tracker,
517 passport_tracker: &Arc<MemoryPassportTracker>,
518 async_tracker: &Arc<crate::capture::backends::async_tracker::AsyncTracker>,
519) -> MemScopeResult<()> {
520 let path_ref = path.as_ref();
521
522 let events = tracker.event_store().snapshot();
524 let allocations = rebuild_allocations_from_events(&events);
525 let snapshot = MemorySnapshot::from_allocation_infos(allocations.clone());
526 let options = ExportJsonOptions::default();
527
528 std::fs::create_dir_all(path_ref)
529 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
530
531 debug!("Starting export_snapshot_to_json");
532
533 export_snapshot_to_json(&snapshot, path_ref, &options)
534 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
535
536 debug!("Completed export_snapshot_to_json");
537
538 debug!("Starting export_memory_passports_json");
539
540 export_memory_passports_json(path_ref, passport_tracker)
541 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
542
543 debug!("Completed export_memory_passports_json");
544
545 debug!("Starting export_leak_detection_json");
546
547 export_leak_detection_json(path_ref, passport_tracker)
548 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
549
550 debug!("Completed export_leak_detection_json");
551
552 debug!("Starting export_unsafe_ffi_json");
553
554 export_unsafe_ffi_json(path_ref, passport_tracker)
555 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
556
557 debug!("Completed export_unsafe_ffi_json");
558
559 debug!("Starting export_system_resources_json");
560
561 export_system_resources_json(path_ref)
562 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
563
564 debug!("Completed export_system_resources_json");
565
566 debug!("Starting export_async_analysis_json");
567
568 export_async_analysis_json(path_ref, async_tracker)
569 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
570
571 debug!("Completed export_async_analysis_json");
572
573 debug!("Starting export_ownership_graph_json");
574
575 let typed_allocations: Vec<crate::capture::types::AllocationInfo> =
576 allocations.clone().into_iter().collect();
577
578 debug!(
579 allocations = typed_allocations.len(),
580 "Converted allocations to typed format"
581 );
582
583 export_ownership_graph_json(path_ref, &typed_allocations, tracker.event_store())
584 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
585
586 debug!("Completed export_ownership_graph_json");
587
588 export_task_graph_json(path_ref)
590 .map_err(|e| MemScopeError::error("export", "export_all_json", e.to_string()))?;
591
592 debug!("Completed export_task_graph_json");
593
594 debug!("All exports completed successfully");
595
596 Ok(())
597}
598
599pub fn export_task_graph_json<P: AsRef<Path>>(base_path: P) -> MemScopeResult<()> {
603 use crate::task_registry::global_registry;
604
605 let base_path = base_path.as_ref();
606 let registry = global_registry();
607 let graph = registry.export_graph();
608
609 let json_string = serde_json::to_string_pretty(&graph)
610 .map_err(|e| MemScopeError::error("export", "export_task_graph_json", e.to_string()))?;
611
612 let file_path = base_path.join("task_graph.json");
613 std::fs::write(&file_path, json_string)
614 .map_err(|e| MemScopeError::error("export", "export_task_graph_json", e.to_string()))?;
615
616 tracing::info!("✅ Task graph JSON exported to: {:?}", file_path);
617
618 Ok(())
619}
620
621pub fn export_async_analysis_json<P: AsRef<Path>>(
623 path: P,
624 async_tracker: &Arc<crate::capture::backends::async_tracker::AsyncTracker>,
625) -> MemScopeResult<()> {
626 let path_ref = path.as_ref();
627 let stats = async_tracker.get_stats();
628 let profiles = async_tracker.get_all_profiles();
629 let snapshot = async_tracker.snapshot();
630
631 let async_data = json!({
632 "summary": {
633 "total_tasks": stats.total_tasks,
634 "active_tasks": stats.active_tasks,
635 "total_allocations": stats.total_allocations,
636 "total_memory_bytes": stats.total_memory,
637 "active_memory_bytes": stats.active_memory,
638 "peak_memory_bytes": stats.peak_memory,
639 },
640 "task_profiles": profiles.iter().map(|p| json!({
641 "task_id": p.task_id,
642 "task_name": p.task_name,
643 "task_type": format!("{:?}", p.task_type),
644 "created_at_ms": p.created_at_ms,
645 "completed_at_ms": p.completed_at_ms,
646 "total_bytes": p.total_bytes,
647 "current_memory": p.current_memory,
648 "peak_memory": p.peak_memory,
649 "total_allocations": p.total_allocations,
650 "total_deallocations": p.total_deallocations,
651 "duration_ns": p.duration_ns,
652 "allocation_rate": p.allocation_rate,
653 "efficiency_score": p.efficiency_score,
654 "average_allocation_size": p.average_allocation_size,
655 "is_completed": p.is_completed(),
656 "has_potential_leak": p.has_potential_leak(),
657 })).collect::<Vec<_>>(),
658 "allocations": snapshot.allocations.iter().map(|a| json!({
659 "ptr": format!("0x{:x}", a.ptr),
660 "size": a.size,
661 "timestamp": a.timestamp,
662 "task_id": a.task_id,
663 "var_name": a.var_name,
664 "type_name": a.type_name,
665 })).collect::<Vec<_>>(),
666 });
667
668 let async_path = path_ref.join("async_analysis.json");
669 let file = File::create(async_path)
670 .map_err(|e| MemScopeError::error("export", "export_async_analysis_json", e.to_string()))?;
671 let mut writer = BufWriter::new(file);
672 serde_json::to_writer_pretty(&mut writer, &async_data)
673 .map_err(|e| MemScopeError::error("export", "export_async_analysis_json", e.to_string()))?;
674 writer
675 .flush()
676 .map_err(|e| MemScopeError::error("export", "export_async_analysis_json", e.to_string()))?;
677
678 Ok(())
679}
680
/// Which HTML dashboard template to render; also supplies the output
/// filename prefix via its `Display` impl.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum DashboardTemplate {
    // Default template.
    #[default]
    Unified,
    Final,
}
691
692impl std::fmt::Display for DashboardTemplate {
693 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
694 match self {
695 DashboardTemplate::Unified => write!(f, "dashboard_unified"),
696 DashboardTemplate::Final => write!(f, "dashboard_final"),
697 }
698 }
699}
700
/// Exports the dashboard HTML using the default template
/// (`DashboardTemplate::Unified`) and no async-tracker data.
///
/// Thin wrapper around `export_dashboard_html_with_template`.
pub fn export_dashboard_html<P: AsRef<Path>>(
    path: P,
    tracker: &Tracker,
    passport_tracker: &Arc<MemoryPassportTracker>,
) -> MemScopeResult<()> {
    export_dashboard_html_with_template(
        path,
        tracker,
        passport_tracker,
        DashboardTemplate::default(),
        None,
    )
}
719
/// Exports the dashboard HTML using the default template, including data
/// from the given async tracker.
///
/// Thin wrapper around `export_dashboard_html_with_template`.
pub fn export_dashboard_html_with_async<P: AsRef<Path>>(
    path: P,
    tracker: &Tracker,
    passport_tracker: &Arc<MemoryPassportTracker>,
    async_tracker: &Arc<crate::capture::backends::async_tracker::AsyncTracker>,
) -> MemScopeResult<()> {
    export_dashboard_html_with_template(
        path,
        tracker,
        passport_tracker,
        DashboardTemplate::default(),
        Some(async_tracker),
    )
}
735
736pub fn export_dashboard_html_with_template<P: AsRef<Path>>(
741 path: P,
742 tracker: &Tracker,
743 passport_tracker: &Arc<MemoryPassportTracker>,
744 template: DashboardTemplate,
745 async_tracker: Option<&Arc<crate::capture::backends::async_tracker::AsyncTracker>>,
746) -> MemScopeResult<()> {
747 let path_ref = path.as_ref();
748
749 std::fs::create_dir_all(path_ref).map_err(|e| {
751 MemScopeError::error(
752 "export",
753 "export_dashboard_html_with_template",
754 format!("Failed to create output directory: {}", e),
755 )
756 })?;
757
758 let renderer = DashboardRenderer::new().map_err(|e| {
760 MemScopeError::error(
761 "export",
762 "export_dashboard_html_with_template",
763 format!("Failed to create dashboard renderer: {}", e),
764 )
765 })?;
766
767 let context = renderer
769 .build_context_from_tracker_with_async(tracker, passport_tracker, async_tracker)
770 .map_err(|e| {
771 MemScopeError::error(
772 "export",
773 "export_dashboard_html_with_template",
774 format!("Failed to build context: {}", e),
775 )
776 })?;
777
778 let html_content = match template {
779 DashboardTemplate::Final => renderer.render_final_dashboard(&context).map_err(|e| {
780 MemScopeError::error(
781 "export",
782 "export_dashboard_html_with_template",
783 format!("Failed to render final dashboard: {}", e),
784 )
785 })?,
786 DashboardTemplate::Unified => renderer.render_unified_dashboard(&context).map_err(|e| {
787 MemScopeError::error(
788 "export",
789 "export_dashboard_html_with_template",
790 format!("Failed to render dashboard: {}", e),
791 )
792 })?,
793 };
794
795 let output_file = path_ref.join(format!("{}_dashboard.html", template));
797 std::fs::write(&output_file, html_content).map_err(|e| {
798 MemScopeError::error(
799 "export",
800 "export_dashboard_html_with_template",
801 format!("Failed to write HTML file: {}", e),
802 )
803 })?;
804
805 tracing::info!("✅ Dashboard HTML exported to: {:?}", output_file);
806
807 Ok(())
808}
809
810pub fn export_memory_passports_json<P: AsRef<Path>>(
811 base_path: P,
812 passport_tracker: &Arc<MemoryPassportTracker>,
813) -> MemScopeResult<()> {
814 let base_path = base_path.as_ref();
815 let passports = passport_tracker.get_all_passports();
816
817 let passport_data: Vec<_> = passports
818 .values()
819 .map(|p| {
820 serde_json::json!({
821 "passport_id": p.passport_id,
822 "allocation_ptr": format!("0x{:x}", p.allocation_ptr),
823 "size_bytes": p.size_bytes,
824 "created_at": p.created_at,
825 "lifecycle_events": p.lifecycle_events.len(),
826 "status": format!("{:?}", p.status_at_shutdown),
827 })
828 })
829 .collect();
830
831 let json_data = serde_json::json!({
832 "metadata": {
833 "export_version": "2.0",
834 "specification": "memory passport tracking",
835 "total_passports": passports.len()
836 },
837 "memory_passports": passport_data,
838 });
839
840 let file_path = base_path.join("memory_passports.json");
841 let json_string = serde_json::to_string_pretty(&json_data).map_err(|e| {
842 MemScopeError::error("export", "export_memory_passports_json", e.to_string())
843 })?;
844 std::fs::write(&file_path, json_string).map_err(|e| {
845 MemScopeError::error("export", "export_memory_passports_json", e.to_string())
846 })?;
847
848 Ok(())
849}
850
851pub fn export_leak_detection_json<P: AsRef<Path>>(
852 base_path: P,
853 passport_tracker: &Arc<MemoryPassportTracker>,
854) -> MemScopeResult<()> {
855 let base_path = base_path.as_ref();
856 let leak_result = passport_tracker.detect_leaks_at_shutdown();
857
858 let leak_details: Vec<_> = leak_result
859 .leak_details
860 .iter()
861 .map(|detail| {
862 serde_json::json!({
863 "passport_id": detail.passport_id,
864 "memory_address": format!("0x{:x}", detail.memory_address),
865 "size_bytes": detail.size_bytes,
866 "lifecycle_summary": detail.lifecycle_summary,
867 })
868 })
869 .collect();
870
871 let json_data = serde_json::json!({
872 "metadata": {
873 "export_version": "2.0",
874 "specification": "leak detection",
875 "leaks_detected": leak_result.total_leaks
876 },
877 "leak_detection": {
878 "total_leaks": leak_result.total_leaks,
879 "leak_details": leak_details
880 }
881 });
882
883 let file_path = base_path.join("leak_detection.json");
884 let json_string = serde_json::to_string_pretty(&json_data)
885 .map_err(|e| MemScopeError::error("export", "export_leak_detection_json", e.to_string()))?;
886 std::fs::write(&file_path, json_string)
887 .map_err(|e| MemScopeError::error("export", "export_leak_detection_json", e.to_string()))?;
888
889 Ok(())
890}
891
892pub fn export_unsafe_ffi_json<P: AsRef<Path>>(
893 base_path: P,
894 passport_tracker: &Arc<MemoryPassportTracker>,
895) -> MemScopeResult<()> {
896 use crate::analysis::memory_passport_tracker::PassportStatus;
897
898 let base_path = base_path.as_ref();
899 let passports = passport_tracker.get_all_passports();
900
901 let ffi_reports: Vec<_> = passports
902 .values()
903 .filter(|p| {
904 matches!(
905 p.status_at_shutdown,
906 PassportStatus::HandoverToFfi
907 | PassportStatus::InForeignCustody
908 | PassportStatus::FreedByForeign
909 )
910 })
911 .map(|p| {
912 serde_json::json!({
913 "passport_id": p.passport_id,
914 "allocation_ptr": format!("0x{:x}", p.allocation_ptr),
915 "size_bytes": p.size_bytes,
916 "status": format!("{:?}", p.status_at_shutdown),
917 "created_at": p.created_at,
918 "boundary_events": p.lifecycle_events.iter().map(|e| {
919 serde_json::json!({
920 "timestamp": e.timestamp,
921 "event_type": format!("{:?}", e.event_type),
922 "context": e.context,
923 })
924 }).collect::<Vec<_>>(),
925 })
926 })
927 .collect();
928
929 let json_data = serde_json::json!({
930 "metadata": {
931 "export_version": "2.0",
932 "specification": "unsafe FFI tracking",
933 "total_ffi_reports": ffi_reports.len(),
934 "total_memory_passports": passports.len()
935 },
936 "unsafe_reports": ffi_reports,
937 "memory_passports": passports.len()
938 });
939
940 let file_path = base_path.join("unsafe_ffi.json");
941 let json_string = serde_json::to_string_pretty(&json_data)
942 .map_err(|e| MemScopeError::error("export", "export_unsafe_ffi_json", e.to_string()))?;
943 std::fs::write(&file_path, json_string)
944 .map_err(|e| MemScopeError::error("export", "export_unsafe_ffi_json", e.to_string()))?;
945
946 Ok(())
947}
948
/// Writes `system_resources.json`: a snapshot of platform memory statistics
/// and static system information collected via `PlatformMemoryInfo`.
///
/// # Errors
/// Fails if stats/system-info collection, serialization, or the final file
/// write fails; collection failures are also logged via `warn!`.
pub fn export_system_resources_json<P: AsRef<Path>>(base_path: P) -> MemScopeResult<()> {
    let base_path = base_path.as_ref();

    let mut memory_info = PlatformMemoryInfo::new();
    // Best-effort: initialization failure is deliberately ignored; the
    // collect calls below surface any real problem.
    let _ = memory_info.initialize();

    let memory_stats = match memory_info.collect_stats() {
        Ok(stats) => stats,
        Err(e) => {
            warn!(error = %e, "Failed to collect memory stats");
            return Err(MemScopeError::error(
                "export",
                "export_system_resources_json",
                e.to_string(),
            ));
        }
    };

    let system_info = match memory_info.get_system_info() {
        Ok(info) => info,
        Err(e) => {
            warn!(error = %e, "Failed to collect system info");
            return Err(MemScopeError::error(
                "export",
                "export_system_resources_json",
                e.to_string(),
            ));
        }
    };

    // The payload mirrors the platform structs field-for-field; only the
    // pressure level is stringified (via Debug) rather than copied verbatim.
    let json_data = serde_json::json!({
        "metadata": {
            "export_version": "2.0",
            "specification": "system resource monitoring",
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "system_info": {
            "os_name": system_info.os_name,
            "os_version": system_info.os_version,
            "architecture": system_info.architecture,
            "cpu_cores": system_info.cpu_cores,
            "page_size": system_info.page_size,
            "large_page_size": system_info.large_page_size,
            "cpu_cache": {
                "l1_cache_size": system_info.cpu_cache.l1_cache_size,
                "l2_cache_size": system_info.cpu_cache.l2_cache_size,
                "l3_cache_size": system_info.cpu_cache.l3_cache_size,
                "cache_line_size": system_info.cpu_cache.cache_line_size
            },
            "mmu_info": {
                "virtual_address_bits": system_info.mmu_info.virtual_address_bits,
                "physical_address_bits": system_info.mmu_info.physical_address_bits,
                "aslr_enabled": system_info.mmu_info.aslr_enabled,
                "nx_bit_supported": system_info.mmu_info.nx_bit_supported
            }
        },
        "memory_stats": {
            "virtual_memory": {
                "total_virtual": memory_stats.virtual_memory.total_virtual,
                "available_virtual": memory_stats.virtual_memory.available_virtual,
                "used_virtual": memory_stats.virtual_memory.used_virtual,
                "reserved": memory_stats.virtual_memory.reserved,
                "committed": memory_stats.virtual_memory.committed
            },
            "physical_memory": {
                "total_physical": memory_stats.physical_memory.total_physical,
                "available_physical": memory_stats.physical_memory.available_physical,
                "used_physical": memory_stats.physical_memory.used_physical,
                "cached": memory_stats.physical_memory.cached,
                "buffers": memory_stats.physical_memory.buffers,
                "swap": {
                    "total_swap": memory_stats.physical_memory.swap.total_swap,
                    "used_swap": memory_stats.physical_memory.swap.used_swap,
                    "available_swap": memory_stats.physical_memory.swap.available_swap,
                    "swap_in_rate": memory_stats.physical_memory.swap.swap_in_rate,
                    "swap_out_rate": memory_stats.physical_memory.swap.swap_out_rate
                }
            },
            "process_memory": {
                "virtual_size": memory_stats.process_memory.virtual_size,
                "resident_size": memory_stats.process_memory.resident_size,
                "shared_size": memory_stats.process_memory.shared_size,
                "private_size": memory_stats.process_memory.private_size,
                "heap_size": memory_stats.process_memory.heap_size,
                "stack_size": memory_stats.process_memory.stack_size,
                "mapped_files": memory_stats.process_memory.mapped_files,
                "peak_usage": memory_stats.process_memory.peak_usage
            },
            "system_memory": {
                "allocation_count": memory_stats.system_memory.allocation_count,
                "deallocation_count": memory_stats.system_memory.deallocation_count,
                "active_allocations": memory_stats.system_memory.active_allocations,
                "total_allocated": memory_stats.system_memory.total_allocated,
                "total_deallocated": memory_stats.system_memory.total_deallocated,
                "fragmentation_level": memory_stats.system_memory.fragmentation_level,
                "large_pages": {
                    "supported": memory_stats.system_memory.large_pages.supported,
                    "total_large_pages": memory_stats.system_memory.large_pages.total_large_pages,
                    "used_large_pages": memory_stats.system_memory.large_pages.used_large_pages,
                    "page_size": memory_stats.system_memory.large_pages.page_size
                }
            },
            "pressure_indicators": {
                "pressure_level": format!("{:?}", memory_stats.pressure_indicators.pressure_level),
                "low_memory": memory_stats.pressure_indicators.low_memory,
                "swapping_active": memory_stats.pressure_indicators.swapping_active,
                "allocation_failure_rate": memory_stats.pressure_indicators.allocation_failure_rate,
                "gc_pressure": memory_stats.pressure_indicators.gc_pressure
            }
        }
    });

    let file_path = base_path.join("system_resources.json");
    let json_string = serde_json::to_string_pretty(&json_data).map_err(|e| {
        MemScopeError::error("export", "export_system_resources_json", e.to_string())
    })?;
    std::fs::write(&file_path, json_string).map_err(|e| {
        MemScopeError::error("export", "export_system_resources_json", e.to_string())
    })?;

    Ok(())
}
1083
/// Export the full ownership-graph analysis to `<base_path>/ownership_graph.json`.
///
/// The graph is rebuilt from `allocations` + `event_store` via
/// `build_ownership_graph_from_allocations`, then augmented with borrow
/// events taken from the process-global borrow analyzer. Node ids and
/// pointers are serialized as lowercase hex strings (`"0x…"`).
///
/// # Errors
/// Returns a `MemScopeError` (module "export", op "export_ownership_graph_json")
/// if JSON serialization or the file write fails.
pub fn export_ownership_graph_json<P: AsRef<Path>>(
    base_path: P,
    allocations: &[crate::capture::types::AllocationInfo],
    event_store: &crate::event_store::EventStore,
) -> MemScopeResult<()> {
    let base_path = base_path.as_ref();

    let graph = build_ownership_graph_from_allocations(allocations, event_store);

    // 50 is the threshold handed to diagnostics — presumably the clone-storm
    // limit; TODO confirm against `OwnershipGraph::diagnostics`.
    let diagnostics = graph.diagnostics(50);

    // Borrow history lives in a global analyzer, not in `event_store`.
    let borrow_analyzer = crate::analysis::borrow_analysis::get_global_borrow_analyzer();
    let borrow_history = borrow_analyzer.get_borrow_history();

    // One object per node; `stack_ptr` serializes as null when unknown.
    let nodes_json: Vec<_> = graph
        .nodes
        .iter()
        .map(|node| {
            json!({
                "id": format!("0x{:x}", node.id.0),
                "type_name": node.type_name,
                "size": node.size,
                "stack_ptr": node.stack_ptr.map(|p| format!("0x{:x}", p)),
            })
        })
        .collect();

    // Structural edges first (`mut` because borrow edges are appended below).
    let mut edges_json: Vec<_> = graph
        .edges
        .iter()
        .map(|edge| {
            json!({
                "from": format!("0x{:x}", edge.from.0),
                "to": format!("0x{:x}", edge.to.0),
                "kind": match edge.op {
                    EdgeKind::Owns => "Owns",
                    EdgeKind::Contains => "Contains",
                    EdgeKind::Borrows => "Borrows",
                    EdgeKind::RcClone => "RcClone",
                    EdgeKind::ArcClone => "ArcClone",
                    EdgeKind::Move => "Move",
                    EdgeKind::SharedBorrow => "SharedBorrow",
                    EdgeKind::MutBorrow => "MutBorrow",
                },
            })
        })
        .collect();

    // Append one edge per recorded borrow event. NOTE(review): both ends use
    // the same pointer (a self-loop) — presumably the borrower's own address
    // is not tracked; confirm this is the intended encoding.
    for event in &borrow_history {
        let edge_kind = match event.borrow_info.borrow_type {
            crate::analysis::borrow_analysis::BorrowType::Immutable => "SharedBorrow",
            crate::analysis::borrow_analysis::BorrowType::Mutable => "MutBorrow",
            crate::analysis::borrow_analysis::BorrowType::Shared => "Borrows",
            crate::analysis::borrow_analysis::BorrowType::Weak => "Borrows",
        };

        edges_json.push(json!({
            "from": format!("0x{:x}", event.borrow_info.ptr),
            "to": format!("0x{:x}", event.borrow_info.ptr),
            "kind": edge_kind,
            "var_name": event.borrow_info.var_name,
            "thread_id": event.borrow_info.thread_id,
            "borrow_id": format!("{:?}", event.borrow_info.id),
        }));
    }

    // Each cycle is emitted as the hex-id list of its member nodes.
    let cycles_json: Vec<_> = graph
        .cycles
        .iter()
        .map(|cycle| {
            let nodes: Vec<_> = cycle.iter().map(|id| format!("0x{:x}", id.0)).collect();
            json!({
                "nodes": nodes,
            })
        })
        .collect();

    // Diagnostic issues: Rc cycles are reported as errors, Arc clone storms
    // as warnings.
    let issues_json: Vec<_> = diagnostics
        .issues
        .iter()
        .map(|issue| match issue {
            crate::analysis::ownership_graph::DiagnosticIssue::RcCycle { nodes, cycle_type } => {
                json!({
                    "type": "RcCycle",
                    "cycle_type": format!("{:?}", cycle_type),
                    "nodes": nodes.iter().map(|id| format!("0x{:x}", id.0)).collect::<Vec<_>>(),
                    "severity": "error",
                })
            }
            crate::analysis::ownership_graph::DiagnosticIssue::ArcCloneStorm {
                clone_count,
                threshold,
            } => {
                json!({
                    "type": "ArcCloneStorm",
                    "clone_count": clone_count,
                    "threshold": threshold,
                    "severity": "warning",
                })
            }
        })
        .collect();

    // Optional single root-cause summary; serialized as null when absent.
    let root_cause_json = graph.find_root_cause().map(|rc| {
        json!({
            "cause": match rc.root_cause {
                crate::analysis::ownership_graph::RootCause::ArcCloneStorm => "ArcCloneStorm",
                crate::analysis::ownership_graph::RootCause::RcCycle => "RcCycle",
            },
            "description": rc.description,
            "impact": rc.impact,
        })
    });

    let json_data = json!({
        "metadata": {
            "export_version": "2.0",
            "specification": "ownership graph analysis",
            // Seconds since the Unix epoch; a clock before the epoch
            // degrades to 0 via unwrap_or_default.
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "summary": {
            "total_nodes": graph.nodes.len(),
            // NOTE(review): counts structural edges only — the borrow edges
            // appended to "edges" above are not included in this total.
            "total_edges": graph.edges.len(),
            "total_cycles": graph.cycles.len(),
            "rc_clone_count": diagnostics.rc_clone_count,
            "arc_clone_count": diagnostics.arc_clone_count,
            "has_issues": diagnostics.has_issues(),
        },
        "nodes": nodes_json,
        "edges": edges_json,
        "cycles": cycles_json,
        "diagnostics": {
            "issues": issues_json,
            "root_cause": root_cause_json,
        },
    });

    let file_path = base_path.join("ownership_graph.json");
    let json_string = serde_json::to_string_pretty(&json_data).map_err(|e| {
        MemScopeError::error("export", "export_ownership_graph_json", e.to_string())
    })?;
    std::fs::write(&file_path, json_string).map_err(|e| {
        MemScopeError::error("export", "export_ownership_graph_json", e.to_string())
    })?;

    Ok(())
}
1250
1251fn build_ownership_graph_from_allocations(
1253 allocations: &[crate::capture::types::AllocationInfo],
1254 event_store: &crate::event_store::EventStore,
1255) -> OwnershipGraph {
1256 debug!(
1257 allocations = allocations.len(),
1258 "Starting build_ownership_graph"
1259 );
1260 use crate::analysis::relation_inference::{detect_containers, Relation, RelationGraphBuilder};
1261 use crate::event_store::MemoryEventType;
1262
1263 debug!("Converting allocations to passports");
1266 let passports: Vec<(
1267 NodeId,
1268 String,
1269 usize,
1270 Vec<crate::analysis::ownership_graph::OwnershipEvent>,
1271 )> = allocations
1272 .iter()
1273 .enumerate()
1274 .map(|(idx, alloc)| {
1275 let unique_ptr = if alloc.ptr == 0 {
1276 crate::analysis::VIRTUAL_PTR_BASE + idx
1277 } else {
1278 alloc.ptr
1279 };
1280 let id = NodeId::from_ptr(unique_ptr);
1281 let type_name = alloc
1282 .type_name
1283 .clone()
1284 .unwrap_or_else(|| "unknown".to_string());
1285 let size = alloc.size;
1286
1287 let events = vec![crate::analysis::ownership_graph::OwnershipEvent::new(
1289 alloc.timestamp_alloc,
1290 OwnershipOp::Create,
1291 id,
1292 None,
1293 )];
1294
1295 if type_name.contains("Arc<") || type_name.contains("Rc<") {
1297 }
1301
1302 (id, type_name, size, events)
1303 })
1304 .collect();
1305 debug!(passports = passports.len(), "Created passports");
1306
1307 debug!("Building initial ownership graph");
1308 let mut graph = OwnershipGraph::build(&passports);
1309 debug!(
1310 nodes = graph.nodes.len(),
1311 edges = graph.edges.len(),
1312 "Initial graph built"
1313 );
1314
1315 let _heap_owner_original_indices: Vec<usize> = allocations
1320 .iter()
1321 .enumerate()
1322 .filter(|(_, a)| a.timestamp_dealloc.is_none())
1323 .map(|(i, _)| i)
1324 .collect();
1325
1326 let heap_owner_allocations: Vec<ActiveAllocation> = allocations
1327 .iter()
1328 .enumerate()
1329 .filter(|(_, a)| a.timestamp_dealloc.is_none())
1330 .filter_map(|(_idx, a)| {
1331 if a.ptr == 0 || is_virtual_pointer(a.ptr) {
1333 return None;
1334 }
1335
1336 Some(ActiveAllocation {
1337 ptr: Some(a.ptr),
1338 kind: crate::core::types::TrackKind::HeapOwner {
1339 ptr: a.ptr,
1340 size: a.size,
1341 },
1342 size: a.size,
1343 allocated_at: a.timestamp_alloc,
1344 var_name: a.var_name.clone(),
1345 type_name: a.type_name.clone(),
1346 thread_id: a.thread_id_u64,
1347 call_stack_hash: None,
1348 module_path: a.module_path.clone(),
1349 stack_ptr: a.stack_ptr,
1350 })
1351 })
1352 .collect();
1353
1354 for (i, alloc) in heap_owner_allocations.iter().enumerate() {
1356 if i < graph.nodes.len() {
1357 graph.nodes[i].stack_ptr = alloc.stack_ptr;
1358 }
1359 }
1360
1361 let valid_thread_ids: std::collections::HashSet<u64> =
1364 heap_owner_allocations.iter().map(|a| a.thread_id).collect();
1365
1366 let container_events: Vec<_> = event_store
1367 .snapshot()
1368 .into_iter()
1369 .filter(|e| e.event_type == MemoryEventType::Metadata)
1370 .filter(|e| valid_thread_ids.contains(&e.thread_id))
1371 .filter_map(|e| {
1372 let type_name = e.type_name.clone().unwrap_or_default();
1373 let var_name = e.var_name.clone().unwrap_or_default();
1374 let is_container = type_name.contains("HashMap")
1375 || type_name.contains("BTreeMap")
1376 || type_name.contains("VecDeque")
1377 || type_name.contains("RefCell")
1378 || type_name.contains("RwLock");
1379 if is_container {
1380 return Some((e, type_name, var_name));
1381 }
1382 None
1383 })
1384 .collect();
1385
1386 let container_allocations: Vec<ActiveAllocation> = container_events
1388 .iter()
1389 .enumerate()
1390 .map(|(idx, (e, type_name, var_name))| {
1391 let virtual_ptr = 0x300000000u64 as usize + idx;
1392 ActiveAllocation {
1393 ptr: Some(virtual_ptr),
1394 kind: crate::core::types::TrackKind::Container,
1395 size: e.size.max(1),
1396 allocated_at: e.timestamp,
1397 var_name: Some(var_name.clone()),
1398 type_name: Some(type_name.clone()),
1399 thread_id: e.thread_id,
1400 call_stack_hash: e.call_stack_hash,
1401 module_path: e.module_path.clone(),
1402 stack_ptr: None,
1403 }
1404 })
1405 .collect();
1406
1407 let mut all_for_relation: Vec<ActiveAllocation> = Vec::new();
1408 all_for_relation.extend(heap_owner_allocations.clone());
1409 all_for_relation.extend(container_allocations.clone());
1410
1411 debug!("Running container detection");
1413 let container_config = crate::analysis::relation_inference::ContainerConfig {
1414 time_window_ns: 10_000_000, size_ratio: 10000, lookahead: 10, };
1418
1419 let container_edges = detect_containers(&all_for_relation, Some(container_config));
1420 debug!(
1421 container_edges = container_edges.len(),
1422 "Container detection completed"
1423 );
1424
1425 debug!("Running RelationGraphBuilder");
1427 let relation_graph = RelationGraphBuilder::build(&heap_owner_allocations, None);
1428 debug!(
1429 edges = relation_graph.edges.len(),
1430 "RelationGraphBuilder completed"
1431 );
1432
1433 const CONTAINER_PTR_BASE: usize = 0x300000000;
1436 let heap_owner_count = graph.nodes.len();
1437 for (idx, (e, type_name, _var_name)) in container_events.iter().enumerate() {
1438 let node_id = NodeId::from_ptr(CONTAINER_PTR_BASE + idx);
1439 graph.nodes.push(crate::analysis::ownership_graph::Node {
1440 id: node_id,
1441 type_name: type_name.clone(),
1442 size: e.size,
1443 stack_ptr: None,
1444 });
1445 }
1446
1447 for edge in &relation_graph.edges {
1449 let from_id = graph.nodes[edge.from].id;
1450 let to_id = graph.nodes[edge.to].id;
1451
1452 let edge_kind = match edge.relation {
1453 Relation::Owns => EdgeKind::Owns,
1454 Relation::Contains => EdgeKind::Contains,
1455 Relation::Slice => EdgeKind::Borrows,
1456 Relation::Clone => EdgeKind::RcClone,
1457 Relation::Shares => EdgeKind::ArcClone,
1458 Relation::Evolution => EdgeKind::Contains,
1459 Relation::ArcClone => EdgeKind::ArcClone,
1460 Relation::RcClone => EdgeKind::RcClone,
1461 Relation::ImmutableBorrow => EdgeKind::SharedBorrow,
1462 Relation::MutableBorrow => EdgeKind::MutBorrow,
1463 };
1464
1465 graph.edges.push(crate::analysis::ownership_graph::Edge {
1466 from: from_id,
1467 to: to_id,
1468 op: edge_kind,
1469 });
1470 }
1471
1472 for edge in &container_edges {
1474 let from_all_idx = edge.from;
1477 let to_all_idx = edge.to;
1478
1479 if from_all_idx < heap_owner_count || to_all_idx >= heap_owner_count {
1481 continue; }
1483
1484 let container_graph_idx = from_all_idx - heap_owner_count;
1486 if container_graph_idx >= container_events.len() {
1487 continue; }
1489
1490 let from_id = graph.nodes[heap_owner_count + container_graph_idx].id;
1491 let to_id = graph.nodes[to_all_idx].id;
1492
1493 graph.edges.push(crate::analysis::ownership_graph::Edge {
1494 from: from_id,
1495 to: to_id,
1496 op: EdgeKind::Contains,
1497 });
1498 }
1499
1500 debug!(
1501 nodes = graph.nodes.len(),
1502 edges = graph.edges.len(),
1503 "Final ownership graph built"
1504 );
1505 graph
1506}
1507
#[cfg(test)]
mod tests {
    use super::*;

    /// All four optimization levels must be constructible.
    #[test]
    fn test_optimization_level_variants() {
        let _all = [
            OptimizationLevel::Low,
            OptimizationLevel::Medium,
            OptimizationLevel::High,
            OptimizationLevel::Maximum,
        ];
    }

    /// The default optimization level is Medium.
    #[test]
    fn test_optimization_level_default() {
        let is_medium = matches!(OptimizationLevel::default(), OptimizationLevel::Medium);
        assert!(is_medium, "Default should be Medium");
    }

    /// A non-strict validator accepts any JSON object.
    #[test]
    fn test_schema_validator_new() {
        let data = serde_json::json!({"test": "value"});
        assert!(
            SchemaValidator::new().validate(&data).is_ok(),
            "Validation should pass for any object"
        );
    }

    /// Strict mode requires timestamp/allocations/stats to be present.
    #[test]
    fn test_schema_validator_strict_mode() {
        let validator = SchemaValidator::new().with_strict_mode(true);

        let missing_fields = serde_json::json!({"other": "data"});
        assert!(
            validator.validate(&missing_fields).is_err(),
            "Should fail with missing required fields"
        );

        let valid_data = serde_json::json!({
            "timestamp": 123,
            "allocations": [],
            "stats": {}
        });
        assert!(
            validator.validate(&valid_data).is_ok(),
            "Should pass with all required fields"
        );
    }

    /// Non-object top-level JSON is always rejected.
    #[test]
    fn test_schema_validator_non_object() {
        let data = serde_json::json!("not an object");
        assert!(
            SchemaValidator::new().validate(&data).is_err(),
            "Should reject non-object data"
        );
    }

    /// Default export options: fast paths on, opt-in analyses off.
    #[test]
    fn test_export_json_options_default() {
        let options = ExportJsonOptions::default();
        let expectations = [
            (
                options.parallel_processing,
                "parallel_processing should be true by default",
            ),
            (
                options.streaming_writer,
                "streaming_writer should be true by default",
            ),
            (
                options.enable_type_cache,
                "enable_type_cache should be true by default",
            ),
            (
                options.adaptive_optimization,
                "adaptive_optimization should be true by default",
            ),
            (
                !options.schema_validation,
                "schema_validation should be false by default",
            ),
            (
                !options.security_analysis,
                "security_analysis should be false by default",
            ),
        ];
        for (holds, msg) in expectations {
            assert!(holds, "{}", msg);
        }
    }

    /// Every builder method stores the value it was given.
    #[test]
    fn test_export_json_options_builders() {
        let options = ExportJsonOptions::default()
            .fast_export_mode(true)
            .security_analysis(true)
            .streaming_writer(false)
            .schema_validation(true)
            .integrity_hashes(true)
            .batch_size(500)
            .adaptive_optimization(false)
            .max_cache_size(5000)
            .include_low_severity(true)
            .thread_count(Some(4));

        let flags = [
            (options.fast_export_mode, "fast_export_mode should be true"),
            (options.security_analysis, "security_analysis should be true"),
            (!options.streaming_writer, "streaming_writer should be false"),
            (options.schema_validation, "schema_validation should be true"),
            (options.integrity_hashes, "integrity_hashes should be true"),
            (
                !options.adaptive_optimization,
                "adaptive_optimization should be false",
            ),
            (
                options.include_low_severity,
                "include_low_severity should be true",
            ),
        ];
        for (holds, msg) in flags {
            assert!(holds, "{}", msg);
        }

        assert_eq!(options.batch_size, 500, "batch_size should be 500");
        assert_eq!(
            options.max_cache_size, 5000,
            "max_cache_size should be 5000"
        );
        assert_eq!(
            options.thread_count,
            Some(4),
            "thread_count should be Some(4)"
        );
    }

    /// Each error variant's Display output carries its category marker.
    #[test]
    fn test_export_error_variants() {
        let rendered_io =
            ExportError::Io(std::io::Error::new(std::io::ErrorKind::NotFound, "test")).to_string();
        let rendered_json =
            ExportError::Json(serde_json::from_str::<i32>("invalid").unwrap_err()).to_string();
        let rendered_export = ExportError::ExportFailed("test error".to_string()).to_string();

        assert!(rendered_io.contains("IO error"), "Should contain IO error");
        assert!(
            rendered_json.contains("JSON error"),
            "Should contain JSON error"
        );
        assert!(
            rendered_export.contains("Export failed"),
            "Should contain Export failed"
        );
    }

    /// Size estimation is strictly positive for every JSON value kind.
    #[test]
    fn test_estimate_json_size() {
        let samples = [
            (
                serde_json::json!("hello world"),
                "String size should be positive",
            ),
            (serde_json::json!(42), "Number size should be positive"),
            (serde_json::json!([1, 2, 3]), "Array size should be positive"),
            (
                serde_json::json!({"key": "value"}),
                "Object size should be positive",
            ),
        ];
        for (value, msg) in &samples {
            assert!(estimate_json_size(value) > 0, "{}", msg);
        }
    }

    /// Type-name + size classification covers every documented category.
    #[test]
    fn test_get_or_compute_type_info() {
        let cases = [
            (
                "Vec<i32>",
                100,
                "dynamic_array",
                "Vec should be dynamic_array",
            ),
            ("String", 24, "string", "String should be string"),
            ("Box<i32>", 8, "smart_pointer", "Box should be smart_pointer"),
            (
                "Rc<String>",
                8,
                "smart_pointer",
                "Rc should be smart_pointer",
            ),
            (
                "Arc<i32>",
                16,
                "smart_pointer",
                "Arc should be smart_pointer",
            ),
            (
                "[u8; 100]",
                100,
                "byte_array",
                "u8 array should be byte_array",
            ),
            (
                "CustomType",
                1024 * 1024 * 2,
                "large_buffer",
                "Large allocation should be large_buffer",
            ),
            ("MyType", 100, "custom", "Unknown type should be custom"),
        ];
        for (type_name, size, expected, msg) in cases {
            assert_eq!(
                get_or_compute_type_info(type_name, size),
                expected,
                "{}",
                msg
            );
        }
    }
}