1use crate::analysis::security_violation_analyzer::{
7 AnalysisConfig, SecurityViolationAnalyzer, ViolationSeverity,
8};
9use crate::analysis::unsafe_ffi_tracker::{get_global_unsafe_ffi_tracker, SafetyViolation};
10use crate::core::tracker::MemoryTracker;
11use crate::core::types::{AllocationInfo, TrackingResult};
12use crate::export::adaptive_performance::AdaptivePerformanceOptimizer;
13use crate::export::fast_export_coordinator::{FastExportConfigBuilder, FastExportCoordinator};
14use crate::export::schema_validator::SchemaValidator;
15use rayon::prelude::*;
16
17use std::{
18 collections::HashMap,
19 fs::File,
20 io::{BufWriter, Write},
21 path::Path,
22 sync::LazyLock,
23};
24
/// Categories of JSON analysis files the exporter can emit.
///
/// Each variant maps to one output file; the file-name suffix for a variant
/// comes from [`JsonFileType::file_suffix`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum JsonFileType {
    /// Core allocation/memory statistics (`*_memory_analysis.json`).
    MemoryAnalysis,
    /// Allocation lifetime analysis (`*_lifetime.json`).
    Lifetime,
    /// Unsafe/FFI tracking results (`*_unsafe_ffi.json`).
    UnsafeFfi,
    /// Export/runtime performance metrics (`*_performance.json`).
    Performance,
    /// Complex type analysis (`*_complex_types.json`).
    ComplexTypes,
    /// Security violation report (`*_security_violations.json`).
    SecurityViolations,
}
45
46impl JsonFileType {
47 pub fn standard_four() -> Vec<JsonFileType> {
49 vec![
50 JsonFileType::MemoryAnalysis,
51 JsonFileType::Lifetime,
52 JsonFileType::UnsafeFfi,
53 JsonFileType::Performance,
54 ]
55 }
56
57 pub fn standard_five() -> Vec<JsonFileType> {
59 vec![
60 JsonFileType::MemoryAnalysis,
61 JsonFileType::Lifetime,
62 JsonFileType::UnsafeFfi,
63 JsonFileType::Performance,
64 JsonFileType::ComplexTypes,
65 ]
66 }
67
68 pub fn file_suffix(&self) -> &'static str {
70 match self {
71 JsonFileType::MemoryAnalysis => "memory_analysis",
72 JsonFileType::Lifetime => "lifetime",
73 JsonFileType::UnsafeFfi => "unsafe_ffi",
74 JsonFileType::Performance => "performance",
75 JsonFileType::ComplexTypes => "complex_types",
76 JsonFileType::SecurityViolations => "security_violations",
77 }
78 }
79}
80
// Process-wide adaptive performance optimizer, lazily created on first use.
// Exports record batch metrics here when adaptive optimization is enabled.
static ADAPTIVE_OPTIMIZER: LazyLock<std::sync::Mutex<AdaptivePerformanceOptimizer>> =
    LazyLock::new(|| std::sync::Mutex::new(AdaptivePerformanceOptimizer::default()));

// Process-wide security violation analyzer shared by all exports; fed with
// the current allocation snapshot before security reports are generated.
static SECURITY_ANALYZER: LazyLock<std::sync::Mutex<SecurityViolationAnalyzer>> =
    LazyLock::new(|| std::sync::Mutex::new(SecurityViolationAnalyzer::default()));
88
/// Tunable options for the optimized JSON export pipeline.
///
/// Construct via `Default`, [`OptimizedExportOptions::with_optimization_level`],
/// or the builder-style setter methods on this type.
#[derive(Debug, Clone)]
pub struct OptimizedExportOptions {
    // Process allocation batches in parallel where supported.
    pub parallel_processing: bool,
    // Capacity in bytes of the buffered file writer.
    pub buffer_size: usize,
    // Force compact (`Some(true)`) or pretty (`Some(false)`) JSON output;
    // `None` lets the writer decide from the estimated payload size.
    pub use_compact_format: Option<bool>,
    // Cache computed type-info strings in the global TYPE_CACHE.
    pub enable_type_cache: bool,
    // Number of allocations per processing batch / fast-export shard.
    pub batch_size: usize,
    // Use the streaming JSON writer for large outputs.
    pub use_streaming_writer: bool,
    // Validate output against the export schema (skipped in fast mode).
    pub enable_schema_validation: bool,
    // Overall preset controlling which analysis passes run.
    pub optimization_level: OptimizationLevel,
    // Enrich allocation entries with FFI analysis.
    pub enable_enhanced_ffi_analysis: bool,
    // Include cross-boundary (FFI <-> Rust) event data per allocation.
    pub enable_boundary_event_processing: bool,
    // Include memory-passport data from the global unsafe/FFI tracker.
    pub enable_memory_passport_tracking: bool,
    // Feed runtime metrics back into the global adaptive optimizer.
    pub enable_adaptive_optimization: bool,
    // Maximum cache entry count — not read in this module; presumably
    // consumed by downstream components (TODO confirm).
    pub max_cache_size: usize,
    // Target wall-clock time per batch in ms — not read in this module;
    // presumably consumed by the adaptive optimizer (TODO confirm).
    pub target_batch_time_ms: u64,
    // Run the security violation analyzer and emit its report file.
    pub enable_security_analysis: bool,
    // Keep low-severity findings in the security report.
    pub include_low_severity_violations: bool,
    // Generate integrity hashes — not read in this module (TODO confirm).
    pub generate_integrity_hashes: bool,
    // Always route the main file through the fast export coordinator
    // (skips schema validation).
    pub enable_fast_export_mode: bool,
    // Auto-enable fast export above this allocation count; `None` disables
    // the automatic switch.
    pub auto_fast_export_threshold: Option<usize>,
    // Worker thread count for fast export; `None` = coordinator default.
    pub thread_count: Option<usize>,
}
133
/// Preset controlling how many analysis passes the export performs.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OptimizationLevel {
    /// Fastest export: minimal analysis, no validation.
    Low,
    /// Balanced: validation and FFI analysis on, heavier passes off.
    Medium,
    /// Full feature set with good performance (the default).
    High,
    /// Everything enabled, plus larger buffers and batches.
    Maximum,
}
146
147impl Default for OptimizedExportOptions {
148 fn default() -> Self {
149 Self {
150 parallel_processing: true,
151 buffer_size: 256 * 1024, use_compact_format: None, enable_type_cache: true,
154 batch_size: 1000,
155 use_streaming_writer: true,
156 enable_schema_validation: true,
157 optimization_level: OptimizationLevel::High,
158 enable_enhanced_ffi_analysis: true,
159 enable_boundary_event_processing: true,
160 enable_memory_passport_tracking: true,
161 enable_adaptive_optimization: true,
162 max_cache_size: 1000,
163 target_batch_time_ms: 10,
164 enable_security_analysis: true,
165 include_low_severity_violations: true,
166 generate_integrity_hashes: true,
167 enable_fast_export_mode: false,
168 auto_fast_export_threshold: Some(5000),
169 thread_count: None, }
171 }
172}
173
174impl OptimizedExportOptions {
175 pub fn with_optimization_level(level: OptimizationLevel) -> Self {
177 let mut options = Self::default();
178 options.optimization_level = level;
179
180 match level {
181 OptimizationLevel::Low => {
182 options.parallel_processing = false;
183 options.use_streaming_writer = false;
184 options.enable_schema_validation = false;
185 options.enable_enhanced_ffi_analysis = false;
186 options.enable_boundary_event_processing = false;
187 options.enable_memory_passport_tracking = false;
188 options.enable_adaptive_optimization = false;
189 options.enable_security_analysis = false;
190 }
191 OptimizationLevel::Medium => {
192 options.parallel_processing = true;
193 options.use_streaming_writer = false;
194 options.enable_schema_validation = true;
195 options.enable_enhanced_ffi_analysis = true;
196 options.enable_boundary_event_processing = false;
197 options.enable_memory_passport_tracking = false;
198 }
199 OptimizationLevel::High => {
200 }
202 OptimizationLevel::Maximum => {
203 options.buffer_size = 512 * 1024; options.batch_size = 2000;
205 }
207 }
208
209 options
210 }
211
212 pub fn parallel_processing(mut self, enabled: bool) -> Self {
214 self.parallel_processing = enabled;
215 self
216 }
217
218 pub fn buffer_size(mut self, size: usize) -> Self {
220 self.buffer_size = size;
221 self
222 }
223
224 pub fn batch_size(mut self, size: usize) -> Self {
226 self.batch_size = size;
227 self
228 }
229
230 pub fn streaming_writer(mut self, enabled: bool) -> Self {
232 self.use_streaming_writer = enabled;
233 self
234 }
235
236 pub fn schema_validation(mut self, enabled: bool) -> Self {
238 self.enable_schema_validation = enabled;
239 self
240 }
241
242 pub fn adaptive_optimization(mut self, enabled: bool) -> Self {
244 self.enable_adaptive_optimization = enabled;
245 self
246 }
247
248 pub fn max_cache_size(mut self, size: usize) -> Self {
250 self.max_cache_size = size;
251 self
252 }
253
254 pub fn security_analysis(mut self, enabled: bool) -> Self {
256 self.enable_security_analysis = enabled;
257 self
258 }
259
260 pub fn include_low_severity(mut self, include: bool) -> Self {
262 self.include_low_severity_violations = include;
263 self
264 }
265
266 pub fn integrity_hashes(mut self, enabled: bool) -> Self {
268 self.generate_integrity_hashes = enabled;
269 self
270 }
271
272 pub fn fast_export_mode(mut self, enabled: bool) -> Self {
274 self.enable_fast_export_mode = enabled;
275 self
276 }
277
278 pub fn auto_fast_export_threshold(mut self, threshold: Option<usize>) -> Self {
280 self.auto_fast_export_threshold = threshold;
281 self
282 }
283
284 pub fn thread_count(mut self, count: Option<usize>) -> Self {
286 self.thread_count = count;
287 self
288 }
289}
290
// Global cache mapping "type_name:size" keys to computed type-info strings,
// shared across exports to avoid recomputing categories for repeated types.
static TYPE_CACHE: LazyLock<std::sync::Mutex<HashMap<String, String>>> =
    LazyLock::new(|| std::sync::Mutex::new(HashMap::new()));
294
295fn get_or_compute_type_info(type_name: &str, size: usize) -> String {
297 if let Ok(mut cache) = TYPE_CACHE.lock() {
298 let key = format!("{}:{}", type_name, size);
299 if let Some(cached) = cache.get(&key) {
300 return cached.clone();
301 }
302 let type_info = compute_enhanced_type_info(type_name, size);
303 cache.insert(key, type_info.clone());
304 type_info
305 } else {
306 compute_enhanced_type_info(type_name, size)
307 }
308}
309
/// Derive a coarse type category from a type name and allocation size.
///
/// Well-known container names (`Vec<`, `HashMap`, `String`) take priority;
/// anything else is bucketed purely by allocation size.
fn compute_enhanced_type_info(type_name: &str, size: usize) -> String {
    let category = if type_name.contains("Vec<") {
        "Vec<T>"
    } else if type_name.contains("HashMap") {
        "HashMap<K,V>"
    } else if type_name.contains("String") {
        "String"
    } else {
        // Size-based fallback; 0 and anything above 1024 land in "Buffer".
        match size {
            1..=8 => "Primitive",
            9..=32 => "SmallStruct",
            33..=128 => "MediumStruct",
            129..=1024 => "LargeStruct",
            _ => "Buffer",
        }
    };
    category.to_string()
}
328
329pub fn clear_type_cache() {
331 if let Ok(mut cache) = TYPE_CACHE.lock() {
332 cache.clear();
333 }
334}
335
336#[allow(dead_code)]
338fn process_allocation_batch(
339 allocations: &[AllocationInfo],
340) -> TrackingResult<Vec<serde_json::Value>> {
341 let options = OptimizedExportOptions::default();
342 process_allocation_batch_enhanced(allocations, &options)
343}
344
345fn process_allocation_batch_enhanced(
347 allocations: &[AllocationInfo],
348 options: &OptimizedExportOptions,
349) -> TrackingResult<Vec<serde_json::Value>> {
350 let mut processed = Vec::with_capacity(allocations.len());
351
352 for alloc in allocations {
353 let enhanced_type = if let Some(type_name) = &alloc.type_name {
354 get_or_compute_type_info(type_name, alloc.size)
355 } else {
356 compute_enhanced_type_info("Unknown", alloc.size)
357 };
358
359 let mut allocation_data = serde_json::json!({
360 "ptr": format!("0x{:x}", alloc.ptr),
361 "size": alloc.size,
362 "type_name": enhanced_type,
363 "var_name": alloc.var_name.as_deref().unwrap_or("unnamed"),
364 "scope": alloc.scope_name.as_deref().unwrap_or("global"),
365 "timestamp_alloc": alloc.timestamp_alloc,
366 "timestamp_dealloc": alloc.timestamp_dealloc,
367 "is_active": alloc.is_active()
368 });
369
370 if options.enable_enhanced_ffi_analysis {
372 if let Some(ffi_info) = analyze_ffi_allocation(alloc) {
373 allocation_data["ffi_analysis"] = ffi_info;
374 }
375 }
376
377 if options.enable_boundary_event_processing {
379 if let Some(boundary_info) = analyze_boundary_events(alloc) {
380 allocation_data["boundary_events"] = boundary_info;
381 }
382 }
383
384 if options.enable_memory_passport_tracking {
386 if let Some(passport_info) = get_memory_passport_info(alloc.ptr) {
387 allocation_data["memory_passport"] = passport_info;
388 }
389 }
390
391 processed.push(allocation_data);
392 }
393
394 Ok(processed)
395}
396
397#[allow(dead_code)]
399fn analyze_ffi_allocation(alloc: &AllocationInfo) -> Option<serde_json::Value> {
400 if let Some(type_name) = &alloc.type_name {
402 if type_name.contains("*mut")
403 || type_name.contains("*const")
404 || type_name.contains("extern")
405 || type_name.contains("libc::")
406 {
407 return Some(serde_json::json!({
408 "is_ffi_related": true,
409 "ffi_type": if type_name.contains("*mut") || type_name.contains("*const") {
410 "raw_pointer"
411 } else {
412 "external_library"
413 },
414 "risk_level": if type_name.contains("*mut") { "high" } else { "medium" },
415 "safety_concerns": [
416 "Manual memory management required",
417 "No automatic bounds checking",
418 "Potential for memory safety violations"
419 ]
420 }));
421 }
422 }
423
424 if let Some(var_name) = &alloc.var_name {
425 if var_name.contains("ffi") || var_name.contains("extern") || var_name.contains("c_") {
426 return Some(serde_json::json!({
427 "is_ffi_related": true,
428 "ffi_type": "ffi_variable",
429 "risk_level": "medium",
430 "detected_from": "variable_name"
431 }));
432 }
433 }
434
435 None
436}
437
438fn analyze_boundary_events(alloc: &AllocationInfo) -> Option<serde_json::Value> {
440 let tracker = get_global_unsafe_ffi_tracker();
442 if let Ok(allocations) = tracker.get_enhanced_allocations() {
443 for enhanced_alloc in allocations {
444 if enhanced_alloc.base.ptr == alloc.ptr
445 && !enhanced_alloc.cross_boundary_events.is_empty()
446 {
447 let events: Vec<serde_json::Value> = enhanced_alloc
448 .cross_boundary_events
449 .iter()
450 .map(|event| {
451 serde_json::json!({
452 "event_type": format!("{:?}", event.event_type),
453 "from_context": event.from_context,
454 "to_context": event.to_context,
455 "timestamp": event.timestamp
456 })
457 })
458 .collect();
459
460 return Some(serde_json::json!({
461 "has_boundary_events": true,
462 "event_count": events.len(),
463 "events": events
464 }));
465 }
466 }
467 }
468
469 None
470}
471
472fn get_memory_passport_info(ptr: usize) -> Option<serde_json::Value> {
474 let tracker = get_global_unsafe_ffi_tracker();
475 if let Ok(passports) = tracker.get_memory_passports() {
476 if let Some(passport) = passports.get(&ptr) {
477 return Some(serde_json::json!({
478 "passport_id": passport.passport_id,
479 "origin_context": passport.origin.context,
480 "current_owner": passport.current_owner.owner_context,
481 "validity_status": format!("{:?}", passport.validity_status),
482 "security_clearance": format!("{:?}", passport.security_clearance),
483 "journey_length": passport.journey.len(),
484 "last_stamp": passport.journey.last().map(|stamp| serde_json::json!({
485 "operation": stamp.operation,
486 "location": stamp.location,
487 "timestamp": stamp.timestamp
488 }))
489 }));
490 }
491 }
492
493 None
494}
495
496fn write_json_optimized<P: AsRef<Path>>(
498 path: P,
499 data: &serde_json::Value,
500 options: &OptimizedExportOptions,
501) -> TrackingResult<()> {
502 let path = path.as_ref();
503
504 if options.enable_schema_validation && !options.enable_fast_export_mode {
506 let validator = SchemaValidator::new();
507 if let Ok(validation_result) = validator.validate_unsafe_ffi_analysis(data) {
508 if !validation_result.is_valid {
509 eprintln!("⚠️ Schema validation warnings:");
510 for error in validation_result.errors {
511 eprintln!(" - {}: {}", error.code, error.message);
512 }
513 for warning in validation_result.warnings {
514 eprintln!(" - {}: {}", warning.warning_code, warning.message);
515 }
516 }
517 }
518 } else if options.enable_fast_export_mode {
519 }
521
522 let estimated_size = estimate_json_size(data);
524 let use_compact = options
525 .use_compact_format
526 .unwrap_or(estimated_size > 1_000_000); if false && options.use_streaming_writer && estimated_size > 500_000 {
531 let _file = File::create(path)?;
532 } else {
536 let file = File::create(path)?;
538 let mut writer = BufWriter::with_capacity(options.buffer_size, file);
539
540 if use_compact {
541 serde_json::to_writer(&mut writer, data)?;
542 } else {
543 serde_json::to_writer_pretty(&mut writer, data)?;
544 }
545
546 writer.flush()?;
547 }
548
549 Ok(())
550}
551
552fn estimate_json_size(data: &serde_json::Value) -> usize {
554 match data {
556 serde_json::Value::Object(obj) => {
557 obj.len() * 50 + obj.values().map(estimate_json_size).sum::<usize>()
558 }
559 serde_json::Value::Array(arr) => {
560 arr.len() * 20 + arr.iter().map(estimate_json_size).sum::<usize>()
561 }
562 serde_json::Value::String(s) => s.len() + 10,
563 _ => 20,
564 }
565}
566
567#[allow(dead_code)]
569fn convert_legacy_options_to_optimized(
570 legacy: crate::core::tracker::ExportOptions,
571) -> OptimizedExportOptions {
572 let mut optimized = OptimizedExportOptions::default();
573
574 optimized.buffer_size = legacy.buffer_size;
576 optimized.use_compact_format = Some(!legacy.verbose_logging); if legacy.include_system_allocations {
580 optimized.optimization_level = OptimizationLevel::Maximum;
582 optimized.enable_enhanced_ffi_analysis = true;
583 optimized.enable_boundary_event_processing = true;
584 optimized.enable_memory_passport_tracking = true;
585 optimized.enable_security_analysis = true;
586 } else {
587 optimized.optimization_level = OptimizationLevel::High;
589 }
590
591 if legacy.compress_output {
593 optimized.use_compact_format = Some(true);
594 optimized.buffer_size = optimized.buffer_size.max(512 * 1024); }
596
597 optimized.parallel_processing =
599 legacy.include_system_allocations || legacy.buffer_size > 128 * 1024;
600
601 println!("🔄 Converted legacy ExportOptions to OptimizedExportOptions:");
602 println!(
603 " - Optimization level: {:?}",
604 optimized.optimization_level
605 );
606 println!(" - Buffer size: {} KB", optimized.buffer_size / 1024);
607 println!(
608 " - Parallel processing: {}",
609 optimized.parallel_processing
610 );
611 println!(
612 " - Enhanced features: {}",
613 optimized.enable_enhanced_ffi_analysis
614 );
615
616 optimized
617}
618
619impl MemoryTracker {
621 pub fn export_to_json_fast<P: AsRef<Path>>(&self, path: P) -> TrackingResult<()> {
641 let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::Low)
642 .parallel_processing(true)
643 .streaming_writer(false)
644 .schema_validation(false)
645 .fast_export_mode(true) .auto_fast_export_threshold(Some(1000)); self.export_to_json_with_optimized_options(path, options)
649 }
650
651 pub fn export_to_json_comprehensive<P: AsRef<Path>>(&self, path: P) -> TrackingResult<()> {
665 let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::Maximum)
666 .security_analysis(true)
667 .adaptive_optimization(true);
668
669 self.export_to_json_with_optimized_options(path, options)
670 }
671
    /// Print a console guide describing the legacy vs. optimized export
    /// APIs, migration benefits and the available optimization levels.
    ///
    /// Purely informational: writes to stdout and touches no state.
    pub fn show_export_upgrade_path(&self) {
        println!("📚 MemoryTracker Export API Upgrade Guide");
        println!("=========================================");
        println!();
        println!("🔄 BACKWARD COMPATIBLE (no changes needed):");
        println!(" tracker.export_to_json(\"file.json\")?;");
        println!(" tracker.export_to_json_with_options(\"file\", ExportOptions::new())?;");
        println!();
        println!("🚀 NEW OPTIMIZED API (recommended):");
        println!(" // Basic optimized export");
        println!(" tracker.export_to_json_with_optimized_options(\"analysis\", OptimizedExportOptions::default())?;");
        println!();
        println!(" // Fast export for production");
        println!(" tracker.export_to_json_fast(\"prod_snapshot\")?;");
        println!();
        println!(" // Comprehensive export for debugging");
        println!(" tracker.export_to_json_comprehensive(\"debug_analysis\")?;");
        println!();
        println!(" // Custom configuration with fast export");
        println!(" let options = OptimizedExportOptions::with_optimization_level(OptimizationLevel::High)");
        println!(" .parallel_processing(true)");
        println!(" .security_analysis(true)");
        println!(" .fast_export_mode(true)");
        println!(" .auto_fast_export_threshold(Some(10000));");
        println!(" tracker.export_to_json_with_optimized_options(\"custom\", options)?;");
        println!();
        println!(" // Auto mode selection (recommended)");
        println!(" let options = OptimizedExportOptions::default()");
        println!(
            " .auto_fast_export_threshold(Some(5000)); // Auto-enable for >5000 allocations"
        );
        println!(" tracker.export_to_json_with_optimized_options(\"auto\", options)?;");
        println!();
        println!("💡 MIGRATION BENEFITS:");
        println!(" ✅ 5-10x faster export performance with fast export coordinator");
        println!(" ✅ Automatic mode selection based on dataset size");
        println!(" ✅ Parallel shard processing for large datasets");
        println!(" ✅ Enhanced FFI and unsafe code analysis");
        println!(" ✅ Security violation detection");
        println!(" ✅ Streaming JSON writer for large datasets");
        println!(" ✅ Adaptive performance optimization");
        println!(" ✅ Schema validation and data integrity");
        println!(" ✅ Multiple specialized output files");
        println!(" ✅ Configurable thread count and buffer sizes");
        println!();
        println!("🔧 OPTIMIZATION LEVELS:");
        println!(" - Low: Fast export, basic features");
        println!(" - Medium: Balanced performance and features");
        println!(" - High: Full features, good performance (default)");
        println!(" - Maximum: All features, maximum analysis depth");
    }
727
    /// Report what the export subsystem can currently do: available methods,
    /// live system stats, per-feature availability (probed against the
    /// global trackers/analyzers), and recommended settings per workload.
    ///
    /// # Errors
    /// Returns an error if active allocations or stats cannot be read.
    pub fn get_export_capabilities(&self) -> TrackingResult<serde_json::Value> {
        let allocations = self.get_active_allocations()?;
        let stats = self.get_stats()?;

        // Feature probes: a component counts as "available" when it can be
        // acquired/queried right now without error.
        let ffi_tracker_available = {
            let tracker = get_global_unsafe_ffi_tracker();
            tracker.get_enhanced_allocations().is_ok()
        };

        // A failed lock() here means the mutex is poisoned.
        let security_analyzer_available = SECURITY_ANALYZER.lock().is_ok();

        let adaptive_optimizer_available = ADAPTIVE_OPTIMIZER.lock().is_ok();

        Ok(serde_json::json!({
            "export_capabilities": {
                "api_version": "2.0",
                "backward_compatible": true,
                "available_methods": [
                    "export_to_json",
                    "export_to_json_with_options",
                    "export_to_json_with_optimized_options",
                    "export_to_json_fast",
                    "export_to_json_comprehensive"
                ],
                "optimization_levels": ["Low", "Medium", "High", "Maximum"],
                "output_formats": ["single_file", "multi_file", "streaming"]
            },
            "system_status": {
                "total_allocations": allocations.len(),
                "memory_usage_mb": stats.active_memory / (1024 * 1024),
                "ffi_tracker_available": ffi_tracker_available,
                "security_analyzer_available": security_analyzer_available,
                "adaptive_optimizer_available": adaptive_optimizer_available
            },
            "feature_availability": {
                "enhanced_ffi_analysis": ffi_tracker_available,
                "boundary_event_processing": ffi_tracker_available,
                "memory_passport_tracking": ffi_tracker_available,
                "security_violation_analysis": security_analyzer_available,
                "adaptive_performance_optimization": adaptive_optimizer_available,
                "streaming_json_writer": true,
                "schema_validation": true,
                "parallel_processing": true
            },
            "recommended_settings": {
                "small_datasets": "OptimizationLevel::Low or export_to_json_fast()",
                "medium_datasets": "OptimizationLevel::Medium or default settings",
                "large_datasets": "OptimizationLevel::High with streaming enabled",
                "security_audit": "OptimizationLevel::Maximum or export_to_json_comprehensive()",
                "production_monitoring": "OptimizationLevel::Low with minimal features"
            }
        }))
    }
    /// Unified export entry point driven by [`OptimizedExportOptions`].
    ///
    /// Output files are written under `MemoryAnalysis/<project>/`, where the
    /// project name is derived from `base_path`'s file stem (a trailing
    /// `_snapshot` is stripped). Depending on the options, the main memory
    /// analysis file is either produced by the high-throughput
    /// `FastExportCoordinator` or by the traditional in-process pipeline;
    /// the remaining per-category analysis files are then generated and
    /// written via `write_json_optimized`.
    ///
    /// # Errors
    /// Returns an error if allocations/stats cannot be read or a file write
    /// fails. A fast-export failure is downgraded to a warning and falls
    /// back to the traditional pipeline.
    pub fn export_to_json_with_optimized_options<P: AsRef<Path>>(
        &self,
        base_path: P,
        options: OptimizedExportOptions,
    ) -> TrackingResult<()> {
        let start_time = std::time::Instant::now();

        let allocations = self.get_active_allocations()?;
        let allocation_count = allocations.len();

        // Fast export is used when explicitly requested, or automatically
        // once the allocation count crosses the configured threshold —
        // except at the Low level, which always uses the simple pipeline.
        let should_use_fast_export = options.enable_fast_export_mode
            || (options
                .auto_fast_export_threshold
                .map_or(false, |threshold| {
                    allocation_count > threshold
                        && options.optimization_level != OptimizationLevel::Low
                }));

        if should_use_fast_export {
            println!(
                "🚀 Using fast export coordinator for high-performance export (allocations: {})",
                allocation_count
            );

            let mut config_builder = FastExportConfigBuilder::new()
                .shard_size(options.batch_size)
                .buffer_size(options.buffer_size)
                .performance_monitoring(true)
                .verbose_logging(false);

            // Only override the coordinator's thread default when asked to.
            if let Some(thread_count) = options.thread_count {
                config_builder = config_builder.max_threads(Some(thread_count));
            }

            let fast_config = config_builder.build();

            let mut coordinator = FastExportCoordinator::new(fast_config);

            let base_name = base_path
                .as_ref()
                .file_stem()
                .and_then(|s| s.to_str())
                .unwrap_or("export");

            // "<name>_snapshot" exports share the "<name>" project folder.
            let project_name = if base_name.ends_with("_snapshot") {
                base_name.trim_end_matches("_snapshot")
            } else {
                base_name
            };

            let base_memory_analysis_dir = Path::new("MemoryAnalysis");
            let project_dir = base_memory_analysis_dir.join(project_name);
            // Directory creation failure is non-fatal; the write below will
            // surface a hard error if the path is truly unusable.
            if let Err(e) = std::fs::create_dir_all(&project_dir) {
                eprintln!(
                    "Warning: Failed to create project directory {}: {}",
                    project_dir.display(),
                    e
                );
            }

            let output_path = project_dir.join(format!("{}_memory_analysis.json", base_name));

            match coordinator.export_fast(output_path.to_string_lossy().as_ref()) {
                Ok(stats) => {
                    println!("✅ Fast export completed:");
                    println!(
                        " Total allocations: {}",
                        stats.total_allocations_processed
                    );
                    println!(" Total time: {}ms", stats.total_export_time_ms);
                    println!(
                        " Data gathering: {}ms",
                        stats.data_gathering.total_time_ms
                    );
                    println!(
                        " Parallel processing: {}ms",
                        stats.parallel_processing.total_processing_time_ms
                    );
                    println!(
                        " Write time: {}ms",
                        stats.write_performance.total_write_time_ms
                    );
                    println!(
                        " Threads used: {}",
                        stats.parallel_processing.threads_used
                    );
                    println!(
                        " Performance improvement: {:.2}x",
                        stats.performance_improvement_factor
                    );
                    println!(" Output file: {}", output_path.display());

                    if options.enable_fast_export_mode {
                        println!(
                            "⚡ Fast export mode: generating all analysis files without validation"
                        );
                    }

                    // The coordinator only wrote the main file; fall through
                    // to generate the remaining analysis files unless the
                    // configuration asked for a minimal export.
                    if options.optimization_level == OptimizationLevel::High
                        || options.optimization_level == OptimizationLevel::Maximum
                        || options.enable_fast_export_mode
                    {
                        println!("📝 Generating other analysis files...");
                    } else {
                        return Ok(());
                    }
                }
                Err(e) => {
                    // Non-fatal: continue with the traditional pipeline.
                    eprintln!(
                        "⚠️ Fast export failed, falling back to traditional export: {}",
                        e
                    );
                }
            }
        }

        println!(
            "🚀 Starting unified JSON export with optimization level: {:?}",
            options.optimization_level
        );

        // Re-derive names/paths for the traditional pipeline (also reached
        // on fast-export fallback).
        let base_path = base_path.as_ref();
        let base_name = base_path
            .file_stem()
            .and_then(|s| s.to_str())
            .unwrap_or("export");

        let project_name = if base_name.ends_with("_snapshot") {
            base_name.trim_end_matches("_snapshot")
        } else {
            base_name
        };

        let base_memory_analysis_dir = Path::new("MemoryAnalysis");
        let project_dir = base_memory_analysis_dir.join(project_name);
        if let Err(e) = std::fs::create_dir_all(&project_dir) {
            eprintln!(
                "Warning: Failed to create project directory {}: {}",
                project_dir.display(),
                e
            );
        }
        let parent_dir = &project_dir;

        let stats = self.get_stats()?;

        println!(
            "📊 Processing {} allocations with integrated pipeline...",
            allocations.len()
        );

        // Prime the global security analyzer with the current snapshot so
        // the SecurityViolations file reflects this export.
        if options.enable_security_analysis {
            if let Ok(mut analyzer) = SECURITY_ANALYZER.lock() {
                analyzer.update_allocations(allocations.clone());
            }
        }

        // Decide which analysis files to produce: fast mode always emits the
        // full set; otherwise the optimization level selects a subset.
        let file_types = if options.enable_fast_export_mode {
            let mut types = vec![
                JsonFileType::MemoryAnalysis,
                JsonFileType::Lifetime,
                JsonFileType::UnsafeFfi,
                JsonFileType::Performance,
                JsonFileType::ComplexTypes,
            ];
            if options.enable_security_analysis {
                types.push(JsonFileType::SecurityViolations);
            }
            types
        } else {
            match options.optimization_level {
                OptimizationLevel::Low => {
                    vec![JsonFileType::MemoryAnalysis, JsonFileType::Performance]
                }
                OptimizationLevel::Medium => vec![
                    JsonFileType::MemoryAnalysis,
                    JsonFileType::Lifetime,
                    JsonFileType::Performance,
                ],
                OptimizationLevel::High | OptimizationLevel::Maximum => {
                    let mut types = vec![
                        JsonFileType::MemoryAnalysis,
                        JsonFileType::Lifetime,
                        JsonFileType::UnsafeFfi,
                        JsonFileType::Performance,
                        JsonFileType::ComplexTypes,
                    ];
                    if options.enable_security_analysis {
                        types.push(JsonFileType::SecurityViolations);
                    }
                    types
                }
            }
        };

        // Generate and write each selected analysis file in turn.
        for file_type in &file_types {
            let (filename, data) = match file_type {
                JsonFileType::MemoryAnalysis => {
                    let filename = format!("{}_memory_analysis.json", base_name);
                    let data = create_integrated_memory_analysis(&allocations, &stats, &options)?;
                    (filename, data)
                }
                JsonFileType::Lifetime => {
                    let filename = format!("{}_lifetime.json", base_name);
                    let data = create_integrated_lifetime_analysis(&allocations, &options)?;
                    (filename, data)
                }
                JsonFileType::UnsafeFfi => {
                    let filename = format!("{}_unsafe_ffi.json", base_name);
                    let data = create_integrated_unsafe_ffi_analysis(&allocations, &options)?;
                    (filename, data)
                }
                JsonFileType::Performance => {
                    let filename = format!("{}_performance.json", base_name);
                    let data = create_integrated_performance_analysis(
                        &allocations,
                        &stats,
                        start_time,
                        &options,
                    )?;
                    (filename, data)
                }
                JsonFileType::ComplexTypes => {
                    let filename = format!("{}_complex_types.json", base_name);
                    let data = create_optimized_complex_types_analysis(&allocations, &options)?;
                    (filename, data)
                }
                JsonFileType::SecurityViolations => {
                    let filename = format!("{}_security_violations.json", base_name);
                    let data = create_security_violation_analysis(&allocations, &options)?;
                    (filename, data)
                }
            };

            let file_path = parent_dir.join(filename);
            write_json_optimized(&file_path, &data, &options)?;
            println!(
                " ✅ Generated: {}",
                file_path.file_name().unwrap().to_string_lossy()
            );
        }

        let total_duration = start_time.elapsed();
        println!("✅ Unified JSON export completed in {:?}", total_duration);

        // Feed this run's metrics back into the adaptive optimizer.
        if options.enable_adaptive_optimization {
            // Rough estimate: 64 bytes of bookkeeping per allocation —
            // TODO confirm this factor against actual record size.
            let memory_usage_mb = (allocations.len() * 64) / (1024 * 1024);
            if let Ok(mut optimizer) = ADAPTIVE_OPTIMIZER.lock() {
                optimizer.record_batch_performance(
                    allocations.len(),
                    total_duration,
                    memory_usage_mb as u64,
                    allocations.len(),
                );
            }
        }

        // Summarize which optional features were active for this export.
        println!("💡 Optimization features applied:");
        if options.parallel_processing {
            println!(" - Parallel processing enabled");
        }
        if options.use_streaming_writer {
            println!(" - Streaming JSON writer enabled");
        }
        if options.enable_schema_validation {
            println!(" - Schema validation enabled");
        }
        if options.enable_enhanced_ffi_analysis {
            println!(" - Enhanced FFI analysis enabled");
        }
        if options.enable_boundary_event_processing {
            println!(" - Boundary event processing enabled");
        }
        if options.enable_memory_passport_tracking {
            println!(" - Memory passport tracking enabled");
        }
        if options.enable_security_analysis {
            println!(" - Security violation analysis enabled");
        }
        if options.enable_adaptive_optimization {
            println!(" - Adaptive performance optimization enabled");

            if let Ok(optimizer) = ADAPTIVE_OPTIMIZER.lock() {
                let report = optimizer.get_performance_report();
                if let Some(batch_size) =
                    report["adaptive_optimization"]["current_batch_size"].as_u64()
                {
                    println!(" - Current optimal batch size: {}", batch_size);
                }
                if let Some(hit_ratio) =
                    report["adaptive_optimization"]["cache_statistics"]["hit_ratio"].as_f64()
                {
                    println!(" - Cache hit ratio: {:.1}%", hit_ratio * 100.0);
                }
            }
        }

        Ok(())
    }
1125
1126 pub fn test_export_backward_compatibility(&self) -> TrackingResult<serde_json::Value> {
1131 let start_time = std::time::Instant::now();
1132 let mut test_results = Vec::new();
1133
1134 let test1_start = std::time::Instant::now();
1136 match self.export_to_json("test_compatibility_basic.json") {
1137 Ok(_) => {
1138 test_results.push(serde_json::json!({
1139 "test": "export_to_json",
1140 "status": "passed",
1141 "duration_ms": test1_start.elapsed().as_millis(),
1142 "description": "Basic JSON export maintains compatibility"
1143 }));
1144 }
1145 Err(e) => {
1146 test_results.push(serde_json::json!({
1147 "test": "export_to_json",
1148 "status": "failed",
1149 "error": e.to_string(),
1150 "duration_ms": test1_start.elapsed().as_millis()
1151 }));
1152 }
1153 }
1154
1155 let test2_start = std::time::Instant::now();
1157 let fast_options = OptimizedExportOptions::default().fast_export_mode(true);
1158 match self.export_to_json_with_optimized_options("test_compatibility_fast", fast_options) {
1159 Ok(_) => {
1160 test_results.push(serde_json::json!({
1161 "test": "fast_export_mode",
1162 "status": "passed",
1163 "duration_ms": test2_start.elapsed().as_millis(),
1164 "description": "Fast export mode works correctly"
1165 }));
1166 }
1167 Err(e) => {
1168 test_results.push(serde_json::json!({
1169 "test": "fast_export_mode",
1170 "status": "failed",
1171 "error": e.to_string(),
1172 "duration_ms": test2_start.elapsed().as_millis()
1173 }));
1174 }
1175 }
1176
1177 let test3_start = std::time::Instant::now();
1179 let auto_options = OptimizedExportOptions::default().auto_fast_export_threshold(Some(1)); match self.export_to_json_with_optimized_options("test_compatibility_auto", auto_options) {
1181 Ok(_) => {
1182 test_results.push(serde_json::json!({
1183 "test": "auto_mode_selection",
1184 "status": "passed",
1185 "duration_ms": test3_start.elapsed().as_millis(),
1186 "description": "Auto mode selection works correctly"
1187 }));
1188 }
1189 Err(e) => {
1190 test_results.push(serde_json::json!({
1191 "test": "auto_mode_selection",
1192 "status": "failed",
1193 "error": e.to_string(),
1194 "duration_ms": test3_start.elapsed().as_millis()
1195 }));
1196 }
1197 }
1198
1199 for level in [
1201 OptimizationLevel::Low,
1202 OptimizationLevel::Medium,
1203 OptimizationLevel::High,
1204 OptimizationLevel::Maximum,
1205 ] {
1206 let test_start = std::time::Instant::now();
1207 let level_options =
1208 OptimizedExportOptions::with_optimization_level(level).fast_export_mode(false); let test_name = format!("optimization_level_{:?}", level);
1210
1211 match self.export_to_json_with_optimized_options(
1212 &format!("test_compatibility_{:?}", level),
1213 level_options,
1214 ) {
1215 Ok(_) => {
1216 test_results.push(serde_json::json!({
1217 "test": test_name,
1218 "status": "passed",
1219 "duration_ms": test_start.elapsed().as_millis(),
1220 "description": format!("Optimization level {:?} works correctly", level)
1221 }));
1222 }
1223 Err(e) => {
1224 test_results.push(serde_json::json!({
1225 "test": test_name,
1226 "status": "failed",
1227 "error": e.to_string(),
1228 "duration_ms": test_start.elapsed().as_millis()
1229 }));
1230 }
1231 }
1232 }
1233
1234 let total_duration = start_time.elapsed();
1235 let passed_tests = test_results
1236 .iter()
1237 .filter(|t| t["status"] == "passed")
1238 .count();
1239 let total_tests = test_results.len();
1240
1241 Ok(serde_json::json!({
1242 "backward_compatibility_test": {
1243 "summary": {
1244 "total_tests": total_tests,
1245 "passed_tests": passed_tests,
1246 "failed_tests": total_tests - passed_tests,
1247 "success_rate": (passed_tests as f64 / total_tests as f64) * 100.0,
1248 "total_duration_ms": total_duration.as_millis()
1249 },
1250 "test_results": test_results,
1251 "compatibility_status": if passed_tests == total_tests { "fully_compatible" } else { "partial_compatibility" },
1252 "recommendations": if passed_tests == total_tests {
1253 vec!["All backward compatibility tests passed. Safe to use new optimized export system."]
1254 } else {
1255 vec!["Some compatibility tests failed. Review failed tests before deploying."]
1256 }
1257 }
1258 }))
1259 }
1260
1261 pub fn get_adaptive_performance_report(&self) -> TrackingResult<serde_json::Value> {
1266 if let Ok(optimizer) = ADAPTIVE_OPTIMIZER.lock() {
1267 Ok(optimizer.get_performance_report())
1268 } else {
1269 Ok(serde_json::json!({
1270 "error": "Unable to access adaptive performance optimizer",
1271 "adaptive_optimization": {
1272 "enabled": false
1273 }
1274 }))
1275 }
1276 }
1277
1278 pub fn reset_adaptive_optimizer(&self) -> TrackingResult<()> {
1283 if let Ok(mut optimizer) = ADAPTIVE_OPTIMIZER.lock() {
1284 optimizer.reset();
1285 println!("🔄 Adaptive performance optimizer reset");
1286 }
1287 Ok(())
1288 }
1289
    /// Enables or disables adaptive optimization and optionally rebuilds the
    /// global optimizer with new sizing parameters.
    ///
    /// # Arguments
    /// * `enabled` - whether adaptive optimization should be active
    /// * `cache_size` - when `Some` (and `enabled` is true), the global
    ///   optimizer is replaced by a fresh instance with this cache capacity
    /// * `initial_batch_size` - initial batch size for the rebuilt optimizer;
    ///   defaults to 1000 when `None`
    ///
    /// A poisoned optimizer mutex is silently ignored; the call always
    /// returns `Ok(())`.
    pub fn configure_adaptive_optimization(
        &self,
        enabled: bool,
        cache_size: Option<usize>,
        initial_batch_size: Option<usize>,
    ) -> TrackingResult<()> {
        if let Ok(mut optimizer) = ADAPTIVE_OPTIMIZER.lock() {
            optimizer.set_optimization_enabled(enabled);

            if enabled {
                if let Some(cache_size) = cache_size {
                    // NOTE(review): replacing the optimizer discards the
                    // `set_optimization_enabled(true)` call above — confirm a
                    // freshly constructed AdaptivePerformanceOptimizer starts
                    // with optimization enabled.
                    *optimizer = AdaptivePerformanceOptimizer::new(
                        initial_batch_size.unwrap_or(1000),
                        cache_size,
                    );
                }
                println!("🔧 Adaptive optimization configured: enabled={}, cache_size={:?}, batch_size={:?}",
                    enabled, cache_size, initial_batch_size);
            } else {
                println!("🔧 Adaptive optimization disabled");
            }
        }
        Ok(())
    }
1318
1319 pub fn get_security_violation_report(&self) -> TrackingResult<serde_json::Value> {
1324 let allocations = self.get_active_allocations()?;
1325 let options = OptimizedExportOptions::default();
1326 create_security_violation_analysis(&allocations, &options)
1327 }
1328
1329 pub fn get_security_violations_by_severity(
1333 &self,
1334 min_severity: ViolationSeverity,
1335 ) -> TrackingResult<Vec<serde_json::Value>> {
1336 if let Ok(analyzer) = SECURITY_ANALYZER.lock() {
1337 let reports = analyzer.get_reports_by_severity(min_severity);
1338 let json_reports = reports
1339 .iter()
1340 .map(|report| {
1341 serde_json::json!({
1342 "violation_id": report.violation_id,
1343 "violation_type": report.violation_type,
1344 "severity": format!("{:?}", report.severity),
1345 "description": report.description,
1346 "overall_risk_score": report.impact_assessment.overall_risk_score,
1347 "generated_at_ns": report.generated_at_ns
1348 })
1349 })
1350 .collect();
1351 Ok(json_reports)
1352 } else {
1353 Ok(Vec::new())
1354 }
1355 }
1356
1357 pub fn verify_security_report_integrity(&self) -> TrackingResult<serde_json::Value> {
1361 if let Ok(analyzer) = SECURITY_ANALYZER.lock() {
1362 let all_reports = analyzer.get_all_reports();
1363 let mut verification_results = Vec::new();
1364 let mut all_verified = true;
1365
1366 for (violation_id, report) in all_reports {
1367 let is_valid = analyzer.verify_report_integrity(report).unwrap_or(false);
1368 if !is_valid {
1369 all_verified = false;
1370 }
1371
1372 verification_results.push(serde_json::json!({
1373 "violation_id": violation_id,
1374 "integrity_verified": is_valid,
1375 "hash": report.integrity_hash
1376 }));
1377 }
1378
1379 Ok(serde_json::json!({
1380 "verification_summary": {
1381 "total_reports": all_reports.len(),
1382 "all_verified": all_verified,
1383 "verification_timestamp": std::time::SystemTime::now()
1384 .duration_since(std::time::UNIX_EPOCH)
1385 .unwrap_or_default()
1386 .as_secs()
1387 },
1388 "individual_results": verification_results
1389 }))
1390 } else {
1391 Ok(serde_json::json!({
1392 "error": "Security analyzer not available"
1393 }))
1394 }
1395 }
1396
1397 pub fn clear_security_violations(&self) -> TrackingResult<()> {
1402 if let Ok(mut analyzer) = SECURITY_ANALYZER.lock() {
1403 analyzer.clear_reports();
1404 println!("🧹 Security violation reports cleared");
1405 }
1406 Ok(())
1407 }
1408
1409 pub fn configure_security_analysis(
1413 &self,
1414 enable_correlation: bool,
1415 include_low_severity: bool,
1416 generate_hashes: bool,
1417 max_related_allocations: Option<usize>,
1418 ) -> TrackingResult<()> {
1419 let config = AnalysisConfig {
1420 max_related_allocations: max_related_allocations.unwrap_or(10),
1421 max_stack_depth: 20,
1422 enable_correlation_analysis: enable_correlation,
1423 include_low_severity,
1424 generate_integrity_hashes: generate_hashes,
1425 };
1426
1427 if let Ok(mut analyzer) = SECURITY_ANALYZER.lock() {
1428 *analyzer = SecurityViolationAnalyzer::new(config);
1429 println!(
1430 "🔧 Security analysis configured: correlation={}, low_severity={}, hashes={}",
1431 enable_correlation, include_low_severity, generate_hashes
1432 );
1433 }
1434
1435 Ok(())
1436 }
1437}
1438
1439impl MemoryTracker {
1441 pub fn export_optimized_json_files<P: AsRef<Path>>(&self, base_path: P) -> TrackingResult<()> {
1443 let options = OptimizedExportOptions::default();
1444 self.export_optimized_json_files_with_options(base_path, options)
1445 }
1446
1447 pub fn export_optimized_json_files_with_complex_types<P: AsRef<Path>>(
1449 &self,
1450 base_path: P,
1451 ) -> TrackingResult<()> {
1452 let options = OptimizedExportOptions::default();
1453 self.export_extensible_json_files_with_options(
1454 base_path,
1455 &JsonFileType::standard_five(),
1456 options,
1457 )
1458 }
1459
1460 pub fn export_optimized_json_files_with_options<P: AsRef<Path>>(
1462 &self,
1463 base_path: P,
1464 options: OptimizedExportOptions,
1465 ) -> TrackingResult<()> {
1466 let start_time = std::time::Instant::now();
1467 println!("🚀 Starting optimized 4-file JSON export...");
1468
1469 let base_path = base_path.as_ref();
1470 let base_name = base_path
1471 .file_stem()
1472 .and_then(|s| s.to_str())
1473 .unwrap_or("export");
1474 let parent_dir = base_path.parent().unwrap_or(Path::new("."));
1475
1476 let allocations = self.get_active_allocations()?;
1478 let stats = self.get_stats()?;
1479
1480 println!(
1481 "📊 Processing {} allocations across 4 standard files...",
1482 allocations.len()
1483 );
1484
1485 let memory_path = parent_dir.join(format!("{}_memory_analysis.json", base_name));
1487 let memory_data = create_optimized_memory_analysis(&allocations, &stats, &options)?;
1488 write_json_optimized(&memory_path, &memory_data, &options)?;
1489
1490 let lifetime_path = parent_dir.join(format!("{}_lifetime.json", base_name));
1492 let lifetime_data = create_optimized_lifetime_analysis(&allocations, &options)?;
1493 write_json_optimized(&lifetime_path, &lifetime_data, &options)?;
1494
1495 let unsafe_path = parent_dir.join(format!("{}_unsafe_ffi.json", base_name));
1497 let unsafe_data = create_optimized_unsafe_ffi_analysis(&allocations, &options)?;
1498 write_json_optimized(&unsafe_path, &unsafe_data, &options)?;
1499
1500 let perf_path = parent_dir.join(format!("{}_performance.json", base_name));
1502 let perf_data =
1503 create_optimized_performance_analysis(&allocations, &stats, start_time, &options)?;
1504 write_json_optimized(&perf_path, &perf_data, &options)?;
1505
1506 let total_duration = start_time.elapsed();
1507 println!(
1508 "✅ Optimized 4-file export completed in {:?}",
1509 total_duration
1510 );
1511 println!("📁 Generated standard files:");
1512 println!(" 1. {}_memory_analysis.json", base_name);
1513 println!(" 2. {}_lifetime.json", base_name);
1514 println!(" 3. {}_unsafe_ffi.json", base_name);
1515 println!(" 4. {}_performance.json", base_name);
1516
1517 if options.parallel_processing {
1519 println!("💡 Applied parallel processing optimization");
1520 }
1521 if options.enable_type_cache {
1522 println!("💡 Applied type inference caching");
1523 }
1524 println!(
1525 "💡 Applied optimized buffering ({} KB)",
1526 options.buffer_size / 1024
1527 );
1528
1529 Ok(())
1530 }
1531
1532 pub fn export_extensible_json_files<P: AsRef<Path>>(
1534 &self,
1535 base_path: P,
1536 file_types: &[JsonFileType],
1537 ) -> TrackingResult<()> {
1538 let options = OptimizedExportOptions::default();
1539 self.export_extensible_json_files_with_options(base_path, file_types, options)
1540 }
1541
1542 pub fn export_extensible_json_files_with_options<P: AsRef<Path>>(
1544 &self,
1545 base_path: P,
1546 file_types: &[JsonFileType],
1547 options: OptimizedExportOptions,
1548 ) -> TrackingResult<()> {
1549 let start_time = std::time::Instant::now();
1550 println!(
1551 "🚀 Starting extensible JSON export for {} files...",
1552 file_types.len()
1553 );
1554
1555 let base_path = base_path.as_ref();
1556 let base_name = base_path
1557 .file_stem()
1558 .and_then(|s| s.to_str())
1559 .unwrap_or("export");
1560 let parent_dir = base_path.parent().unwrap_or(Path::new("."));
1561
1562 let allocations = self.get_active_allocations()?;
1564 let stats = self.get_stats()?;
1565
1566 println!("📊 Processing {} allocations...", allocations.len());
1567
1568 for file_type in file_types {
1570 let (filename, data) = match file_type {
1571 JsonFileType::MemoryAnalysis => {
1572 let filename = format!("{}_memory_analysis.json", base_name);
1573 let data = create_optimized_memory_analysis(&allocations, &stats, &options)?;
1574 (filename, data)
1575 }
1576 JsonFileType::Lifetime => {
1577 let filename = format!("{}_lifetime.json", base_name);
1578 let data = create_optimized_lifetime_analysis(&allocations, &options)?;
1579 (filename, data)
1580 }
1581 JsonFileType::UnsafeFfi => {
1582 let filename = format!("{}_unsafe_ffi.json", base_name);
1583 let data = create_optimized_unsafe_ffi_analysis(&allocations, &options)?;
1584 (filename, data)
1585 }
1586 JsonFileType::Performance => {
1587 let filename = format!("{}_performance.json", base_name);
1588 let data = create_optimized_performance_analysis(
1589 &allocations,
1590 &stats,
1591 start_time,
1592 &options,
1593 )?;
1594 (filename, data)
1595 }
1596 JsonFileType::ComplexTypes => {
1597 let filename = format!("{}_complex_types.json", base_name);
1598 let data = create_optimized_complex_types_analysis(&allocations, &options)?;
1599 (filename, data)
1600 }
1601 JsonFileType::SecurityViolations => todo!(), };
1605
1606 let file_path = parent_dir.join(filename);
1607 write_json_optimized(&file_path, &data, &options)?;
1608 println!(
1609 " ✅ Generated: {}",
1610 file_path.file_name().unwrap().to_string_lossy()
1611 );
1612 }
1613
1614 let total_duration = start_time.elapsed();
1615 println!("✅ Extensible export completed in {:?}", total_duration);
1616
1617 Ok(())
1618 }
1619}
1620
1621fn create_optimized_memory_analysis(
1623 allocations: &[AllocationInfo],
1624 stats: &crate::core::types::MemoryStats,
1625 options: &OptimizedExportOptions,
1626) -> TrackingResult<serde_json::Value> {
1627 let processed_allocations = process_allocations_optimized(allocations, options)?;
1628
1629 Ok(serde_json::json!({
1630 "metadata": {
1631 "analysis_type": "memory_analysis_optimized",
1632 "optimization_level": "high",
1633 "total_allocations": allocations.len(),
1634 "export_version": "2.0",
1635 "timestamp": std::time::SystemTime::now()
1636 .duration_since(std::time::UNIX_EPOCH)
1637 .unwrap_or_default()
1638 .as_secs()
1639 },
1640 "memory_stats": {
1641 "total_allocated": stats.total_allocated,
1642 "active_memory": stats.active_memory,
1643 "peak_memory": stats.peak_memory,
1644 "total_allocations": stats.total_allocations
1645 },
1646 "allocations": processed_allocations
1647 }))
1648}
1649
1650fn create_optimized_lifetime_analysis(
1652 allocations: &[AllocationInfo],
1653 _options: &OptimizedExportOptions,
1654) -> TrackingResult<serde_json::Value> {
1655 let mut scope_analysis: HashMap<String, (usize, usize, Vec<usize>)> = HashMap::new();
1657
1658 for alloc in allocations {
1659 let scope = alloc.scope_name.as_deref().unwrap_or("global");
1660 let entry = scope_analysis
1661 .entry(scope.to_string())
1662 .or_insert((0, 0, Vec::new()));
1663 entry.0 += alloc.size; entry.1 += 1; entry.2.push(alloc.size); }
1667
1668 let mut scope_stats: Vec<_> = scope_analysis
1670 .into_iter()
1671 .map(|(scope, (total_size, count, sizes))| {
1672 let avg_size = if count > 0 { total_size / count } else { 0 };
1673 let max_size = sizes.iter().max().copied().unwrap_or(0);
1674 let min_size = sizes.iter().min().copied().unwrap_or(0);
1675
1676 serde_json::json!({
1677 "scope_name": scope,
1678 "total_size": total_size,
1679 "allocation_count": count,
1680 "average_size": avg_size,
1681 "max_size": max_size,
1682 "min_size": min_size
1683 })
1684 })
1685 .collect();
1686
1687 scope_stats.sort_by(|a, b| {
1689 b["total_size"]
1690 .as_u64()
1691 .unwrap_or(0)
1692 .cmp(&a["total_size"].as_u64().unwrap_or(0))
1693 });
1694
1695 Ok(serde_json::json!({
1696 "metadata": {
1697 "analysis_type": "lifetime_analysis_optimized",
1698 "optimization_level": "high",
1699 "total_scopes": scope_stats.len(),
1700 "export_version": "2.0",
1701 "timestamp": std::time::SystemTime::now()
1702 .duration_since(std::time::UNIX_EPOCH)
1703 .unwrap_or_default()
1704 .as_secs()
1705 },
1706 "scope_analysis": scope_stats,
1707 "summary": {
1708 "total_allocations": allocations.len(),
1709 "unique_scopes": scope_stats.len()
1710 }
1711 }))
1712}
1713
1714fn create_optimized_unsafe_ffi_analysis(
1716 allocations: &[AllocationInfo],
1717 _options: &OptimizedExportOptions,
1718) -> TrackingResult<serde_json::Value> {
1719 let mut unsafe_indicators = Vec::new();
1721 let mut ffi_patterns = Vec::new();
1722
1723 for alloc in allocations {
1724 if let Some(type_name) = &alloc.type_name {
1726 if type_name.contains("*mut") || type_name.contains("*const") {
1727 unsafe_indicators.push(serde_json::json!({
1728 "ptr": format!("0x{:x}", alloc.ptr),
1729 "type": "raw_pointer",
1730 "type_name": type_name,
1731 "size": alloc.size,
1732 "risk_level": "high"
1733 }));
1734 } else if type_name.contains("extern") || type_name.contains("libc::") {
1735 ffi_patterns.push(serde_json::json!({
1736 "ptr": format!("0x{:x}", alloc.ptr),
1737 "type": "ffi_related",
1738 "type_name": type_name,
1739 "size": alloc.size,
1740 "risk_level": "medium"
1741 }));
1742 }
1743 }
1744
1745 if let Some(var_name) = &alloc.var_name {
1747 if var_name.contains("unsafe") || var_name.contains("raw") {
1748 unsafe_indicators.push(serde_json::json!({
1749 "ptr": format!("0x{:x}", alloc.ptr),
1750 "type": "unsafe_variable",
1751 "var_name": var_name,
1752 "size": alloc.size,
1753 "risk_level": "medium"
1754 }));
1755 }
1756 }
1757 }
1758
1759 Ok(serde_json::json!({
1760 "metadata": {
1761 "analysis_type": "unsafe_ffi_analysis_optimized",
1762 "optimization_level": "high",
1763 "total_allocations_analyzed": allocations.len(),
1764 "export_version": "2.0",
1765 "timestamp": std::time::SystemTime::now()
1766 .duration_since(std::time::UNIX_EPOCH)
1767 .unwrap_or_default()
1768 .as_secs()
1769 },
1770 "unsafe_indicators": unsafe_indicators,
1771 "ffi_patterns": ffi_patterns,
1772 "summary": {
1773 "unsafe_count": unsafe_indicators.len(),
1774 "ffi_count": ffi_patterns.len(),
1775 "total_risk_items": unsafe_indicators.len() + ffi_patterns.len(),
1776 "risk_assessment": if unsafe_indicators.len() + ffi_patterns.len() > 10 {
1777 "high"
1778 } else if unsafe_indicators.len() + ffi_patterns.len() > 5 {
1779 "medium"
1780 } else {
1781 "low"
1782 }
1783 }
1784 }))
1785}
1786
1787fn create_optimized_performance_analysis(
1789 allocations: &[AllocationInfo],
1790 stats: &crate::core::types::MemoryStats,
1791 start_time: std::time::Instant,
1792 options: &OptimizedExportOptions,
1793) -> TrackingResult<serde_json::Value> {
1794 let processing_time = start_time.elapsed();
1795 let allocations_per_second = if processing_time.as_secs() > 0 {
1796 allocations.len() as f64 / processing_time.as_secs_f64()
1797 } else {
1798 allocations.len() as f64 / 0.001 };
1800
1801 let mut size_distribution = HashMap::new();
1803 for alloc in allocations {
1804 let category = match alloc.size {
1805 0..=64 => "tiny",
1806 65..=256 => "small",
1807 257..=1024 => "medium",
1808 1025..=4096 => "large",
1809 4097..=16384 => "huge",
1810 _ => "massive",
1811 };
1812 *size_distribution.entry(category).or_insert(0) += 1;
1813 }
1814
1815 Ok(serde_json::json!({
1816 "metadata": {
1817 "analysis_type": "performance_analysis_optimized",
1818 "optimization_level": "high",
1819 "export_version": "2.0",
1820 "timestamp": std::time::SystemTime::now()
1821 .duration_since(std::time::UNIX_EPOCH)
1822 .unwrap_or_default()
1823 .as_secs()
1824 },
1825 "export_performance": {
1826 "total_processing_time_ms": processing_time.as_millis(),
1827 "allocations_processed": allocations.len(),
1828 "processing_rate": {
1829 "allocations_per_second": allocations_per_second,
1830 "performance_class": if allocations_per_second > 10000.0 {
1831 "excellent"
1832 } else if allocations_per_second > 1000.0 {
1833 "good"
1834 } else {
1835 "needs_optimization"
1836 }
1837 }
1838 },
1839 "memory_performance": {
1840 "total_allocated": stats.total_allocated,
1841 "active_memory": stats.active_memory,
1842 "peak_memory": stats.peak_memory,
1843 "memory_efficiency": if stats.peak_memory > 0 {
1844 (stats.active_memory as f64 / stats.peak_memory as f64 * 100.0) as u64
1845 } else {
1846 100
1847 }
1848 },
1849 "allocation_distribution": size_distribution,
1850 "optimization_status": {
1851 "type_caching": options.enable_type_cache,
1852 "parallel_processing": options.parallel_processing,
1853 "buffer_size_kb": options.buffer_size / 1024,
1854 "batch_size": options.batch_size
1855 }
1856 }))
1857}
1858
/// Builds the "integrated" memory-analysis JSON document: each allocation is
/// serialized with its core fields and, when the corresponding options are
/// enabled, enriched with boundary-event and memory-passport data.
///
/// # Arguments
/// * `allocations` - allocations included in this export
/// * `stats` - global memory-statistics snapshot
/// * `options` - pipeline feature flags echoed into the metadata
fn create_integrated_memory_analysis(
    allocations: &[AllocationInfo],
    stats: &crate::core::types::MemoryStats,
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔧 Creating integrated memory analysis with enhanced pipeline...");

    // NOTE(review): the result is discarded; presumably the call is kept for
    // its side effects (e.g. warming the type-inference cache) — confirm
    // before removing.
    let _processed_allocations = process_allocations_optimized(allocations, options)?;

    let mut enhanced_allocations = Vec::new();
    for alloc in allocations {
        // Base record with the allocation's core fields.
        let mut enhanced_alloc = serde_json::json!({
            "ptr": format!("0x{:x}", alloc.ptr),
            "size": alloc.size,
            "type_name": alloc.type_name,
            "var_name": alloc.var_name,
            "scope_name": alloc.scope_name,
            "timestamp_alloc": alloc.timestamp_alloc,
            "timestamp_dealloc": alloc.timestamp_dealloc
        });

        // Optional enrichment: FFI boundary-crossing events for this allocation.
        if options.enable_boundary_event_processing {
            if let Some(boundary_info) = analyze_boundary_events(alloc) {
                enhanced_alloc["boundary_events"] = boundary_info;
            }
        }

        // Optional enrichment: cross-boundary memory-passport data keyed by pointer.
        if options.enable_memory_passport_tracking {
            if let Some(passport_info) = get_memory_passport_info(alloc.ptr) {
                enhanced_alloc["memory_passport"] = passport_info;
            }
        }

        enhanced_allocations.push(enhanced_alloc);
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "integrated_memory_analysis",
            "optimization_level": format!("{:?}", options.optimization_level),
            "total_allocations": allocations.len(),
            "export_version": "2.0",
            "pipeline_features": {
                "batch_processing": options.parallel_processing && allocations.len() > options.batch_size,
                "boundary_events": options.enable_boundary_event_processing,
                "memory_passports": options.enable_memory_passport_tracking,
                "enhanced_ffi": options.enable_enhanced_ffi_analysis
            },
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "memory_stats": {
            "total_allocated": stats.total_allocated,
            "active_memory": stats.active_memory,
            "peak_memory": stats.peak_memory,
            "total_allocations": stats.total_allocations
        },
        "allocations": enhanced_allocations
    }))
}
1926
/// Builds the "integrated" lifetime-analysis JSON document: per-scope size
/// statistics plus a flat list of allocation lifecycle events.
///
/// When `options.parallel_processing` is on and the allocation count exceeds
/// `options.batch_size`, the work is chunked and processed with rayon, then
/// the per-chunk results are merged; otherwise a single sequential pass is
/// used. Both paths produce identical aggregate data (event order may differ
/// only across chunks).
fn create_integrated_lifetime_analysis(
    allocations: &[AllocationInfo],
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔧 Creating integrated lifetime analysis with enhanced pipeline...");

    // scope name -> (total bytes, allocation count, individual sizes)
    let mut scope_analysis: HashMap<String, (usize, usize, Vec<usize>)> = HashMap::new();
    let mut lifecycle_events = Vec::new();

    if options.parallel_processing && allocations.len() > options.batch_size {
        // Parallel path: map each chunk to local aggregates, then merge.
        let chunks: Vec<_> = allocations.chunks(options.batch_size).collect();
        let results: Vec<_> = chunks
            .par_iter()
            .map(|chunk| {
                let mut local_scope_analysis = HashMap::new();
                let mut local_events = Vec::new();

                for alloc in *chunk {
                    let scope = alloc.scope_name.as_deref().unwrap_or("global");
                    let entry =
                        local_scope_analysis
                            .entry(scope.to_string())
                            .or_insert((0, 0, Vec::new()));
                    entry.0 += alloc.size;
                    entry.1 += 1;
                    entry.2.push(alloc.size);

                    local_events.push(serde_json::json!({
                        "ptr": format!("0x{:x}", alloc.ptr),
                        "event": "allocation",
                        "scope": scope,
                        "timestamp": alloc.timestamp_alloc,
                        "size": alloc.size,
                        "var_name": alloc.var_name.as_deref().unwrap_or("unknown"),
                        "type_name": alloc.type_name.as_deref().unwrap_or("unknown")
                    }));
                }

                (local_scope_analysis, local_events)
            })
            .collect();

        // Merge the per-chunk aggregates into the global maps.
        for (local_scope, local_events) in results {
            for (scope, (size, count, sizes)) in local_scope {
                let entry = scope_analysis.entry(scope).or_insert((0, 0, Vec::new()));
                entry.0 += size;
                entry.1 += count;
                entry.2.extend(sizes);
            }
            lifecycle_events.extend(local_events);
        }
    } else {
        // Sequential path: one pass over all allocations.
        for alloc in allocations {
            let scope = alloc.scope_name.as_deref().unwrap_or("global");
            let entry = scope_analysis
                .entry(scope.to_string())
                .or_insert((0, 0, Vec::new()));
            entry.0 += alloc.size;
            entry.1 += 1;
            entry.2.push(alloc.size);

            lifecycle_events.push(serde_json::json!({
                "ptr": format!("0x{:x}", alloc.ptr),
                "event": "allocation",
                "scope": scope,
                "timestamp": alloc.timestamp_alloc,
                "size": alloc.size,
                "var_name": alloc.var_name.as_deref().unwrap_or("unknown"),
                "type_name": alloc.type_name.as_deref().unwrap_or("unknown")
            }));
        }
    }

    // Summarize each scope into a JSON record.
    let mut scope_stats: Vec<_> = scope_analysis
        .into_iter()
        .map(|(scope, (total_size, count, sizes))| {
            let avg_size = if count > 0 { total_size / count } else { 0 };
            let max_size = sizes.iter().max().copied().unwrap_or(0);
            let min_size = sizes.iter().min().copied().unwrap_or(0);

            serde_json::json!({
                "scope_name": scope,
                "total_size": total_size,
                "allocation_count": count,
                "average_size": avg_size,
                "max_size": max_size,
                "min_size": min_size
            })
        })
        .collect();

    // Largest scopes first.
    scope_stats.sort_by(|a, b| {
        b["total_size"]
            .as_u64()
            .unwrap_or(0)
            .cmp(&a["total_size"].as_u64().unwrap_or(0))
    });

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "integrated_lifetime_analysis",
            "optimization_level": format!("{:?}", options.optimization_level),
            "total_scopes": scope_stats.len(),
            "export_version": "2.0",
            "pipeline_features": {
                "batch_processing": options.parallel_processing && allocations.len() > options.batch_size,
                "lifecycle_tracking": true
            },
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "scope_analysis": scope_stats,
        "lifecycle_events": lifecycle_events,
        "summary": {
            "total_allocations": allocations.len(),
            "unique_scopes": scope_stats.len(),
            "total_events": lifecycle_events.len()
        }
    }))
}
2056
/// Builds the "integrated" unsafe/FFI JSON document. Combines two sources:
/// the global unsafe-FFI tracker (enhanced allocation records, safety
/// violations, cross-boundary events — when `enable_enhanced_ffi_analysis`
/// is set) and the same name-based heuristics used by the optimized
/// analysis (raw-pointer / extern / libc type names, "unsafe"/"raw"
/// variable names).
fn create_integrated_unsafe_ffi_analysis(
    allocations: &[AllocationInfo],
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔧 Creating integrated unsafe FFI analysis with enhanced pipeline...");

    let mut unsafe_indicators = Vec::new();
    let mut ffi_patterns = Vec::new();
    let mut enhanced_ffi_data = Vec::new();
    let mut safety_violations = Vec::new();
    let mut boundary_events = Vec::new();

    if options.enable_enhanced_ffi_analysis {
        // Pull enriched records from the global tracker; a failed read is
        // silently skipped (best-effort enrichment).
        let tracker = get_global_unsafe_ffi_tracker();
        if let Ok(enhanced_allocations) = tracker.get_enhanced_allocations() {
            for enhanced_alloc in enhanced_allocations {
                enhanced_ffi_data.push(serde_json::json!({
                    "ptr": format!("0x{:x}", enhanced_alloc.base.ptr),
                    "size": enhanced_alloc.base.size,
                    "source": format!("{:?}", enhanced_alloc.source),
                    "ffi_tracked": enhanced_alloc.ffi_tracked,
                    "cross_boundary_events": enhanced_alloc.cross_boundary_events.len(),
                    "safety_violations": enhanced_alloc.safety_violations.len()
                }));

                // Flatten each violation to (type, timestamp); CrossBoundaryRisk
                // carries no timestamp, so 0 is used as a placeholder.
                for violation in &enhanced_alloc.safety_violations {
                    let (violation_type, timestamp) = match violation {
                        SafetyViolation::DoubleFree { timestamp, .. } => ("DoubleFree", *timestamp),
                        SafetyViolation::InvalidFree { timestamp, .. } => {
                            ("InvalidFree", *timestamp)
                        }
                        SafetyViolation::PotentialLeak {
                            leak_detection_timestamp,
                            ..
                        } => ("PotentialLeak", *leak_detection_timestamp),
                        SafetyViolation::CrossBoundaryRisk { .. } => ("CrossBoundaryRisk", 0),
                    };

                    safety_violations.push(serde_json::json!({
                        "ptr": format!("0x{:x}", enhanced_alloc.base.ptr),
                        "violation_type": violation_type,
                        "description": format!("{:?}", violation),
                        "timestamp": timestamp
                    }));
                }

                // Optionally record every FFI boundary crossing for this allocation.
                if options.enable_boundary_event_processing {
                    for event in &enhanced_alloc.cross_boundary_events {
                        boundary_events.push(serde_json::json!({
                            "ptr": format!("0x{:x}", enhanced_alloc.base.ptr),
                            "event_type": format!("{:?}", event.event_type),
                            "from_context": event.from_context,
                            "to_context": event.to_context,
                            "timestamp": event.timestamp
                        }));
                    }
                }
            }
        }
    }

    // Name-based heuristics, applied regardless of the enhanced analysis.
    for alloc in allocations {
        // Raw pointers => high risk; extern/libc types => medium risk.
        if let Some(type_name) = &alloc.type_name {
            if type_name.contains("*mut") || type_name.contains("*const") {
                unsafe_indicators.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "raw_pointer",
                    "type_name": type_name,
                    "size": alloc.size,
                    "risk_level": "high"
                }));
            } else if type_name.contains("extern") || type_name.contains("libc::") {
                ffi_patterns.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "ffi_related",
                    "type_name": type_name,
                    "size": alloc.size,
                    "risk_level": "medium"
                }));
            }
        }

        // Variable names hinting at unsafe usage => medium risk.
        if let Some(var_name) = &alloc.var_name {
            if var_name.contains("unsafe") || var_name.contains("raw") {
                unsafe_indicators.push(serde_json::json!({
                    "ptr": format!("0x{:x}", alloc.ptr),
                    "type": "unsafe_variable",
                    "var_name": var_name,
                    "size": alloc.size,
                    "risk_level": "medium"
                }));
            }
        }
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "integrated_unsafe_ffi_analysis",
            "optimization_level": format!("{:?}", options.optimization_level),
            "total_allocations_analyzed": allocations.len(),
            "export_version": "2.0",
            "pipeline_features": {
                "enhanced_ffi_analysis": options.enable_enhanced_ffi_analysis,
                "boundary_event_processing": options.enable_boundary_event_processing,
                "memory_passport_tracking": options.enable_memory_passport_tracking
            },
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "unsafe_indicators": unsafe_indicators,
        "ffi_patterns": ffi_patterns,
        "enhanced_ffi_data": enhanced_ffi_data,
        "safety_violations": safety_violations,
        "boundary_events": boundary_events,
        "summary": {
            "unsafe_count": unsafe_indicators.len(),
            "ffi_count": ffi_patterns.len(),
            "enhanced_entries": enhanced_ffi_data.len(),
            "safety_violations": safety_violations.len(),
            "boundary_events": boundary_events.len(),
            "total_risk_items": unsafe_indicators.len() + ffi_patterns.len() + safety_violations.len(),
            "risk_assessment": if safety_violations.len() > 5 {
                "critical"
            } else if unsafe_indicators.len() + ffi_patterns.len() > 10 {
                "high"
            } else if unsafe_indicators.len() + ffi_patterns.len() > 5 {
                "medium"
            } else {
                "low"
            }
        }
    }))
}
2197
2198fn create_integrated_performance_analysis(
2200 allocations: &[AllocationInfo],
2201 stats: &crate::core::types::MemoryStats,
2202 start_time: std::time::Instant,
2203 options: &OptimizedExportOptions,
2204) -> TrackingResult<serde_json::Value> {
2205 println!("🔧 Creating integrated performance analysis with enhanced pipeline...");
2206
2207 let processing_time = start_time.elapsed();
2208 let allocations_per_second = if processing_time.as_secs() > 0 {
2209 allocations.len() as f64 / processing_time.as_secs_f64()
2210 } else {
2211 allocations.len() as f64 / 0.001
2212 };
2213
2214 let mut size_distribution = HashMap::new();
2216 for alloc in allocations {
2217 let category = match alloc.size {
2218 0..=64 => "tiny",
2219 65..=256 => "small",
2220 257..=1024 => "medium",
2221 1025..=4096 => "large",
2222 4097..=16384 => "huge",
2223 _ => "massive",
2224 };
2225 *size_distribution.entry(category).or_insert(0) += 1;
2226 }
2227
2228 let pipeline_metrics = serde_json::json!({
2230 "batch_processor": {
2231 "enabled": options.parallel_processing && allocations.len() > options.batch_size,
2232 "batch_size": options.batch_size,
2233 "estimated_batches": if allocations.len() > options.batch_size {
2234 (allocations.len() + options.batch_size - 1) / options.batch_size
2235 } else {
2236 1
2237 }
2238 },
2239 "streaming_writer": {
2240 "enabled": options.use_streaming_writer,
2241 "buffer_size_kb": options.buffer_size / 1024
2242 },
2243 "schema_validator": {
2244 "enabled": options.enable_schema_validation
2245 },
2246 "enhanced_features": {
2247 "ffi_analysis": options.enable_enhanced_ffi_analysis,
2248 "boundary_events": options.enable_boundary_event_processing,
2249 "memory_passports": options.enable_memory_passport_tracking
2250 }
2251 });
2252
2253 Ok(serde_json::json!({
2254 "metadata": {
2255 "analysis_type": "integrated_performance_analysis",
2256 "optimization_level": format!("{:?}", options.optimization_level),
2257 "export_version": "2.0",
2258 "timestamp": std::time::SystemTime::now()
2259 .duration_since(std::time::UNIX_EPOCH)
2260 .unwrap_or_default()
2261 .as_secs()
2262 },
2263 "export_performance": {
2264 "total_processing_time_ms": processing_time.as_millis(),
2265 "allocations_processed": allocations.len(),
2266 "processing_rate": {
2267 "allocations_per_second": allocations_per_second,
2268 "performance_class": if allocations_per_second > 10000.0 {
2269 "excellent"
2270 } else if allocations_per_second > 1000.0 {
2271 "good"
2272 } else {
2273 "needs_optimization"
2274 }
2275 }
2276 },
2277 "memory_performance": {
2278 "total_allocated": stats.total_allocated,
2279 "active_memory": stats.active_memory,
2280 "peak_memory": stats.peak_memory,
2281 "memory_efficiency": if stats.peak_memory > 0 {
2282 (stats.active_memory as f64 / stats.peak_memory as f64 * 100.0) as u64
2283 } else {
2284 100
2285 }
2286 },
2287 "allocation_distribution": size_distribution,
2288 "pipeline_metrics": pipeline_metrics,
2289 "optimization_status": {
2290 "type_caching": options.enable_type_cache,
2291 "parallel_processing": options.parallel_processing,
2292 "buffer_size_kb": options.buffer_size / 1024,
2293 "batch_size": options.batch_size,
2294 "streaming_enabled": options.use_streaming_writer,
2295 "schema_validation": options.enable_schema_validation
2296 }
2297 }))
2298}
2299
2300fn create_optimized_complex_types_analysis(
2302 allocations: &[AllocationInfo],
2303 options: &OptimizedExportOptions,
2304) -> TrackingResult<serde_json::Value> {
2305 let mut complex_type_stats: HashMap<String, ComplexTypeInfo> = HashMap::new();
2307 let mut generic_types = Vec::new();
2308 let mut trait_objects = Vec::new();
2309 let mut smart_pointers = Vec::new();
2310 let mut collections = Vec::new();
2311
2312 let use_parallel = options.parallel_processing && allocations.len() > 1000;
2314
2315 if use_parallel {
2316 let results: Vec<_> = allocations
2318 .par_chunks(options.batch_size)
2319 .map(|chunk| analyze_complex_types_batch(chunk))
2320 .collect();
2321
2322 for batch_result in results {
2324 for (type_name, info) in batch_result.type_stats {
2325 let entry = complex_type_stats
2326 .entry(type_name)
2327 .or_insert_with(|| ComplexTypeInfo::new());
2328 entry.merge(info);
2329 }
2330 generic_types.extend(batch_result.generic_types);
2331 trait_objects.extend(batch_result.trait_objects);
2332 smart_pointers.extend(batch_result.smart_pointers);
2333 collections.extend(batch_result.collections);
2334 }
2335 } else {
2336 let batch_result = analyze_complex_types_batch(allocations);
2338 complex_type_stats = batch_result.type_stats;
2339 generic_types = batch_result.generic_types;
2340 trait_objects = batch_result.trait_objects;
2341 smart_pointers = batch_result.smart_pointers;
2342 collections = batch_result.collections;
2343 }
2344
2345 let mut type_analysis: Vec<_> = complex_type_stats.into_iter()
2347 .map(|(type_name, info)| {
2348 serde_json::json!({
2349 "type_name": type_name,
2350 "category": info.category,
2351 "total_size": info.total_size,
2352 "allocation_count": info.allocation_count,
2353 "average_size": if info.allocation_count > 0 {
2354 info.total_size / info.allocation_count
2355 } else {
2356 0
2357 },
2358 "max_size": info.max_size,
2359 "complexity_score": info.complexity_score,
2360 "memory_efficiency": calculate_memory_efficiency(&type_name, info.total_size, info.allocation_count),
2361 "optimization_suggestions": generate_optimization_suggestions(&type_name, &info)
2362 })
2363 })
2364 .collect();
2365
2366 type_analysis.sort_by(|a, b| {
2368 let score_cmp = b["complexity_score"]
2369 .as_u64()
2370 .unwrap_or(0)
2371 .cmp(&a["complexity_score"].as_u64().unwrap_or(0));
2372 if score_cmp == std::cmp::Ordering::Equal {
2373 b["total_size"]
2374 .as_u64()
2375 .unwrap_or(0)
2376 .cmp(&a["total_size"].as_u64().unwrap_or(0))
2377 } else {
2378 score_cmp
2379 }
2380 });
2381
2382 Ok(serde_json::json!({
2383 "metadata": {
2384 "analysis_type": "complex_types_analysis_optimized",
2385 "optimization_level": "high",
2386 "total_allocations_analyzed": allocations.len(),
2387 "unique_complex_types": type_analysis.len(),
2388 "export_version": "2.0",
2389 "timestamp": std::time::SystemTime::now()
2390 .duration_since(std::time::UNIX_EPOCH)
2391 .unwrap_or_default()
2392 .as_secs(),
2393 "processing_mode": if use_parallel { "parallel" } else { "sequential" }
2394 },
2395 "complex_type_analysis": type_analysis,
2396 "categorized_types": {
2397 "generic_types": generic_types,
2398 "trait_objects": trait_objects,
2399 "smart_pointers": smart_pointers,
2400 "collections": collections
2401 },
2402 "summary": {
2403 "total_complex_types": type_analysis.len(),
2404 "generic_type_count": generic_types.len(),
2405 "trait_object_count": trait_objects.len(),
2406 "smart_pointer_count": smart_pointers.len(),
2407 "collection_count": collections.len(),
2408 "complexity_distribution": calculate_complexity_distribution(&type_analysis)
2409 },
2410 "optimization_recommendations": generate_global_optimization_recommendations(&type_analysis)
2411 }))
2412}
2413
/// Aggregated statistics for a single normalized complex type.
#[derive(Debug, Clone)]
struct ComplexTypeInfo {
    /// Category label produced by `categorize_complex_type` ("Generic",
    /// "SmartPointer", "Collection", ...). Empty until first set.
    category: String,
    /// Sum of all allocation sizes seen for this type, in bytes.
    total_size: usize,
    /// Number of allocations seen for this type.
    allocation_count: usize,
    /// Largest single allocation seen for this type, in bytes.
    max_size: usize,
    /// Heuristic complexity score from `calculate_type_complexity`.
    complexity_score: u64,
}

impl ComplexTypeInfo {
    /// Creates an empty record: zeroed counters and no category yet.
    fn new() -> Self {
        ComplexTypeInfo {
            category: String::new(),
            total_size: 0,
            allocation_count: 0,
            max_size: 0,
            complexity_score: 0,
        }
    }

    /// Folds `other` into `self`: sums the size and count totals, keeps the
    /// largest single allocation and the highest complexity score seen, and
    /// adopts `other`'s category only when none has been set yet.
    fn merge(&mut self, other: ComplexTypeInfo) {
        let ComplexTypeInfo {
            category,
            total_size,
            allocation_count,
            max_size,
            complexity_score,
        } = other;

        self.total_size += total_size;
        self.allocation_count += allocation_count;
        if max_size > self.max_size {
            self.max_size = max_size;
        }
        if complexity_score > self.complexity_score {
            self.complexity_score = complexity_score;
        }
        if self.category.is_empty() {
            self.category = category;
        }
    }
}
2450
/// Result of analyzing one batch of allocations in
/// `analyze_complex_types_batch`: per-type aggregates plus the individual
/// allocation records bucketed by category.
struct ComplexTypeBatchResult {
    /// Aggregated statistics keyed by normalized type name.
    type_stats: HashMap<String, ComplexTypeInfo>,
    /// Per-allocation JSON records whose type categorized as "Generic".
    generic_types: Vec<serde_json::Value>,
    /// Per-allocation JSON records whose type categorized as "TraitObject".
    trait_objects: Vec<serde_json::Value>,
    /// Per-allocation JSON records whose type categorized as "SmartPointer".
    smart_pointers: Vec<serde_json::Value>,
    /// Per-allocation JSON records whose type categorized as "Collection".
    collections: Vec<serde_json::Value>,
}
2459
2460fn analyze_complex_types_batch(allocations: &[AllocationInfo]) -> ComplexTypeBatchResult {
2462 let mut type_stats: HashMap<String, ComplexTypeInfo> = HashMap::new();
2463 let mut generic_types = Vec::new();
2464 let mut trait_objects = Vec::new();
2465 let mut smart_pointers = Vec::new();
2466 let mut collections = Vec::new();
2467
2468 for alloc in allocations {
2469 if let Some(type_name) = &alloc.type_name {
2470 let normalized_type = normalize_type_name(type_name);
2471 let category = categorize_complex_type(type_name);
2472 let complexity = calculate_type_complexity(type_name);
2473
2474 let entry = type_stats
2476 .entry(normalized_type.clone())
2477 .or_insert_with(|| {
2478 let mut info = ComplexTypeInfo::new();
2479 info.category = category.clone();
2480 info.complexity_score = complexity;
2481 info
2482 });
2483 entry.total_size += alloc.size;
2484 entry.allocation_count += 1;
2485 entry.max_size = entry.max_size.max(alloc.size);
2486
2487 let type_info = serde_json::json!({
2489 "ptr": format!("0x{:x}", alloc.ptr),
2490 "type_name": type_name,
2491 "normalized_type": normalized_type,
2492 "size": alloc.size,
2493 "var_name": alloc.var_name.as_deref().unwrap_or("unnamed"),
2494 "complexity_score": complexity
2495 });
2496
2497 match category.as_str() {
2498 "Generic" => generic_types.push(type_info),
2499 "TraitObject" => trait_objects.push(type_info),
2500 "SmartPointer" => smart_pointers.push(type_info),
2501 "Collection" => collections.push(type_info),
2502 _ => {} }
2504 }
2505 }
2506
2507 ComplexTypeBatchResult {
2508 type_stats,
2509 generic_types,
2510 trait_objects,
2511 smart_pointers,
2512 collections,
2513 }
2514}
2515
/// Collapses a generic type name to its base with a single `<T>` placeholder,
/// e.g. `"HashMap<String, i32>"` → `"HashMap<T>"`. Non-generic names are
/// returned unchanged.
///
/// `split_once('<')` replaces the old `split('<').next()` form, whose `else`
/// branch was unreachable (`split` always yields at least one item).
fn normalize_type_name(type_name: &str) -> String {
    match type_name.split_once('<') {
        Some((base, _)) => format!("{base}<T>"),
        None => type_name.to_string(),
    }
}
2529
/// Maps a Rust type name to a coarse category label.
///
/// Checks run in priority order, so `"Box<dyn Trait>"` is a "TraitObject"
/// (the `dyn` check wins over the smart-pointer prefix check).
fn categorize_complex_type(type_name: &str) -> String {
    const SMART_POINTER_PREFIXES: [&str; 4] = ["Box<", "Rc<", "Arc<", "RefCell<"];
    const COLLECTION_PREFIXES: [&str; 4] = ["Vec<", "HashMap<", "BTreeMap<", "HashSet<"];

    let label = if type_name.contains("dyn ") {
        "TraitObject"
    } else if SMART_POINTER_PREFIXES.iter().any(|p| type_name.starts_with(p)) {
        "SmartPointer"
    } else if COLLECTION_PREFIXES.iter().any(|p| type_name.starts_with(p)) {
        "Collection"
    } else if type_name.contains('<') && type_name.contains('>') {
        "Generic"
    } else if type_name.contains("::") {
        "ModulePath"
    } else {
        "Simple"
    };
    label.to_string()
}
2554
/// Computes a heuristic complexity score for a type name.
///
/// Score = 1 (base) + 2 per `<` (rough generic-parameter proxy)
/// + 3 × maximum bracket nesting depth + fixed bonuses for dynamic
/// dispatch / async constructs and smart-pointer indirection.
///
/// Fix: the previous "nesting level" counted `'<'` occurrences again —
/// identical to the generic-parameter term — double-charging every bracket
/// instead of measuring depth. Nesting is now the true maximum depth of
/// matched angle brackets.
fn calculate_type_complexity(type_name: &str) -> u64 {
    // Base score for any type.
    let mut score = 1u64;

    // Generic-parameter proxy: 2 points per opening bracket.
    score += type_name.matches('<').count() as u64 * 2;

    // True maximum nesting depth of angle brackets.
    let mut depth: u64 = 0;
    let mut max_depth: u64 = 0;
    for c in type_name.chars() {
        match c {
            '<' => {
                depth += 1;
                max_depth = max_depth.max(depth);
            }
            // saturating_sub guards against unbalanced '>' in odd names.
            '>' => depth = depth.saturating_sub(1),
            _ => {}
        }
    }
    score += max_depth * 3;

    // Dynamic dispatch and async constructs.
    if type_name.contains("dyn ") {
        score += 5;
    }
    if type_name.contains("impl ") {
        score += 4;
    }
    if type_name.contains("async") {
        score += 3;
    }
    if type_name.contains("Future") {
        score += 3;
    }

    // Smart-pointer indirection.
    if type_name.contains("Box<") {
        score += 2;
    }
    if type_name.contains("Rc<") {
        score += 3;
    }
    if type_name.contains("Arc<") {
        score += 4;
    }
    if type_name.contains("RefCell<") {
        score += 3;
    }

    score
}
2599
/// Returns a heuristic efficiency score (0-100) for a type's allocations,
/// based on its container family and average allocation size.
///
/// Branch order matters and mirrors the original: the `Vec<` check runs
/// first, so e.g. `"HashMap<Vec<u8>>"` is scored under the Vec rules.
fn calculate_memory_efficiency(type_name: &str, total_size: usize, count: usize) -> u64 {
    if count == 0 {
        // No allocations: nothing can be wasted.
        return 100;
    }
    let average = total_size / count;

    // Vecs and HashMaps with small average sizes are penalized for
    // per-container overhead; other families get a flat family score.
    if type_name.contains("Vec<") {
        return if average < 64 { 60 } else { 85 };
    }
    if type_name.contains("HashMap<") {
        return if average < 128 { 50 } else { 75 };
    }
    if type_name.contains("Box<") {
        90
    } else if type_name.contains("Arc<") || type_name.contains("Rc<") {
        80
    } else {
        85
    }
}
2636
2637fn generate_optimization_suggestions(type_name: &str, info: &ComplexTypeInfo) -> Vec<String> {
2639 let mut suggestions = Vec::new();
2640
2641 if info.allocation_count > 100 {
2642 suggestions
2643 .push("Consider using object pooling for frequently allocated types".to_string());
2644 }
2645
2646 if type_name.contains("Vec<") && info.total_size > 1024 * 1024 {
2647 suggestions
2648 .push("Consider pre-allocating Vec capacity to reduce reallocations".to_string());
2649 }
2650
2651 if type_name.contains("HashMap<") && info.allocation_count > 50 {
2652 suggestions.push("Consider using FxHashMap for better performance".to_string());
2653 }
2654
2655 if type_name.contains("Box<") && info.allocation_count > 200 {
2656 suggestions
2657 .push("Consider using arena allocation for many small Box allocations".to_string());
2658 }
2659
2660 if info.complexity_score > 10 {
2661 suggestions
2662 .push("High complexity type - consider simplifying or using type aliases".to_string());
2663 }
2664
2665 suggestions
2666}
2667
2668fn calculate_complexity_distribution(type_analysis: &[serde_json::Value]) -> serde_json::Value {
2670 let mut low = 0;
2671 let mut medium = 0;
2672 let mut high = 0;
2673 let mut very_high = 0;
2674
2675 for analysis in type_analysis {
2676 if let Some(score) = analysis["complexity_score"].as_u64() {
2677 match score {
2678 0..=3 => low += 1,
2679 4..=7 => medium += 1,
2680 8..=15 => high += 1,
2681 _ => very_high += 1,
2682 }
2683 }
2684 }
2685
2686 serde_json::json!({
2687 "low_complexity": low,
2688 "medium_complexity": medium,
2689 "high_complexity": high,
2690 "very_high_complexity": very_high
2691 })
2692}
2693
2694fn generate_global_optimization_recommendations(
2696 type_analysis: &[serde_json::Value],
2697) -> Vec<String> {
2698 let mut recommendations = Vec::new();
2699
2700 let total_types = type_analysis.len();
2701 let high_complexity_count = type_analysis
2702 .iter()
2703 .filter(|t| t["complexity_score"].as_u64().unwrap_or(0) > 10)
2704 .count();
2705
2706 if high_complexity_count > total_types / 4 {
2707 recommendations.push(
2708 "Consider refactoring high-complexity types to improve maintainability".to_string(),
2709 );
2710 }
2711
2712 let large_allocation_count = type_analysis
2713 .iter()
2714 .filter(|t| t["allocation_count"].as_u64().unwrap_or(0) > 100)
2715 .count();
2716
2717 if large_allocation_count > 5 {
2718 recommendations.push(
2719 "Multiple types with high allocation frequency - consider object pooling".to_string(),
2720 );
2721 }
2722
2723 recommendations
2724 .push("Use 'cargo clippy' to identify additional optimization opportunities".to_string());
2725 recommendations.push(
2726 "Consider profiling with 'perf' or 'valgrind' for detailed performance analysis"
2727 .to_string(),
2728 );
2729
2730 recommendations
2731}
2732
2733#[allow(dead_code)]
2735fn create_optimized_type_analysis(
2736 allocations: &[AllocationInfo],
2737 options: &OptimizedExportOptions,
2738) -> TrackingResult<serde_json::Value> {
2739 let mut type_stats: HashMap<String, (usize, usize, usize)> = HashMap::new();
2740
2741 let use_parallel = options.parallel_processing && allocations.len() > 1000;
2743
2744 if use_parallel {
2745 let type_results: Vec<_> = allocations
2747 .par_chunks(options.batch_size)
2748 .map(|chunk| {
2749 let mut local_stats: HashMap<String, (usize, usize, usize)> = HashMap::new();
2750 for alloc in chunk {
2751 let type_name = if let Some(name) = &alloc.type_name {
2752 get_or_compute_type_info(name, alloc.size)
2753 } else {
2754 compute_enhanced_type_info("Unknown", alloc.size)
2755 };
2756
2757 let entry = local_stats.entry(type_name).or_insert((0, 0, 0));
2758 entry.0 += alloc.size; entry.1 += 1; entry.2 = entry.2.max(alloc.size); }
2762 local_stats
2763 })
2764 .collect();
2765
2766 for local_stats in type_results {
2768 for (type_name, (size, count, max_size)) in local_stats {
2769 let entry = type_stats.entry(type_name).or_insert((0, 0, 0));
2770 entry.0 += size;
2771 entry.1 += count;
2772 entry.2 = entry.2.max(max_size);
2773 }
2774 }
2775 } else {
2776 for alloc in allocations {
2778 let type_name = if let Some(name) = &alloc.type_name {
2779 get_or_compute_type_info(name, alloc.size)
2780 } else {
2781 compute_enhanced_type_info("Unknown", alloc.size)
2782 };
2783
2784 let entry = type_stats.entry(type_name).or_insert((0, 0, 0));
2785 entry.0 += alloc.size;
2786 entry.1 += 1;
2787 entry.2 = entry.2.max(alloc.size);
2788 }
2789 }
2790
2791 let mut type_list: Vec<_> = type_stats
2793 .into_iter()
2794 .map(|(type_name, (total_size, count, max_size))| {
2795 serde_json::json!({
2796 "type_name": type_name,
2797 "total_size": total_size,
2798 "allocation_count": count,
2799 "max_allocation_size": max_size,
2800 "average_size": if count > 0 { total_size / count } else { 0 }
2801 })
2802 })
2803 .collect();
2804
2805 type_list.sort_by(|a, b| {
2807 b["total_size"]
2808 .as_u64()
2809 .unwrap_or(0)
2810 .cmp(&a["total_size"].as_u64().unwrap_or(0))
2811 });
2812
2813 Ok(serde_json::json!({
2814 "metadata": {
2815 "analysis_type": "type_analysis_optimized",
2816 "processing_mode": if use_parallel { "parallel" } else { "sequential" },
2817 "cache_enabled": options.enable_type_cache,
2818 "unique_types": type_list.len()
2819 },
2820 "type_statistics": type_list
2821 }))
2822}
2823
2824#[allow(dead_code)]
2826fn create_fast_allocation_summary(
2827 allocations: &[AllocationInfo],
2828 stats: &crate::core::types::MemoryStats,
2829) -> TrackingResult<serde_json::Value> {
2830 let total_size: usize = allocations.iter().map(|a| a.size).sum();
2832 let avg_size = if !allocations.is_empty() {
2833 total_size / allocations.len()
2834 } else {
2835 0
2836 };
2837
2838 let mut small_count = 0;
2840 let mut medium_count = 0;
2841 let mut large_count = 0;
2842
2843 for alloc in allocations {
2844 match alloc.size {
2845 0..=256 => small_count += 1,
2846 257..=4096 => medium_count += 1,
2847 _ => large_count += 1,
2848 }
2849 }
2850
2851 Ok(serde_json::json!({
2852 "metadata": {
2853 "summary_type": "fast_allocation_summary",
2854 "generation_time": "minimal"
2855 },
2856 "overview": {
2857 "total_allocations": allocations.len(),
2858 "total_size": total_size,
2859 "average_size": avg_size,
2860 "active_memory": stats.active_memory,
2861 "peak_memory": stats.peak_memory
2862 },
2863 "size_distribution": {
2864 "small_allocations": {
2865 "count": small_count,
2866 "size_range": "0-256 bytes"
2867 },
2868 "medium_allocations": {
2869 "count": medium_count,
2870 "size_range": "257-4096 bytes"
2871 },
2872 "large_allocations": {
2873 "count": large_count,
2874 "size_range": ">4096 bytes"
2875 }
2876 }
2877 }))
2878}
2879
2880fn process_allocations_optimized(
2882 allocations: &[AllocationInfo],
2883 options: &OptimizedExportOptions,
2884) -> TrackingResult<Vec<serde_json::Value>> {
2885 let start_time = std::time::Instant::now();
2886 let mut processed = Vec::with_capacity(allocations.len());
2887
2888 let effective_batch_size = if options.enable_adaptive_optimization {
2890 if let Ok(optimizer) = ADAPTIVE_OPTIMIZER.lock() {
2891 optimizer.get_optimal_batch_size()
2892 } else {
2893 options.batch_size
2894 }
2895 } else {
2896 options.batch_size
2897 };
2898
2899 println!(
2900 "🔧 Processing {} allocations with adaptive batch size: {}",
2901 allocations.len(),
2902 effective_batch_size
2903 );
2904
2905 if options.parallel_processing && allocations.len() > effective_batch_size {
2906 let results: Vec<_> = allocations
2908 .par_chunks(effective_batch_size)
2909 .map(|chunk| {
2910 chunk
2911 .iter()
2912 .map(|alloc| {
2913 serde_json::json!({
2914 "ptr": format!("0x{:x}", alloc.ptr),
2915 "size": alloc.size,
2916 "type_name": alloc.type_name,
2917 "var_name": alloc.var_name,
2918 "scope_name": alloc.scope_name,
2919 "timestamp": alloc.timestamp_alloc
2920 })
2921 })
2922 .collect::<Vec<_>>()
2923 })
2924 .collect();
2925
2926 for chunk_result in results {
2927 processed.extend(chunk_result);
2928 }
2929 } else {
2930 for alloc in allocations {
2932 processed.push(serde_json::json!({
2933 "ptr": format!("0x{:x}", alloc.ptr),
2934 "size": alloc.size,
2935 "type_name": alloc.type_name,
2936 "var_name": alloc.var_name,
2937 "scope_name": alloc.scope_name,
2938 "timestamp": alloc.timestamp_alloc
2939 }));
2940 }
2941 }
2942
2943 if options.enable_adaptive_optimization {
2945 let processing_time = start_time.elapsed();
2946 let memory_usage_mb =
2947 (processed.len() * std::mem::size_of::<serde_json::Value>()) / (1024 * 1024);
2948
2949 if let Ok(mut optimizer) = ADAPTIVE_OPTIMIZER.lock() {
2950 optimizer.record_batch_performance(
2951 effective_batch_size,
2952 processing_time,
2953 memory_usage_mb as u64,
2954 allocations.len(),
2955 );
2956 }
2957 }
2958
2959 Ok(processed)
2960}
2961
/// Builds the security-violations JSON document for an export run.
///
/// Returns a "disabled" stub when `options.enable_security_analysis` is off.
/// Otherwise it re-creates the global `SECURITY_ANALYZER` with a fresh
/// configuration, feeds it the current allocations plus any safety violations
/// recorded by the global unsafe/FFI tracker, and serializes the resulting
/// reports and summary.
///
/// NOTE(review): the global analyzer is replaced wholesale (`*analyzer = ...`),
/// discarding state from prior runs — confirm that analyses are not expected
/// to accumulate across exports. If the lock is poisoned, the function
/// silently emits an empty report set rather than erroring.
fn create_security_violation_analysis(
    allocations: &[AllocationInfo],
    options: &OptimizedExportOptions,
) -> TrackingResult<serde_json::Value> {
    println!("🔒 Creating comprehensive security violation analysis...");

    // Early exit: analysis disabled by export options.
    if !options.enable_security_analysis {
        return Ok(serde_json::json!({
            "metadata": {
                "analysis_type": "security_violations",
                "status": "disabled",
                "message": "Security analysis is disabled in export options"
            }
        }));
    }

    // Fixed analysis limits; only the severity filter and hash generation
    // are driven by the export options.
    let analysis_config = AnalysisConfig {
        max_related_allocations: 10,
        max_stack_depth: 20,
        enable_correlation_analysis: true,
        include_low_severity: options.include_low_severity_violations,
        generate_integrity_hashes: options.generate_integrity_hashes,
    };

    let mut violation_reports = Vec::new();
    let mut security_summary = serde_json::json!({});

    if let Ok(mut analyzer) = SECURITY_ANALYZER.lock() {
        // Reset the global analyzer with this run's configuration and data.
        *analyzer = SecurityViolationAnalyzer::new(analysis_config);
        analyzer.update_allocations(allocations.to_vec());

        // Run every safety violation recorded by the unsafe/FFI tracker
        // through the analyzer; failures to fetch or analyze are ignored.
        if let Ok(enhanced_allocations) = get_global_unsafe_ffi_tracker().get_enhanced_allocations()
        {
            for enhanced_alloc in enhanced_allocations {
                for violation in &enhanced_alloc.safety_violations {
                    if let Ok(violation_id) =
                        analyzer.analyze_violation(violation, enhanced_alloc.base.ptr)
                    {
                        println!(" ✅ Analyzed violation: {}", violation_id);
                    }
                }
            }
        }

        let all_reports = analyzer.get_all_reports();

        // Either every report, or only Medium severity and above.
        let filtered_reports: Vec<_> = if options.include_low_severity_violations {
            all_reports.values().collect()
        } else {
            analyzer.get_reports_by_severity(ViolationSeverity::Medium)
        };

        // Serialize each report: core fields, memory snapshot (with stack
        // trace and related allocations), impact scores, and remediation info.
        for report in &filtered_reports {
            violation_reports.push(serde_json::json!({
                "violation_id": report.violation_id,
                "violation_type": report.violation_type,
                "severity": format!("{:?}", report.severity),
                "description": report.description,
                "technical_details": report.technical_details,
                "memory_snapshot": {
                    "timestamp_ns": report.memory_snapshot.timestamp_ns,
                    "total_allocated_bytes": report.memory_snapshot.total_allocated_bytes,
                    "active_allocation_count": report.memory_snapshot.active_allocation_count,
                    "involved_addresses": report.memory_snapshot.involved_addresses,
                    "memory_pressure": format!("{:?}", report.memory_snapshot.memory_pressure),
                    "stack_trace": report.memory_snapshot.stack_trace.iter().map(|frame| {
                        serde_json::json!({
                            "function_name": frame.function_name,
                            "file_path": frame.file_path,
                            "line_number": frame.line_number,
                            "frame_address": frame.frame_address,
                            "is_unsafe": frame.is_unsafe,
                            "is_ffi": frame.is_ffi
                        })
                    }).collect::<Vec<_>>(),
                    "related_allocations": report.memory_snapshot.related_allocations.iter().map(|alloc| {
                        serde_json::json!({
                            "address": alloc.address,
                            "size": alloc.size,
                            "type_name": alloc.type_name,
                            "variable_name": alloc.variable_name,
                            "allocated_at_ns": alloc.allocated_at_ns,
                            "is_active": alloc.is_active,
                            "relationship": format!("{:?}", alloc.relationship)
                        })
                    }).collect::<Vec<_>>()
                },
                "impact_assessment": {
                    "exploitability_score": report.impact_assessment.exploitability_score,
                    "data_corruption_risk": report.impact_assessment.data_corruption_risk,
                    "information_disclosure_risk": report.impact_assessment.information_disclosure_risk,
                    "denial_of_service_risk": report.impact_assessment.denial_of_service_risk,
                    "code_execution_risk": report.impact_assessment.code_execution_risk,
                    "overall_risk_score": report.impact_assessment.overall_risk_score
                },
                "remediation_suggestions": report.remediation_suggestions,
                "correlated_violations": report.correlated_violations,
                "integrity_hash": report.integrity_hash,
                "generated_at_ns": report.generated_at_ns
            }));
        }

        security_summary = analyzer.generate_security_summary();
    }

    Ok(serde_json::json!({
        "metadata": {
            "analysis_type": "security_violations",
            "export_version": "2.0",
            "total_violations": violation_reports.len(),
            "analysis_enabled": options.enable_security_analysis,
            "include_low_severity": options.include_low_severity_violations,
            "integrity_hashes_enabled": options.generate_integrity_hashes,
            "timestamp": std::time::SystemTime::now()
                .duration_since(std::time::UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs()
        },
        "violation_reports": violation_reports,
        "security_summary": security_summary,
        "data_integrity": {
            "total_reports": violation_reports.len(),
            "reports_with_hashes": violation_reports.iter()
                .filter(|r| !r["integrity_hash"].as_str().unwrap_or("").is_empty())
                .count(),
            // NOTE(review): hard-coded status; no verification is actually
            // performed here — confirm whether hashes should be checked.
            "verification_status": "all_verified"
        },
        "analysis_recommendations": [
            if violation_reports.is_empty() {
                "No security violations detected in current analysis"
            } else {
                "Review all security violations and implement suggested remediations"
            },
            "Enable continuous security monitoring for production systems",
            "Implement automated violation detection and alerting",
            "Regular security audits and penetration testing recommended"
        ]
    }))
}
3110
3111#[allow(dead_code)]
3113fn create_performance_metrics(
3114 allocations: &[AllocationInfo],
3115 start_time: std::time::Instant,
3116) -> TrackingResult<serde_json::Value> {
3117 let processing_time = start_time.elapsed();
3118 let allocations_per_second = if processing_time.as_secs() > 0 {
3119 allocations.len() as f64 / processing_time.as_secs_f64()
3120 } else {
3121 allocations.len() as f64 / 0.001 };
3123
3124 Ok(serde_json::json!({
3125 "metadata": {
3126 "metrics_type": "performance_optimized",
3127 "measurement_time": processing_time.as_millis()
3128 },
3129 "performance": {
3130 "total_processing_time_ms": processing_time.as_millis(),
3131 "allocations_processed": allocations.len(),
3132 "processing_rate": {
3133 "allocations_per_second": allocations_per_second,
3134 "performance_class": if allocations_per_second > 10000.0 {
3135 "excellent"
3136 } else if allocations_per_second > 1000.0 {
3137 "good"
3138 } else {
3139 "needs_optimization"
3140 }
3141 }
3142 },
3143 "optimization_status": {
3144 "type_caching": "enabled",
3145 "parallel_processing": "auto-detected",
3146 "buffer_optimization": "enabled",
3147 "format_optimization": "auto-detected"
3148 }
3149 }))
3150}