use crate::async_memory::visualization::VisualizationConfig;
use crate::lockfree::LockfreeAnalysis;
use std::collections::HashMap;
use std::fs;

/// Aggregated input consumed by the hybrid (thread + task) dashboard renderer.
#[derive(Debug)]
pub struct HybridAnalysisData {
    pub lockfree_analysis: Option<LockfreeAnalysis>,
    pub visualization_config: VisualizationConfig,
    pub thread_task_mapping: HashMap<usize, Vec<usize>>,
    pub variable_registry: HashMap<String, VariableDetail>,
    pub performance_metrics: PerformanceTimeSeries,
}

/// Sampled performance metrics, with per-thread breakdowns.
#[derive(Debug)]
pub struct PerformanceTimeSeries {
    pub cpu_usage: Vec<f64>,
    pub memory_usage: Vec<u64>,
    pub io_operations: Vec<u64>,
    pub network_bytes: Vec<u64>,
    pub timestamps: Vec<u64>,
    pub thread_cpu_breakdown: HashMap<usize, Vec<f64>>,
    pub thread_memory_breakdown: HashMap<usize, Vec<u64>>,
}

/// Per-variable tracking details rendered as dashboard cards and JSON.
#[derive(Debug, Clone)]
pub struct VariableDetail {
    pub name: String,
    pub type_info: String,
    pub thread_id: usize,
    pub task_id: Option<usize>,
    pub allocation_count: u64,
    pub memory_usage: u64,
    pub lifecycle_stage: LifecycleStage,
}

/// Lifecycle stage of a tracked variable.
#[derive(Debug, Clone)]
pub enum LifecycleStage {
    Allocated,
    Active,
    Shared,
    Deallocated,
}

/// Rendering granularity for the hybrid dashboard.
#[derive(Debug, Clone)]
pub enum RenderMode {
    Comprehensive,
    ThreadFocused,
    VariableDetailed,
}

/// Template-driven generator for the hybrid analysis dashboard.
pub struct FixedHybridTemplate {
    pub output_path: String,
    pub thread_count: usize,
    pub task_count: usize,
    pub render_mode: RenderMode,
}

impl FixedHybridTemplate {
    pub fn new(thread_count: usize, task_count: usize) -> Self {
        Self {
            output_path: "simple_hybrid_dashboard_variable_detailed.html".to_string(),
            thread_count,
            task_count,
            render_mode: RenderMode::VariableDetailed,
        }
    }

    pub fn with_render_mode(mut self, mode: RenderMode) -> Self {
        self.render_mode = mode;
        self
    }

    /// Currently a no-op; the flag is accepted but ignored.
    pub fn with_variable_details(self, _enable: bool) -> Self {
        self
    }

    /// Currently a no-op; the flag is accepted but ignored.
    pub fn with_enhanced_insights(self, _enable: bool) -> Self {
        self
    }

    /// Renders the dashboard from `templates/hybrid_dashboard.html`,
    /// also checking `../templates/` before giving up.
    pub fn generate_hybrid_dashboard(
        &self,
        data: &HybridAnalysisData,
    ) -> Result<String, Box<dyn std::error::Error>> {
        let template_content = match fs::read_to_string("templates/hybrid_dashboard.html") {
            Ok(content) => content,
            Err(_) => match fs::read_to_string("../templates/hybrid_dashboard.html") {
                Ok(content) => content,
                Err(_) => {
                    return Err("Template file templates/hybrid_dashboard.html not found".into())
                }
            },
        };

        Ok(self.render_with_template(&template_content, data))
    }

    fn render_with_template(&self, template: &str, data: &HybridAnalysisData) -> String {
        let mut html = template.to_string();

        // Summary header placeholders.
        html = html.replace("{{TITLE}}", "🔬 Memory Analysis Dashboard");
        html = html.replace(
            "{{TOTAL_MEMORY}}",
            &format!("{:.1}MB", self.calculate_total_memory(data)),
        );
        html = html.replace(
            "{{TOTAL_VARIABLES}}",
            &data.variable_registry.len().to_string(),
        );
        html = html.replace(
            "{{THREAD_COUNT}}",
            &data
                .lockfree_analysis
                .as_ref()
                .map_or(0, |a| a.thread_stats.len())
                .to_string(),
        );
        // Fixed placeholder efficiency value.
        html = html.replace("{{EFFICIENCY}}", "85.2");

        // Variable cards and the per-thread memory map.
        let variables: Vec<VariableDetail> = data.variable_registry.values().cloned().collect();
        html = html.replace(
            "{{VARIABLES_HTML}}",
            &self.generate_variables_html(&variables),
        );
        html = html.replace("{{MEMORY_MAP_HTML}}", &self.generate_memory_map_html(data));

        // JSON payloads consumed by the dashboard's JavaScript.
        html = html.replace(
            "{{VARIABLES_DATA}}",
            &self.serialize_variables_for_js(&variables),
        );
        html = html.replace("{{THREADS_DATA}}", &self.serialize_threads_for_js(data));
        html = html.replace("{{TASKS_DATA}}", &self.serialize_tasks_for_js(data));

        // Insight placeholders (leaks, bottlenecks, scores, patterns).
        html = self.replace_insights_placeholders(html, data);

        html
    }

    /// Total tracked memory across all variables, in MB.
    fn calculate_total_memory(&self, data: &HybridAnalysisData) -> f64 {
        data.variable_registry
            .values()
            .map(|v| v.memory_usage as f64 / 1024.0 / 1024.0)
            .sum()
    }

    fn generate_variables_html(&self, variables: &[VariableDetail]) -> String {
        let mut html = String::new();

        for variable in variables.iter().take(50) {
            let status_class = match variable.lifecycle_stage {
                LifecycleStage::Active => "status-active",
                LifecycleStage::Allocated => "status-allocated",
                LifecycleStage::Shared => "status-shared",
                LifecycleStage::Deallocated => "status-deallocated",
            };

            let status_icon = match variable.lifecycle_stage {
                LifecycleStage::Active => "🟢",
                LifecycleStage::Allocated => "🟡",
                LifecycleStage::Shared => "🔄",
                LifecycleStage::Deallocated => "⚫",
            };

            let performance_category = self.classify_variable_performance(variable);
            let category_class = match performance_category.as_str() {
                "cpu" => "cpu-intensive",
                "io" => "io-intensive",
                "memory" => "memory-intensive",
                "async" => "async-heavy",
                _ => "normal",
            };

            let size_kb = variable.memory_usage / 1024;

            html.push_str(&format!(
                r#"<div class="variable-card {}" data-category="{}" data-thread="{}" data-memory="{}" data-allocations="{}" onclick="window.drillDown('{}', 'memory')">
                <div class="variable-name">{} {}</div>
                <div class="variable-info">
                    <span class="{}">{}KB | {} allocs | {}</span>
                    <span>Thread {}</span>
                </div>
                <div class="performance-indicator">
                    <span class="perf-badge {}">{}</span>
                </div>
            </div>"#,
                category_class,
                performance_category,
                variable.thread_id,
                size_kb,
                variable.allocation_count,
                variable.name,
                status_icon,
                variable.name,
                status_class,
                size_kb,
                variable.allocation_count,
                match variable.lifecycle_stage {
                    LifecycleStage::Active => "Active",
                    LifecycleStage::Allocated => "Allocated",
                    LifecycleStage::Shared => "Shared",
                    LifecycleStage::Deallocated => "Deallocated",
                },
                variable.thread_id,
                category_class,
                self.get_performance_label(&performance_category)
            ));
        }

        html
    }

    fn classify_variable_performance(&self, variable: &VariableDetail) -> String {
        let size_kb = variable.memory_usage / 1024;
        let allocation_rate = variable.allocation_count;
        let var_name = variable.name.to_lowercase();

        if var_name.contains("buffer") || var_name.contains("cache") || size_kb > 500 {
            "memory".to_string()
        } else if var_name.contains("cpu") || var_name.contains("compute") || allocation_rate > 100
        {
            "cpu".to_string()
        } else if var_name.contains("io") || var_name.contains("file") || var_name.contains("net") {
            "io".to_string()
        } else if var_name.contains("async")
            || var_name.contains("future")
            || var_name.contains("task")
        {
            "async".to_string()
        } else {
            "normal".to_string()
        }
    }

    fn get_performance_label(&self, category: &str) -> &str {
        match category {
            "cpu" => "CPU",
            "io" => "I/O",
            "memory" => "MEM",
            "async" => "ASYNC",
            _ => "NORM",
        }
    }

    fn generate_memory_map_html(&self, data: &HybridAnalysisData) -> String {
        let mut html = String::new();
        html.push_str("<div class='memory-map-grid'>");

        let mut thread_groups = std::collections::HashMap::new();
        for variable in data.variable_registry.values() {
            thread_groups
                .entry(variable.thread_id)
                .or_insert_with(Vec::new)
                .push(variable);
        }

        for (thread_id, thread_vars) in thread_groups.iter().take(8) {
            let total_memory: u64 = thread_vars.iter().map(|v| v.memory_usage).sum();
            let total_memory_mb = total_memory as f64 / 1024.0 / 1024.0;

            html.push_str(&format!(
                r#"<div class="memory-thread-block">
                <h4>Thread {} ({:.1}MB)</h4>
                <div class="thread-variables">"#,
                thread_id, total_memory_mb
            ));

            for variable in thread_vars.iter().take(10) {
                let size_kb = variable.memory_usage / 1024;
                html.push_str(&format!(
                    r#"<div class="memory-var-block" style="width: {}px; height: 20px; background: var(--primary); margin: 2px; display: inline-block; opacity: 0.7;" title="{}: {}KB"></div>"#,
                    (size_kb / 10).clamp(10, 100),
                    variable.name,
                    size_kb
                ));
            }

            html.push_str("</div></div>");
        }

        html.push_str("</div>");
        html
    }

    fn serialize_variables_for_js(&self, variables: &[VariableDetail]) -> String {
        let mut json_items = Vec::new();

        for variable in variables.iter().take(100) {
            json_items.push(format!(
                r#"{{"name":"{}","size":{},"thread":{},"state":"{}","allocs":{}}}"#,
                variable.name,
                variable.memory_usage,
                variable.thread_id,
                match variable.lifecycle_stage {
                    LifecycleStage::Active => "Active",
                    LifecycleStage::Allocated => "Allocated",
                    LifecycleStage::Shared => "Shared",
                    LifecycleStage::Deallocated => "Deallocated",
                },
                variable.allocation_count
            ));
        }

        format!("[{}]", json_items.join(","))
    }

    fn serialize_threads_for_js(&self, data: &HybridAnalysisData) -> String {
        let mut thread_data = std::collections::HashMap::new();

        for variable in data.variable_registry.values() {
            let entry = thread_data
                .entry(variable.thread_id)
                .or_insert_with(|| (0usize, 0usize));
            entry.0 += variable.memory_usage as usize;
            entry.1 += 1;
        }

        let mut json_items = Vec::new();
        for (thread_id, (memory, count)) in thread_data {
            json_items.push(format!(
                r#"{{"id":{},"memory":{},"variables":{}}}"#,
                thread_id, memory, count
            ));
        }

        format!("[{}]", json_items.join(","))
    }

    fn serialize_tasks_for_js(&self, data: &HybridAnalysisData) -> String {
        let mut task_data = std::collections::HashMap::new();

        for variable in data.variable_registry.values() {
            if let Some(task_id) = variable.task_id {
                let entry = task_data
                    .entry(task_id)
                    .or_insert_with(|| (0usize, 0usize, variable.thread_id));
                entry.0 += variable.memory_usage as usize;
                entry.1 += 1;
            }
        }

        let mut json_items = Vec::new();
        for (task_id, (memory, count, thread_id)) in task_data {
            json_items.push(format!(
                r#"{{"id":{},"memory":{},"variables":{},"thread":{}}}"#,
                task_id, memory, count, thread_id
            ));
        }

        format!("[{}]", json_items.join(","))
    }

    fn replace_insights_placeholders(&self, mut html: String, data: &HybridAnalysisData) -> String {
        // High-usage, small-allocation, and bottleneck analysis.
        let (high_usage_thread, max_allocation_size, high_frequency) =
            self.analyze_high_usage(data);
        let (small_alloc_count, small_alloc_rate) = self.analyze_small_allocations(data);
        let (bottleneck_thread, bottleneck_rate, bottleneck_percent) =
            self.analyze_bottlenecks(data);

        html = html.replace("{{HIGH_USAGE_THREAD}}", &high_usage_thread.to_string());
        html = html.replace("{{MAX_ALLOCATION_SIZE}}", &max_allocation_size.to_string());
        html = html.replace("{{HIGH_FREQUENCY}}", &high_frequency.to_string());
        html = html.replace("{{SMALL_ALLOC_COUNT}}", &small_alloc_count.to_string());
        html = html.replace("{{SMALL_ALLOC_RATE}}", &format!("{}/sec", small_alloc_rate));

        // Memory-leak status.
        let (leak_status_class, leak_icon, leak_status_title, leak_status_description) =
            self.analyze_memory_leaks(data);
        html = html.replace("{{LEAK_STATUS_CLASS}}", &leak_status_class);
        html = html.replace("{{LEAK_ICON}}", &leak_icon);
        html = html.replace("{{LEAK_STATUS_TITLE}}", &leak_status_title);
        html = html.replace("{{LEAK_STATUS_DESCRIPTION}}", &leak_status_description);

        // Efficiency, overhead, and potential-leak counters.
        let memory_efficiency = self.calculate_memory_efficiency(data);
        let memory_overhead = self.calculate_memory_overhead(data);
        let potential_leaks = self.count_potential_leaks(data);

        html = html.replace(
            "{{MEMORY_EFFICIENCY}}",
            &format!("{:.1}", memory_efficiency),
        );
        html = html.replace("{{MEMORY_OVERHEAD}}", &format!("{:.2}MB", memory_overhead));
        html = html.replace("{{POTENTIAL_LEAKS}}", &potential_leaks.to_string());

        html = html.replace("{{BOTTLENECK_THREAD}}", &bottleneck_thread.to_string());
        html = html.replace("{{BOTTLENECK_RATE}}", &format!("{:.0}", bottleneck_rate));
        html = html.replace(
            "{{BOTTLENECK_PERCENT}}",
            &format!("{:.0}", bottleneck_percent),
        );
        html = html.replace("{{BOTTLENECK_LOCATION}}", "execute_track_var_workload()");

        // Capacity suggestions.
        let suggested_capacity = self.calculate_suggested_capacity(data);
        let string_capacity = self.calculate_string_capacity(data);
        html = html.replace("{{SUGGESTED_CAPACITY}}", &suggested_capacity.to_string());
        html = html.replace("{{STRING_CAPACITY}}", &string_capacity.to_string());

        // Per-thread-group allocation rates.
        let (thread_0_9_rate, thread_10_19_rate, thread_20_rate) = self.analyze_thread_rates(data);
        html = html.replace("{{THREAD_0_9_RATE}}", &format!("{:.0}", thread_0_9_rate));
        html = html.replace(
            "{{THREAD_10_19_RATE}}",
            &format!("{:.0}", thread_10_19_rate),
        );
        html = html.replace("{{THREAD_20_RATE}}", &format!("{:.0}", thread_20_rate));

        // Composite scores.
        let (memory_score, allocation_score, thread_score, overall_score) =
            self.calculate_scores(data);
        html = html.replace("{{MEMORY_SCORE}}", &memory_score.to_string());
        html = html.replace("{{ALLOCATION_SCORE}}", &allocation_score.to_string());
        html = html.replace("{{THREAD_SCORE}}", &thread_score.to_string());
        html = html.replace("{{OVERALL_SCORE}}", &overall_score.to_string());

        let rate_badge_class = if thread_0_9_rate > 1000.0 {
            "high"
        } else {
            "medium"
        };
        let overall_rate_status = if thread_0_9_rate > 1000.0 {
            "High Load"
        } else {
            "Normal"
        };
        let overall_score_class = if overall_score > 80 {
            "low"
        } else if overall_score > 60 {
            "medium"
        } else {
            "high"
        };

        html = html.replace("{{RATE_BADGE_CLASS}}", rate_badge_class);
        html = html.replace("{{OVERALL_RATE_STATUS}}", overall_rate_status);
        html = html.replace("{{OVERALL_SCORE_CLASS}}", overall_score_class);

        // Advanced pattern and cross-process placeholders.
        html = self.replace_advanced_pattern_variables(html, data);
        html = self.replace_cross_process_variables(html, data);

        html
    }

    fn analyze_high_usage(&self, data: &HybridAnalysisData) -> (usize, u64, u64) {
        let mut max_thread = 0;
        let mut max_memory = 0u64;
        let mut max_frequency = 0u64;

        for var in data.variable_registry.values() {
            if var.memory_usage > max_memory {
                max_memory = var.memory_usage;
                max_thread = var.thread_id;
            }
            if var.allocation_count > max_frequency {
                max_frequency = var.allocation_count;
            }
        }

        // Memory is reported in KB.
        (max_thread, max_memory / 1024, max_frequency)
    }

    fn analyze_small_allocations(&self, data: &HybridAnalysisData) -> (u64, u64) {
        let small_allocations: Vec<_> = data
            .variable_registry
            .values()
            .filter(|v| v.memory_usage < 50 * 1024) // below 50KB
            .collect();

        let count = small_allocations.len() as u64;
        let rate = small_allocations
            .iter()
            .map(|v| v.allocation_count)
            .sum::<u64>()
            / 10; // coarse per-second rate estimate

        (count, rate)
    }

    fn analyze_bottlenecks(&self, data: &HybridAnalysisData) -> (usize, f64, f64) {
        let mut thread_loads = std::collections::HashMap::new();

        for var in data.variable_registry.values() {
            *thread_loads.entry(var.thread_id).or_insert(0u64) += var.allocation_count;
        }

        let max_thread = thread_loads
            .iter()
            .max_by_key(|(_, &count)| count)
            .map(|(&thread_id, _)| thread_id)
            .unwrap_or(0);

        let max_rate = thread_loads.values().max().unwrap_or(&0) * 10;
        // Guard against an empty registry to avoid dividing by zero.
        let avg_rate = thread_loads.values().sum::<u64>() / thread_loads.len().max(1) as u64 * 10;
        let percent_above = if avg_rate > 0 {
            ((max_rate as f64 - avg_rate as f64) / avg_rate as f64) * 100.0
        } else {
            0.0
        };

        (max_thread, max_rate as f64, percent_above)
    }

    fn analyze_memory_leaks(&self, data: &HybridAnalysisData) -> (String, String, String, String) {
        let potential_leaks = data
            .variable_registry
            .values()
            .filter(|v| matches!(v.lifecycle_stage, LifecycleStage::Allocated))
            .count();

        if potential_leaks == 0 {
            (
                "clean".to_string(),
                "✅".to_string(),
                "No Memory Leaks Detected".to_string(),
                "All tracked variables have proper lifecycle management".to_string(),
            )
        } else if potential_leaks < 5 {
            (
                "warning".to_string(),
                "⚠️".to_string(),
                format!("{} Potential Issues", potential_leaks),
                "Some variables may not be properly deallocated".to_string(),
            )
        } else {
            (
                "critical".to_string(),
                "🚨".to_string(),
                format!("{} Memory Leaks Found", potential_leaks),
                "Multiple variables are not being properly deallocated".to_string(),
            )
        }
    }

    fn calculate_memory_efficiency(&self, data: &HybridAnalysisData) -> f64 {
        let active_vars = data
            .variable_registry
            .values()
            .filter(|v| {
                matches!(
                    v.lifecycle_stage,
                    LifecycleStage::Active | LifecycleStage::Shared
                )
            })
            .count();
        let total_vars = data.variable_registry.len();

        if total_vars > 0 {
            (active_vars as f64 / total_vars as f64) * 100.0
        } else {
            100.0
        }
    }

    fn calculate_memory_overhead(&self, _data: &HybridAnalysisData) -> f64 {
        // Fixed estimate of tracking overhead, in MB.
        0.15
    }

    fn count_potential_leaks(&self, data: &HybridAnalysisData) -> usize {
        data.variable_registry
            .values()
            .filter(|v| matches!(v.lifecycle_stage, LifecycleStage::Allocated))
            .count()
    }

    fn calculate_suggested_capacity(&self, data: &HybridAnalysisData) -> usize {
        let avg_vec_size = data
            .variable_registry
            .values()
            .filter(|v| v.type_info.contains("Vec"))
            .map(|v| v.memory_usage / 1024)
            .collect::<Vec<_>>();

        if !avg_vec_size.is_empty() {
            (avg_vec_size.iter().sum::<u64>() / avg_vec_size.len() as u64) as usize
        } else {
            1024
        }
    }

    fn calculate_string_capacity(&self, data: &HybridAnalysisData) -> usize {
        let avg_string_size = data
            .variable_registry
            .values()
            .filter(|v| v.type_info.contains("String") || v.name.contains("string"))
            .map(|v| v.memory_usage)
            .collect::<Vec<_>>();

        if !avg_string_size.is_empty() {
            (avg_string_size.iter().sum::<u64>() / avg_string_size.len() as u64) as usize
        } else {
            256
        }
    }

    fn analyze_thread_rates(&self, data: &HybridAnalysisData) -> (f64, f64, f64) {
        let mut thread_groups = [Vec::new(), Vec::new(), Vec::new()];

        for var in data.variable_registry.values() {
            let group = if var.thread_id <= 9 {
                0
            } else if var.thread_id <= 19 {
                1
            } else {
                2
            };
            thread_groups[group].push(var.allocation_count);
        }

        let rates: Vec<f64> = thread_groups
            .iter()
            .map(|group| {
                if !group.is_empty() {
                    group.iter().sum::<u64>() as f64 / group.len() as f64 * 50.0
                } else {
                    0.0
                }
            })
            .collect();

        (rates[0], rates[1], rates[2])
    }

    fn calculate_scores(&self, data: &HybridAnalysisData) -> (u8, u8, u8, u8) {
        let efficiency = self.calculate_memory_efficiency(data);
        let memory_score = (efficiency * 0.9) as u8;

        let avg_allocs = data
            .variable_registry
            .values()
            .map(|v| v.allocation_count)
            .sum::<u64>()
            / data.variable_registry.len().max(1) as u64;
        let allocation_score = if avg_allocs < 50 {
            90
        } else if avg_allocs < 100 {
            75
        } else {
            60
        };

        let unique_threads = data
            .variable_registry
            .values()
            .map(|v| v.thread_id)
            .collect::<std::collections::HashSet<_>>()
            .len();
        let thread_score = if unique_threads > 10 { 85 } else { 70 };

        // Average in a wider type so the u8 sum cannot overflow.
        let overall_score =
            ((memory_score as u16 + allocation_score as u16 + thread_score as u16) / 3) as u8;

        (memory_score, allocation_score, thread_score, overall_score)
    }

    fn replace_advanced_pattern_variables(
        &self,
        mut html: String,
        data: &HybridAnalysisData,
    ) -> String {
        // Cloning patterns.
        let (clone_var, clone_count, clone_threads, clone_memory_impact, clone_perf_impact) =
            self.analyze_cloning_patterns(data);

        html = html.replace("{{CLONE_VARIABLE_NAME}}", &clone_var);
        html = html.replace("{{CLONE_COUNT}}", &clone_count.to_string());
        html = html.replace("{{CLONE_THREADS}}", &clone_threads.to_string());
        html = html.replace(
            "{{CLONE_MEMORY_IMPACT}}",
            &format!("{:.1}", clone_memory_impact),
        );
        html = html.replace(
            "{{CLONE_PERFORMANCE_IMPACT}}",
            &format!("{:.0}", clone_perf_impact),
        );

        let clone_thread_ids = self.get_clone_thread_distribution(data);
        html = html.replace("{{CLONE_THREAD_1}}", &clone_thread_ids.0.to_string());
        html = html.replace("{{CLONE_THREAD_2}}", &clone_thread_ids.1.to_string());
        html = html.replace("{{CLONE_THREAD_3}}", &clone_thread_ids.2.to_string());
        html = html.replace("{{ADDITIONAL_CLONES}}", &clone_thread_ids.3.to_string());

        // Borrow contention.
        let (contention_var, contention_threads, total_wait) = self.analyze_borrow_contention(data);
        html = html.replace("{{CONTENTION_VARIABLE}}", &contention_var);
        html = html.replace("{{CONTENTION_THREADS}}", &contention_threads.to_string());
        html = html.replace("{{TOTAL_WAIT_TIME}}", &total_wait.to_string());

        let contention_details = self.get_contention_details(data);
        html = html.replace("{{CONTENTION_THREAD_1}}", &contention_details.0.to_string());
        html = html.replace("{{WAIT_TIME_1}}", &contention_details.1.to_string());
        html = html.replace("{{CONTENTION_THREAD_2}}", &contention_details.2.to_string());
        html = html.replace("{{WAIT_TIME_2}}", &contention_details.3.to_string());
        html = html.replace("{{CONTENTION_THREAD_3}}", &contention_details.4.to_string());
        html = html.replace("{{WAIT_TIME_3}}", &contention_details.5.to_string());

        // Allocation spikes.
        let (spike_function, spike_time, spike_size, spike_duration, spike_memory, spike_gc) =
            self.analyze_allocation_spikes(data);

        html = html.replace("{{SPIKE_FUNCTION}}", &spike_function);
        html = html.replace("{{SPIKE_TIME}}", &spike_time);
        html = html.replace("{{SPIKE_SIZE}}", &format!("{:.1}", spike_size));
        html = html.replace("{{SPIKE_DURATION}}", &spike_duration.to_string());
        html = html.replace("{{SPIKE_MEMORY}}", &format!("{:.1}", spike_memory));
        html = html.replace("{{SPIKE_GC_CYCLES}}", &spike_gc.to_string());

        let spike_variables = self.get_spike_variables(data);
        html = html.replace("{{BUFFER_ID}}", &spike_variables.0.to_string());
        html = html.replace("{{BUFFER_SIZE}}", &spike_variables.1.to_string());
        html = html.replace("{{TEMP_SIZE}}", &spike_variables.2.to_string());
        html = html.replace("{{RESULT_SIZE}}", &spike_variables.3.to_string());

        html
    }

    fn analyze_cloning_patterns(
        &self,
        data: &HybridAnalysisData,
    ) -> (String, u64, usize, f64, f64) {
        let potential_clones: Vec<_> = data
            .variable_registry
            .values()
            .filter(|v| {
                v.type_info.contains("Vec")
                    || v.type_info.contains("String")
                    || v.type_info.contains("HashMap")
            })
            .collect();

        if let Some(max_var) = potential_clones.iter().max_by_key(|v| v.memory_usage) {
            let clone_count = max_var.allocation_count * 3;
            let unique_threads = data
                .variable_registry
                .values()
                .filter(|v| v.name.contains(&max_var.name[..max_var.name.len().min(5)]))
                .map(|v| v.thread_id)
                .collect::<std::collections::HashSet<_>>()
                .len();

            let memory_impact =
                (max_var.memory_usage as f64 / 1024.0 / 1024.0) * (clone_count as f64 / 10.0);
            let perf_impact = (clone_count as f64 / 100.0) * 10.0;

            (
                max_var.name.clone(),
                clone_count,
                unique_threads,
                memory_impact,
                perf_impact,
            )
        } else {
            ("shared_data".to_string(), 15, 5, 2.3, 12.0)
        }
    }

    fn get_clone_thread_distribution(
        &self,
        data: &HybridAnalysisData,
    ) -> (usize, usize, usize, usize) {
        let threads: Vec<usize> = data
            .variable_registry
            .values()
            .map(|v| v.thread_id)
            .collect::<std::collections::HashSet<_>>()
            .into_iter()
            .take(5)
            .collect();

        (
            *threads.first().unwrap_or(&0),
            *threads.get(1).unwrap_or(&1),
            *threads.get(2).unwrap_or(&2),
            threads.len().saturating_sub(3),
        )
    }

    fn analyze_borrow_contention(&self, data: &HybridAnalysisData) -> (String, usize, u64) {
        let shared_vars: Vec<_> = data
            .variable_registry
            .values()
            .filter(|v| matches!(v.lifecycle_stage, LifecycleStage::Shared))
            .collect();

        if let Some(contended_var) = shared_vars.first() {
            let thread_count = data
                .variable_registry
                .values()
                .filter(|v| {
                    v.name
                        .contains(&contended_var.name[..contended_var.name.len().min(5)])
                })
                .map(|v| v.thread_id)
                .collect::<std::collections::HashSet<_>>()
                .len();

            (
                contended_var.name.clone(),
                thread_count,
                thread_count as u64 * 15,
            )
        } else {
            ("shared_resource".to_string(), 3, 45)
        }
    }

    fn get_contention_details(
        &self,
        data: &HybridAnalysisData,
    ) -> (usize, u64, usize, u64, usize, u64) {
        let threads: Vec<usize> = data
            .variable_registry
            .values()
            .map(|v| v.thread_id)
            .collect::<std::collections::HashSet<_>>()
            .into_iter()
            .take(3)
            .collect();

        // Thread IDs come from the registry; the wait times are fixed placeholders.
        (
            *threads.first().unwrap_or(&0),
            15,
            *threads.get(1).unwrap_or(&1),
            22,
            *threads.get(2).unwrap_or(&2),
            8,
        )
    }

    fn analyze_allocation_spikes(
        &self,
        _data: &HybridAnalysisData,
    ) -> (String, String, f64, u64, f64, u64) {
        // Static demo values; spike detection does not yet inspect `_data`.
        (
            "process_large_dataset".to_string(),
            "10:23:45".to_string(),
            8.5,
            125,
            12.3,
            3,
        )
    }

    fn get_spike_variables(&self, data: &HybridAnalysisData) -> (usize, u64, u64, u64) {
        let largest_vars: Vec<_> = data
            .variable_registry
            .values()
            .filter(|v| v.memory_usage > 1000)
            .take(3)
            .collect();

        (
            largest_vars.first().map(|v| v.thread_id).unwrap_or(1),
            largest_vars
                .first()
                .map(|v| v.memory_usage / 1024)
                .unwrap_or(256),
            largest_vars
                .get(1)
                .map(|v| v.memory_usage / 1024)
                .unwrap_or(128),
            largest_vars
                .get(2)
                .map(|v| v.memory_usage / 1024)
                .unwrap_or(64),
        )
    }

    fn replace_cross_process_variables(
        &self,
        mut html: String,
        data: &HybridAnalysisData,
    ) -> String {
        let shared_vars = data
            .variable_registry
            .values()
            .filter(|v| matches!(v.lifecycle_stage, LifecycleStage::Shared))
            .count();

        let competition_vars = data
            .variable_registry
            .values()
            .filter(|v| v.allocation_count > 50)
            .count();

        let bottleneck_vars = data
            .variable_registry
            .values()
            .filter(|v| v.memory_usage > 100 * 1024)
            .count();

        let optimization_opportunities = shared_vars + competition_vars + bottleneck_vars;

        html = html.replace("{{CROSS_PROCESS_PATTERNS_COUNT}}", &shared_vars.to_string());
        html = html.replace("{{COMPETITION_COUNT}}", &competition_vars.to_string());
        html = html.replace("{{BOTTLENECK_COUNT}}", &bottleneck_vars.to_string());
        html = html.replace(
            "{{OPTIMIZATION_COUNT}}",
            &optimization_opportunities.to_string(),
        );

        let critical_var = data
            .variable_registry
            .values()
            .max_by_key(|v| v.allocation_count)
            .cloned();

        let shared_vars_list: Vec<_> = data
            .variable_registry
            .values()
            .filter(|v| matches!(v.lifecycle_stage, LifecycleStage::Shared))
            .take(3)
            .collect();

        if let Some(critical) = critical_var {
            html = html.replace("{{CRITICAL_VARIABLE_NAME}}", &critical.name);
            html = html.replace("{{CRITICAL_PROCESS_ID}}", &critical.thread_id.to_string());
            html = html.replace("{{CRITICAL_COMPETITION_TYPE}}", "Memory Access");
            html = html.replace(
                "{{COMPETING_PROCESSES_LIST}}",
                &format!(
                    "Thread {}, Thread {}, Thread {}",
                    critical.thread_id,
                    (critical.thread_id + 1) % 30 + 1,
                    (critical.thread_id + 2) % 30 + 1
                ),
            );
            html = html.replace(
                "{{CRITICAL_ACCESS_FREQUENCY}}",
                &(critical.allocation_count * 10).to_string(),
            );
            html = html.replace(
                "{{CRITICAL_MEMORY_SIZE}}",
                &format!("{:.1}", critical.memory_usage as f64 / 1024.0 / 1024.0),
            );
            html = html.replace("{{CRITICAL_THREAD_COUNT}}", "3");
        } else {
            html = html.replace("{{CRITICAL_VARIABLE_NAME}}", "shared_buffer");
            html = html.replace("{{CRITICAL_PROCESS_ID}}", "1");
            html = html.replace("{{CRITICAL_COMPETITION_TYPE}}", "Memory Access");
            html = html.replace(
                "{{COMPETING_PROCESSES_LIST}}",
                "Thread 1, Thread 2, Thread 3",
            );
            html = html.replace("{{CRITICAL_ACCESS_FREQUENCY}}", "250");
            html = html.replace("{{CRITICAL_MEMORY_SIZE}}", "2.5");
            html = html.replace("{{CRITICAL_THREAD_COUNT}}", "3");
        }

        for (i, var) in shared_vars_list.iter().enumerate() {
            let index = i + 1;
            html = html.replace(&format!("{{{{SHARED_VAR_{}_NAME}}}}", index), &var.name);
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_ACCESS}}}}", index),
                &(var.allocation_count * 5).to_string(),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_PROC_1}}}}", index),
                &var.thread_id.to_string(),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_PROC_2}}}}", index),
                &((var.thread_id % 30) + 1).to_string(),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_PROC_3}}}}", index),
                &((var.thread_id % 30) + 2).to_string(),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_SIZE}}}}", index),
                &format!("{:.1}", var.memory_usage as f64 / 1024.0),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_RISK}}}}", index),
                &((var.allocation_count % 100) + 10).to_string(),
            );
        }

        for i in shared_vars_list.len() + 1..=5 {
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_NAME}}}}", i),
                &format!("shared_data_{}", i),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_ACCESS}}}}", i),
                &(50 + i * 10).to_string(),
            );
            html = html.replace(&format!("{{{{SHARED_VAR_{}_PROC_1}}}}", i), &i.to_string());
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_PROC_2}}}}", i),
                &((i % 5) + 1).to_string(),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_PROC_3}}}}", i),
                &((i % 7) + 1).to_string(),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_SIZE}}}}", i),
                &format!("{:.1}", (i as f64 * 0.5) + 1.0),
            );
            html = html.replace(
                &format!("{{{{SHARED_VAR_{}_RISK}}}}", i),
                &(25 + i * 15).to_string(),
            );
        }

        let bottleneck_var = data
            .variable_registry
            .values()
            .filter(|v| v.memory_usage > 50 * 1024)
            .max_by_key(|v| v.memory_usage)
            .cloned();

        if let Some(bottleneck) = bottleneck_var {
            html = html.replace("{{WARNING_RESOURCE_NAME}}", &bottleneck.name);
            html = html.replace("{{WARNING_PROCESS_COUNT}}", "4");
            html = html.replace(
                "{{WARNING_WAIT_TIME}}",
                &(bottleneck.allocation_count / 2).to_string(),
            );
            html = html.replace("{{BOTTLENECK_VAR_NAME}}", &bottleneck.name);
            html = html.replace(
                "{{BOTTLENECK_PROCESS_COUNT}}",
                &format!(
                    "{} processes",
                    data.variable_registry
                        .values()
                        .map(|v| v.thread_id)
                        .collect::<std::collections::HashSet<_>>()
                        .len()
                        .min(5)
                ),
            );
            html = html.replace(
                "{{BOTTLENECK_WAIT_TIME}}",
                &(bottleneck.allocation_count * 2).to_string(),
            );
            html = html.replace("{{BOTTLENECK_PEAK_TIME}}", "14:23:45");
            html = html.replace(
                "{{BOTTLENECK_OPTIMIZATION}}",
                "Consider using Arc<RwLock<T>> for read-heavy access patterns",
            );
        } else {
            html = html.replace("{{WARNING_RESOURCE_NAME}}", "shared_cache");
            html = html.replace("{{WARNING_PROCESS_COUNT}}", "3");
            html = html.replace("{{WARNING_WAIT_TIME}}", "45");
            html = html.replace("{{BOTTLENECK_VAR_NAME}}", "large_buffer");
            html = html.replace("{{BOTTLENECK_PROCESS_COUNT}}", "5 processes");
            html = html.replace("{{BOTTLENECK_WAIT_TIME}}", "120");
            html = html.replace("{{BOTTLENECK_PEAK_TIME}}", "14:23:45");
            html = html.replace(
                "{{BOTTLENECK_OPTIMIZATION}}",
                "Consider using Arc<RwLock<T>> for read-heavy access patterns",
            );
        }

        html = html.replace(
            "{{CRITICAL_SOLUTION_CODE}}",
            "// Use parking_lot::RwLock for better performance\nuse parking_lot::RwLock;\nlet shared_data = Arc::new(RwLock::new(data));",
        );
        html = html.replace(
            "{{WARNING_SOLUTION_CODE}}",
            "// Implement backoff strategy\nuse std::thread;\nthread::sleep(Duration::from_millis(rand::random::<u64>() % 10));",
        );

        let thread_ids: Vec<usize> = data
            .variable_registry
            .values()
            .map(|v| v.thread_id)
            .collect::<std::collections::HashSet<_>>()
            .into_iter()
            .take(3)
            .collect();

        html = html.replace(
            "{{CLONE_THREAD_1}}",
            &thread_ids.first().unwrap_or(&1).to_string(),
        );
        html = html.replace(
            "{{CLONE_THREAD_2}}",
            &thread_ids.get(1).unwrap_or(&2).to_string(),
        );
        html = html.replace(
            "{{CLONE_THREAD_3}}",
            &thread_ids.get(2).unwrap_or(&3).to_string(),
        );

        html = html.replace(
            "{{CONTENTION_THREAD_1}}",
            &thread_ids.first().unwrap_or(&1).to_string(),
        );
        html = html.replace(
            "{{CONTENTION_THREAD_2}}",
            &thread_ids.get(1).unwrap_or(&2).to_string(),
        );
        html = html.replace(
            "{{CONTENTION_THREAD_3}}",
            &thread_ids.get(2).unwrap_or(&3).to_string(),
        );
        html = html.replace("{{WAIT_TIME_1}}", "15");
        html = html.replace("{{WAIT_TIME_2}}", "22");
        html = html.replace("{{WAIT_TIME_3}}", "8");

        let var_names: Vec<String> = data
            .variable_registry
            .values()
            .take(6)
            .map(|v| v.name.clone())
            .collect();

        html = html.replace(
            "{{REL_VAR_1}}",
            var_names.first().unwrap_or(&"buffer_a".to_string()),
        );
        html = html.replace(
            "{{REL_VAR_2}}",
            var_names.get(1).unwrap_or(&"cache_b".to_string()),
        );
        html = html.replace(
            "{{REL_VAR_3}}",
            var_names.get(2).unwrap_or(&"queue_c".to_string()),
        );
        html = html.replace(
            "{{REL_VAR_4}}",
            var_names.get(3).unwrap_or(&"data_d".to_string()),
        );
        html = html.replace(
            "{{REL_VAR_5}}",
            var_names.get(4).unwrap_or(&"mutex_e".to_string()),
        );
        html = html.replace(
            "{{REL_VAR_6}}",
            var_names.get(5).unwrap_or(&"shared_f".to_string()),
        );

        html = html.replace("{{REL_STRENGTH_1}}", "87");
        html = html.replace("{{REL_STRENGTH_2}}", "64");
        html = html.replace("{{REL_STRENGTH_3}}", "73");
        html = html.replace("{{REL_TYPE_1}}", "Mutex Dependency");
        html = html.replace("{{REL_TYPE_2}}", "Shared Access");
        html = html.replace("{{REL_TYPE_3}}", "Producer-Consumer");

        html
    }

    /// Renders the dashboard, falling back to a minimal inline error page if
    /// the template cannot be loaded.
    pub fn generate_variable_detailed_html(&self, data: &HybridAnalysisData) -> String {
        self.generate_hybrid_dashboard(data)
            .unwrap_or_else(|e| format!("<html><body><h1>Error: {}</h1></body></html>", e))
    }
}

/// Builds a deterministic sample dataset for demos and tests.
pub fn create_sample_hybrid_data(thread_count: usize, task_count: usize) -> HybridAnalysisData {
    let mut variable_registry = HashMap::new();

    // (name pattern, type, base memory in bytes, base allocation count)
    let variable_templates = [
        // Memory-intensive
        ("memory_buffer", "Vec<u8>", 1024 * 512, 25),
        ("cache_storage", "HashMap<String, Vec<u8>>", 1024 * 800, 15),
        ("large_buffer", "Buffer", 1024 * 600, 30),
        // CPU-intensive
        ("cpu_compute_data", "ComputeBuffer", 1024 * 100, 150),
        ("processing_queue", "Vec<Task>", 1024 * 80, 200),
        ("compute_matrix", "Matrix", 1024 * 120, 180),
        // I/O-intensive
        ("io_file_buffer", "FileBuffer", 1024 * 200, 45),
        ("network_buffer", "NetBuffer", 1024 * 150, 60),
        ("io_stream_data", "StreamData", 1024 * 100, 80),
        // Async-heavy
        ("async_future_pool", "FuturePool", 1024 * 90, 70),
        ("task_scheduler", "AsyncScheduler", 1024 * 110, 50),
        ("async_channel_buf", "ChannelBuffer", 1024 * 85, 65),
        // Normal
        ("config_data", "Config", 1024 * 50, 10),
        ("user_session", "Session", 1024 * 60, 8),
        ("temp_data", "TempBuffer", 1024 * 40, 12),
    ];

    for (i, (name_pattern, type_info, base_memory, base_allocs)) in
        variable_templates.iter().enumerate()
    {
        for thread_offset in 0..3 {
            let thread_id = (i + thread_offset) % thread_count + 1;
            let task_id = (i + thread_offset) % task_count + 1;
            let var_index = i * 3 + thread_offset;

            let variable = VariableDetail {
                name: format!("{}_t{}_v{}", name_pattern, thread_id, var_index),
                type_info: type_info.to_string(),
                thread_id,
                task_id: Some(task_id),
                allocation_count: (*base_allocs as f64 * (1.0 + (thread_offset as f64 * 0.3)))
                    as u64,
                memory_usage: (*base_memory as f64 * (1.0 + (thread_offset as f64 * 0.2))) as u64,
                lifecycle_stage: match var_index % 4 {
                    0 => LifecycleStage::Active,
                    1 => LifecycleStage::Allocated,
                    2 => LifecycleStage::Shared,
                    _ => LifecycleStage::Deallocated,
                },
            };
            variable_registry.insert(variable.name.clone(), variable);
        }
    }

    // Additional generic variables to round out the sample.
    for i in 45..55 {
        let variable = VariableDetail {
            name: format!(
                "var_t{}_task{}_v{}",
                (i % thread_count) + 1,
                (i % task_count) + 1,
                i
            ),
            type_info: "StandardType".to_string(),
            thread_id: (i % thread_count) + 1,
            task_id: Some((i % task_count) + 1),
            allocation_count: (i % 20) as u64 + 5,
            memory_usage: (((i % 80) + 20) * 1024) as u64,
            lifecycle_stage: match i % 4 {
                0 => LifecycleStage::Active,
                1 => LifecycleStage::Allocated,
                2 => LifecycleStage::Shared,
                _ => LifecycleStage::Deallocated,
            },
        };
        variable_registry.insert(variable.name.clone(), variable);
    }

    HybridAnalysisData {
        lockfree_analysis: None,
        visualization_config: VisualizationConfig::default(),
        thread_task_mapping: HashMap::new(),
        variable_registry,
        performance_metrics: PerformanceTimeSeries {
            cpu_usage: vec![45.2, 67.8, 23.1, 89.4],
            memory_usage: vec![1024, 2048, 1536, 3072],
            io_operations: vec![100, 250, 180, 320],
            network_bytes: vec![500, 1200, 800, 1500],
            timestamps: vec![1000, 2000, 3000, 4000],
            thread_cpu_breakdown: HashMap::new(),
            thread_memory_breakdown: HashMap::new(),
        },
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::async_memory::visualization::VisualizationConfig;
    use std::collections::HashMap;

    fn create_test_variable(name: &str, thread_id: usize, memory_usage: u64) -> VariableDetail {
        VariableDetail {
            name: name.to_string(),
            type_info: "Vec<u8>".to_string(),
            thread_id,
            task_id: Some(1),
            allocation_count: 10,
            memory_usage,
            lifecycle_stage: LifecycleStage::Active,
        }
    }

    fn create_test_data() -> HybridAnalysisData {
        let mut variable_registry = HashMap::new();
        variable_registry.insert(
            "test_var1".to_string(),
            create_test_variable("test_var1", 0, 1024 * 1024),
        );
        variable_registry.insert(
            "test_var2".to_string(),
            create_test_variable("test_var2", 1, 512 * 1024),
        );

        HybridAnalysisData {
            lockfree_analysis: None,
            visualization_config: VisualizationConfig::default(),
            thread_task_mapping: HashMap::new(),
            variable_registry,
            performance_metrics: PerformanceTimeSeries {
                cpu_usage: vec![50.0, 60.0, 70.0],
                memory_usage: vec![1024, 2048, 3072],
                io_operations: vec![100, 200, 300],
                network_bytes: vec![1000, 2000, 3000],
                timestamps: vec![1000, 2000, 3000],
                thread_cpu_breakdown: HashMap::new(),
                thread_memory_breakdown: HashMap::new(),
            },
        }
    }

    #[test]
    fn test_fixed_hybrid_template_new() {
        let template = FixedHybridTemplate::new(4, 8);
        assert_eq!(template.thread_count, 4);
        assert_eq!(template.task_count, 8);
        assert_eq!(
            template.output_path,
            "simple_hybrid_dashboard_variable_detailed.html"
        );
        assert!(matches!(template.render_mode, RenderMode::VariableDetailed));
    }

    #[test]
    fn test_with_render_mode() {
        let template = FixedHybridTemplate::new(2, 4).with_render_mode(RenderMode::ThreadFocused);
        assert!(matches!(template.render_mode, RenderMode::ThreadFocused));
    }

    #[test]
    fn test_calculate_total_memory() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let total_mb = template.calculate_total_memory(&data);
        assert!(total_mb > 0.0);
        // The fixture holds 1MB + 0.5MB of tracked memory.
        assert!((1.0..=2.0).contains(&total_mb));
    }

    #[test]
    fn test_classify_variable_performance() {
        let template = FixedHybridTemplate::new(2, 4);

        let buffer_var = create_test_variable("buffer_large", 0, 600 * 1024);
        assert_eq!(
            template.classify_variable_performance(&buffer_var),
            "memory"
        );

        let cpu_var = VariableDetail {
            name: "cpu_intensive".to_string(),
            type_info: "Vec<u8>".to_string(),
            thread_id: 0,
            task_id: Some(1),
            allocation_count: 150,
            memory_usage: 1024,
            lifecycle_stage: LifecycleStage::Active,
        };
        assert_eq!(template.classify_variable_performance(&cpu_var), "cpu");

        let io_var = create_test_variable("file_handler", 0, 1024);
        assert_eq!(template.classify_variable_performance(&io_var), "io");

        let async_var = create_test_variable("async_task", 0, 1024);
        assert_eq!(template.classify_variable_performance(&async_var), "async");

        let normal_var = create_test_variable("regular_data", 0, 1024);
        assert_eq!(
            template.classify_variable_performance(&normal_var),
            "normal"
        );
    }

    #[test]
    fn test_get_performance_label() {
        let template = FixedHybridTemplate::new(2, 4);

        assert_eq!(template.get_performance_label("cpu"), "CPU");
        assert_eq!(template.get_performance_label("io"), "I/O");
        assert_eq!(template.get_performance_label("memory"), "MEM");
        assert_eq!(template.get_performance_label("async"), "ASYNC");
        assert_eq!(template.get_performance_label("unknown"), "NORM");
    }
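
    // Additional sketch test (new in this revision): both fixture variables are
    // larger than the 50KB threshold, so analyze_small_allocations should
    // report zero small allocations and a zero rate.
    #[test]
    fn test_analyze_small_allocations_none_small() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let (count, rate) = template.analyze_small_allocations(&data);
        assert_eq!(count, 0);
        assert_eq!(rate, 0);
    }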

    #[test]
    fn test_generate_variables_html() {
        let template = FixedHybridTemplate::new(2, 4);
        let variables = vec![
            create_test_variable("test_var", 0, 1024),
            create_test_variable("buffer_var", 1, 2048),
        ];

        let html = template.generate_variables_html(&variables);
        assert!(html.contains("test_var"));
        assert!(html.contains("buffer_var"));
        assert!(html.contains("variable-card"));
        assert!(html.contains("Thread 0"));
        assert!(html.contains("Thread 1"));
    }

    #[test]
    fn test_generate_memory_map_html() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let html = template.generate_memory_map_html(&data);
        assert!(html.contains("memory-map-grid"));
        assert!(html.contains("memory-thread-block"));
        assert!(html.contains("Thread 0"));
        assert!(html.contains("Thread 1"));
    }

    #[test]
    fn test_serialize_variables_for_js() {
        let template = FixedHybridTemplate::new(2, 4);
        let variables = vec![
            create_test_variable("var1", 0, 1024),
            create_test_variable("var2", 1, 2048),
        ];

        let json = template.serialize_variables_for_js(&variables);
        assert!(json.starts_with('['));
        assert!(json.ends_with(']'));
        assert!(json.contains("var1"));
        assert!(json.contains("var2"));
        assert!(json.contains("\"thread\":0"));
        assert!(json.contains("\"thread\":1"));
    }

    #[test]
    fn test_analyze_high_usage() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let (thread, max_memory_kb, max_frequency) = template.analyze_high_usage(&data);
        assert!(thread <= 1);
        assert!(max_memory_kb >= 512);
        assert!(max_frequency >= 10);
    }
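
    // Additional sketch test (new in this revision): the fixture contains no
    // variables stuck in the Allocated stage, so leak detection should report
    // the "clean" status and a zero potential-leak count.
    #[test]
    fn test_analyze_memory_leaks_clean() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        assert_eq!(template.count_potential_leaks(&data), 0);
        let (status_class, icon, title, _description) = template.analyze_memory_leaks(&data);
        assert_eq!(status_class, "clean");
        assert_eq!(icon, "✅");
        assert!(title.contains("No Memory Leaks"));
    }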

    #[test]
    fn test_calculate_memory_efficiency() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let efficiency = template.calculate_memory_efficiency(&data);
        assert!((0.0..=100.0).contains(&efficiency));
        // Both fixture variables are Active, so efficiency is 100%.
        assert_eq!(efficiency, 100.0);
    }

    #[test]
    fn test_calculate_scores() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let (mem_score, alloc_score, thread_score, overall_score) =
            template.calculate_scores(&data);
        assert!(mem_score <= 100);
        assert!(alloc_score <= 100);
        assert!(thread_score <= 100);
        assert!(overall_score <= 100);
        assert!(overall_score > 0);
    }
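
    // Additional sketch test (new in this revision): both fixture variables are
    // Vec-typed (1024KB and 512KB), so the suggested Vec capacity is their
    // average size in KB; neither is String-typed, so the string capacity
    // falls back to the 256 default.
    #[test]
    fn test_capacity_suggestions() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        assert_eq!(template.calculate_suggested_capacity(&data), 768);
        assert_eq!(template.calculate_string_capacity(&data), 256);
    }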

    #[test]
    fn test_lifecycle_stage_debug() {
        let active = LifecycleStage::Active;
        let allocated = LifecycleStage::Allocated;
        let shared = LifecycleStage::Shared;
        let deallocated = LifecycleStage::Deallocated;

        assert!(!format!("{:?}", active).is_empty());
        assert!(!format!("{:?}", allocated).is_empty());
        assert!(!format!("{:?}", shared).is_empty());
        assert!(!format!("{:?}", deallocated).is_empty());
    }

    #[test]
    fn test_render_mode_debug() {
        let comprehensive = RenderMode::Comprehensive;
        let thread_focused = RenderMode::ThreadFocused;
        let variable_detailed = RenderMode::VariableDetailed;

        assert!(!format!("{:?}", comprehensive).is_empty());
        assert!(!format!("{:?}", thread_focused).is_empty());
        assert!(!format!("{:?}", variable_detailed).is_empty());
    }

    #[test]
    fn test_variable_detail_clone() {
        let var1 = create_test_variable("test", 0, 1024);
        let var2 = var1.clone();

        assert_eq!(var1.name, var2.name);
        assert_eq!(var1.thread_id, var2.thread_id);
        assert_eq!(var1.memory_usage, var2.memory_usage);
    }
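
    // Additional sketch tests (new in this revision): the builder flags are
    // currently ignored, and the per-thread JSON should aggregate exactly one
    // variable per thread from the fixture.
    #[test]
    fn test_builder_flags_are_noops() {
        let template = FixedHybridTemplate::new(2, 4)
            .with_variable_details(false)
            .with_enhanced_insights(false);
        assert_eq!(template.thread_count, 2);
        assert_eq!(template.task_count, 4);
        assert!(matches!(template.render_mode, RenderMode::VariableDetailed));
    }

    #[test]
    fn test_serialize_threads_for_js() {
        let template = FixedHybridTemplate::new(2, 4);
        let data = create_test_data();

        let json = template.serialize_threads_for_js(&data);
        assert!(json.starts_with('['));
        assert!(json.ends_with(']'));
        assert!(json.contains("\"memory\":1048576"));
        assert!(json.contains("\"variables\":1"));
    }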
}