1use crate::ir::{FrameCategory, ProfileIR};
6
7use super::{CpuAnalysis, FunctionStats, PerformancePattern};
8
/// Urgency ranking for a [`Recommendation`].
///
/// Variants are declared from most to least urgent, so the derived `Ord`
/// places `Critical` first when recommendations are sorted by priority
/// (see the sort in `RecommendationEngine::analyze`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum Priority {
    Critical,
    High,
    Medium,
    Low,
}
21
22impl std::fmt::Display for Priority {
23 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
24 match self {
25 Self::Critical => write!(f, "CRITICAL"),
26 Self::High => write!(f, "HIGH"),
27 Self::Medium => write!(f, "MEDIUM"),
28 Self::Low => write!(f, "LOW"),
29 }
30 }
31}
32
/// Rough implementation cost of acting on a recommendation.
///
/// Used as an ROI weight: cheaper efforts score higher in
/// `Recommendation::roi_score`, and `Significant`/`Major` items are
/// filtered more aggressively in `RecommendationEngine::analyze`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Effort {
    QuickWin,
    Moderate,
    Significant,
    Major,
}
45
46impl std::fmt::Display for Effort {
47 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
48 match self {
49 Self::QuickWin => write!(f, "Quick Win"),
50 Self::Moderate => write!(f, "Moderate"),
51 Self::Significant => write!(f, "Significant"),
52 Self::Major => write!(f, "Major Refactor"),
53 }
54 }
55}
56
/// Broad classification of the performance issue behind a recommendation.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IssueType {
    /// Algorithmic inefficiency (e.g. O(n²) work, expensive operations).
    Algorithm,
    /// Allocation pressure / GC overhead.
    Memory,
    /// A third-party package dominating CPU time.
    Dependency,
    /// JSON or similar serialization overhead.
    Serialization,
    /// Repeated work that could be cached or batched.
    Caching,
    /// Slow startup / initialization phase.
    Startup,
    /// Deep recursion overhead.
    Recursion,
    /// Generic hotspot without a more specific classification.
    Hotspot,
}
77
78impl std::fmt::Display for IssueType {
79 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
80 match self {
81 Self::Algorithm => write!(f, "Algorithm"),
82 Self::Memory => write!(f, "Memory"),
83 Self::Dependency => write!(f, "Dependency"),
84 Self::Serialization => write!(f, "Serialization"),
85 Self::Caching => write!(f, "Caching"),
86 Self::Startup => write!(f, "Startup"),
87 Self::Recursion => write!(f, "Recursion"),
88 Self::Hotspot => write!(f, "Hotspot"),
89 }
90 }
91}
92
/// A single actionable performance finding produced by the engine.
#[derive(Debug, Clone)]
pub struct Recommendation {
    /// How urgent the finding is.
    pub priority: Priority,
    /// Estimated implementation cost of the fix.
    pub effort: Effort,
    /// Broad classification of the underlying issue.
    pub issue_type: IssueType,
    /// Short human-readable headline.
    pub title: String,
    /// Function or package the recommendation targets.
    pub target: String,
    /// Source location of the target.
    pub location: String,
    /// Time currently attributed to the target, in microseconds.
    pub current_time_us: u64,
    /// Estimated time recoverable by the fix, in microseconds.
    pub estimated_savings_us: u64,
    /// Explanation of why this code is slow.
    pub root_cause: String,
    /// Concrete remediation steps, in suggested order.
    pub actions: Vec<String>,
    /// Code patterns to look for when applying the fix.
    pub code_patterns: Vec<String>,
    /// Profiling measurements supporting the finding.
    pub evidence: Vec<String>,
}
121
122impl Recommendation {
123 pub fn roi_score(&self) -> f64 {
125 let effort_multiplier = match self.effort {
126 Effort::QuickWin => 4.0,
127 Effort::Moderate => 2.0,
128 Effort::Significant => 1.0,
129 Effort::Major => 0.5,
130 };
131 self.estimated_savings_us as f64 * effort_multiplier
132 }
133
134 #[expect(clippy::cast_precision_loss)]
136 pub fn savings_percent(&self, total_time: u64) -> f64 {
137 if total_time == 0 {
138 0.0
139 } else {
140 (self.estimated_savings_us as f64 / total_time as f64) * 100.0
141 }
142 }
143}
144
/// Full output of `RecommendationEngine::analyze`.
#[derive(Debug, Clone)]
pub struct RecommendationReport {
    /// All surviving recommendations, sorted by priority then ROI (best first).
    pub recommendations: Vec<Recommendation>,
    /// High-level observations about the profile as a whole.
    pub insights: Vec<String>,
    /// Indices into `recommendations` for low-effort, high-value items.
    pub quick_wins: Vec<usize>,
    /// Open questions that merit manual follow-up.
    pub investigations: Vec<String>,
}
157
/// Stateless namespace for the recommendation analysis passes.
pub struct RecommendationEngine;
160
161impl RecommendationEngine {
162 fn should_include_category(category: FrameCategory, analysis: &CpuAnalysis) -> bool {
164 let filters = &analysis.metadata.filter_categories;
165 filters.is_empty() || filters.contains(&category)
168 }
169
    /// Runs every analysis pass over the profile and assembles the final
    /// report: recommendations (filtered and sorted), insights, quick-win
    /// indices, and open investigations.
    #[expect(clippy::cast_precision_loss)]
    pub fn analyze(profile: &ProfileIR, analysis: &CpuAnalysis) -> RecommendationReport {
        let mut recommendations = Vec::new();
        let mut insights = Vec::new();
        let mut investigations = Vec::new();

        let total_time = analysis.total_time;
        let filters = &analysis.metadata.filter_categories;

        Self::analyze_hotspots(analysis, total_time, &mut recommendations, &mut insights);

        // GC work is triggered by app/dependency allocations but is recorded
        // as V8-internal time, so run the GC pass when any of those
        // categories is in scope.
        let should_analyze_gc = filters.is_empty()
            || filters.contains(&FrameCategory::V8Internal)
            || filters.contains(&FrameCategory::App)
            || filters.contains(&FrameCategory::Deps);
        if should_analyze_gc {
            Self::analyze_gc(analysis, total_time, &mut recommendations, &mut insights);
        }

        if Self::should_include_category(FrameCategory::Deps, analysis) {
            Self::analyze_dependencies(analysis, total_time, &mut recommendations, &mut insights);
        }

        Self::analyze_recursion(analysis, total_time, &mut recommendations);

        Self::analyze_phases(analysis, total_time, &mut recommendations, &mut insights);

        Self::detect_patterns(profile, analysis, total_time, &mut recommendations);

        Self::generate_investigations(analysis, &mut investigations);

        // Drop noise: anything saving less than 0.5% of total time, and any
        // expensive (Significant/Major) item saving less than 2%.
        let min_savings_threshold = total_time / 200;
        recommendations.retain(|r| {
            let savings_pct = r.savings_percent(total_time);
            if r.estimated_savings_us < min_savings_threshold {
                return false;
            }
            if matches!(r.effort, Effort::Significant | Effort::Major) && savings_pct < 2.0 {
                return false;
            }
            true
        });

        // Highest priority first; within a priority, best ROI first.
        // NaN-safe: incomparable scores are treated as equal.
        recommendations.sort_by(|a, b| {
            a.priority.cmp(&b.priority).then_with(|| {
                b.roi_score()
                    .partial_cmp(&a.roi_score())
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
        });

        // Quick wins: cheap changes that still save at least 1% of total time.
        let quick_wins: Vec<usize> = recommendations
            .iter()
            .enumerate()
            .filter(|(_, r)| r.effort == Effort::QuickWin && r.savings_percent(total_time) >= 1.0)
            .map(|(i, _)| i)
            .collect();

        RecommendationReport {
            recommendations,
            insights,
            quick_wins,
            investigations,
        }
    }
250
    /// Turns the top self-time functions into hotspot recommendations and
    /// adds an insight about how concentrated CPU time is.
    #[expect(clippy::cast_precision_loss)]
    fn analyze_hotspots(
        analysis: &CpuAnalysis,
        total_time: u64,
        recommendations: &mut Vec<Recommendation>,
        insights: &mut Vec<String>,
    ) {
        for func in analysis.functions.iter().take(10) {
            let self_pct = func.self_percent(total_time);
            let total_pct = func.total_percent(total_time);

            // Skip functions that are cheap both exclusively and inclusively.
            if self_pct < 3.0 && total_pct < 10.0 {
                continue;
            }

            let pattern = func.performance_pattern(analysis.total_samples);
            let (priority, issue_type, root_cause, actions, effort, savings_ratio) =
                Self::classify_hotspot(func, &pattern, self_pct, total_pct);

            // savings_ratio is the pattern-specific fraction of self time
            // assumed recoverable.
            let estimated_savings = (func.self_time as f64 * savings_ratio) as u64;

            let mut evidence = vec![
                format!("{:.1}% of total CPU time", self_pct),
                format!("{} samples", func.self_samples),
            ];

            if func.is_recursive() {
                evidence.push(format!(
                    "Recursive (max depth: {})",
                    func.max_recursion_depth
                ));
            }

            let code_patterns = Self::suggest_code_patterns(&func.name, &func.location, issue_type);

            recommendations.push(Recommendation {
                priority,
                effort,
                issue_type,
                title: Self::generate_title(func, issue_type),
                target: func.name.clone(),
                location: func.location.clone(),
                current_time_us: func.self_time,
                estimated_savings_us: estimated_savings,
                root_cause,
                actions,
                code_patterns,
                evidence,
            });
        }

        // Concentration insight: combined self-time share of the top 3.
        let top3_pct: f64 = analysis
            .functions
            .iter()
            .take(3)
            .map(|f| f.self_percent(total_time))
            .sum();

        if top3_pct > 50.0 {
            insights.push(format!(
                "Top 3 functions account for {:.0}% of CPU time — focused optimization will have high impact",
                top3_pct
            ));
        } else if top3_pct < 20.0 {
            insights.push(
                "CPU time is well-distributed — consider architectural improvements over micro-optimizations".to_string()
            );
        }
    }
323
    /// Maps a hotspot's performance pattern to a recommendation tuple:
    /// `(priority, issue type, root cause, actions, effort, savings ratio)`.
    ///
    /// The savings ratio is the fraction of the function's self time assumed
    /// recoverable by the suggested fix; thresholds on `self_pct` /
    /// `total_pct` decide the priority within each pattern.
    fn classify_hotspot(
        func: &FunctionStats,
        pattern: &PerformancePattern,
        self_pct: f64,
        total_pct: f64,
    ) -> (Priority, IssueType, String, Vec<String>, Effort, f64) {
        match pattern {
            // Hot in both call count and per-call cost.
            PerformancePattern::CriticalPath => {
                let priority = if self_pct >= 20.0 {
                    Priority::Critical
                } else {
                    Priority::High
                };
                (
                    priority,
                    IssueType::Algorithm,
                    "Called frequently AND expensive per call — this is on the critical path"
                        .to_string(),
                    vec![
                        "Profile this function in isolation to find the slow code path".to_string(),
                        "Consider algorithmic improvements (caching, better data structures)"
                            .to_string(),
                        "Look for unnecessary work that can be skipped".to_string(),
                        "Consider breaking into smaller functions to isolate the bottleneck"
                            .to_string(),
                    ],
                    Effort::Moderate,
                    0.5, // assume roughly half the self time is recoverable
                )
            }
            // Few calls, each individually expensive.
            PerformancePattern::ExpensiveOperation => {
                let (priority, effort) = if self_pct >= 15.0 {
                    (Priority::High, Effort::Moderate)
                } else if self_pct >= 5.0 {
                    (Priority::Medium, Effort::Moderate)
                } else {
                    (Priority::Low, Effort::QuickWin)
                };
                (
                    priority,
                    IssueType::Algorithm,
                    "Each call is expensive but infrequent — optimize the operation itself"
                        .to_string(),
                    vec![
                        "Check for O(n²) or worse algorithms".to_string(),
                        "Look for synchronous I/O or blocking operations".to_string(),
                        "Consider lazy evaluation or streaming".to_string(),
                        "Profile memory allocations in this function".to_string(),
                    ],
                    effort,
                    0.4,
                )
            }
            // Cheap per call but invoked very often.
            PerformancePattern::FrequentlyCalled => {
                // Dependency code can't easily be edited in place, so suggest
                // reducing calls into it rather than changing its internals.
                let actions = if func.category == FrameCategory::Deps {
                    vec![
                        "Reduce call frequency by batching operations".to_string(),
                        "Cache results if the function is pure".to_string(),
                        "Consider inlining or replacing with native code".to_string(),
                    ]
                } else {
                    vec![
                        "Memoize results if inputs repeat".to_string(),
                        "Move invariant computations outside loops".to_string(),
                        "Consider batching multiple calls".to_string(),
                        "Check if calls can be eliminated entirely".to_string(),
                    ]
                };
                let priority = if self_pct >= 10.0 {
                    Priority::High
                } else if self_pct >= 3.0 {
                    Priority::Medium
                } else {
                    Priority::Low
                };
                (
                    priority,
                    IssueType::Caching,
                    "Called very frequently — consider caching or batching".to_string(),
                    actions,
                    Effort::QuickWin,
                    0.3,
                )
            }
            // No distinctive pattern; decide by inclusive (total) time.
            PerformancePattern::Normal => {
                if total_pct >= 20.0 {
                    // High inclusive but modest self time — the cost is
                    // likely in a callee.
                    (
                        Priority::Medium,
                        IssueType::Hotspot,
                        "High inclusive time suggests expensive callees".to_string(),
                        vec![
                            "Examine what this function calls".to_string(),
                            "The bottleneck may be in a callee, not this function".to_string(),
                            "Check the caller/callee analysis for this function".to_string(),
                        ],
                        Effort::Moderate,
                        0.2,
                    )
                } else {
                    (
                        Priority::Low,
                        IssueType::Hotspot,
                        "Minor hotspot".to_string(),
                        vec![
                            "Low priority — optimize only if other issues are resolved".to_string(),
                        ],
                        Effort::Moderate,
                        0.1,
                    )
                }
            }
        }
    }
440
441 fn generate_title(func: &FunctionStats, issue_type: IssueType) -> String {
442 match issue_type {
443 IssueType::Algorithm => format!("Optimize algorithm in `{}`", func.name),
444 IssueType::Caching => format!("Add caching/memoization to `{}`", func.name),
445 IssueType::Memory => format!("Reduce allocations in `{}`", func.name),
446 IssueType::Dependency => format!("Optimize or replace `{}`", func.name),
447 IssueType::Serialization => format!("Optimize serialization in `{}`", func.name),
448 IssueType::Startup => format!("Defer or lazy-load `{}`", func.name),
449 IssueType::Recursion => format!("Convert `{}` to iterative", func.name),
450 IssueType::Hotspot => format!("Investigate `{}`", func.name),
451 }
452 }
453
454 fn suggest_code_patterns(name: &str, location: &str, issue_type: IssueType) -> Vec<String> {
455 let mut patterns = Vec::new();
456 let name_lower = name.to_lowercase();
457 let loc_lower = location.to_lowercase();
458
459 if name_lower.contains("json")
461 || name_lower.contains("parse")
462 || name_lower.contains("stringify")
463 {
464 patterns.push("JSON.parse() / JSON.stringify() calls".to_string());
465 patterns.push("Consider streaming JSON parsing for large payloads".to_string());
466 }
467
468 if name_lower.contains("regex")
470 || name_lower.contains("regexp")
471 || name_lower.contains("match")
472 {
473 patterns.push("Regular expression operations".to_string());
474 patterns.push("Compile regex once and reuse, avoid in loops".to_string());
475 }
476
477 if name_lower.contains("sort")
479 || name_lower.contains("find")
480 || name_lower.contains("search")
481 {
482 patterns.push("Sorting or searching operations".to_string());
483 patterns.push("Check if data structure supports faster lookups (Map/Set)".to_string());
484 }
485
486 if name_lower.contains("each")
488 || name_lower.contains("map")
489 || name_lower.contains("filter")
490 {
491 patterns.push("Array iteration methods".to_string());
492 patterns
493 .push("Consider early termination, or use for loop for performance".to_string());
494 }
495
496 if name_lower.contains("transform")
498 || name_lower.contains("convert")
499 || name_lower.contains("compile")
500 {
501 patterns.push("Data transformation/compilation".to_string());
502 patterns.push("Cache transformation results if inputs repeat".to_string());
503 }
504
505 if loc_lower.contains("lodash") {
507 patterns.push("Consider native alternatives to lodash functions".to_string());
508 }
509 if loc_lower.contains("moment") {
510 patterns.push("Consider lighter date libraries (date-fns, dayjs)".to_string());
511 }
512
513 match issue_type {
515 IssueType::Caching => {
516 patterns.push("Look for repeated calls with same arguments".to_string());
517 patterns.push("Check if results can be memoized with a Map/WeakMap".to_string());
518 }
519 IssueType::Memory => {
520 patterns.push("Look for object/array creation in loops".to_string());
521 patterns.push("Consider object pooling or reuse".to_string());
522 }
523 _ => {}
524 }
525
526 patterns
527 }
528
    /// Emits a memory recommendation when GC overhead is significant
    /// (>= 5% of total time), attributing savings to the top allocation
    /// hotspot. No-op when no GC analysis is available.
    #[expect(clippy::cast_precision_loss)]
    fn analyze_gc(
        analysis: &CpuAnalysis,
        total_time: u64,
        recommendations: &mut Vec<Recommendation>,
        insights: &mut Vec<String>,
    ) {
        let Some(gc) = &analysis.gc_analysis else {
            return;
        };

        let gc_pct = if total_time > 0 {
            (gc.total_time as f64 / total_time as f64) * 100.0
        } else {
            0.0
        };

        // Below 5% GC overhead is considered normal — nothing to report.
        if gc_pct < 5.0 {
            return;
        }

        let priority = if gc_pct >= 15.0 {
            Priority::Critical
        } else if gc_pct >= 10.0 {
            Priority::High
        } else {
            Priority::Medium
        };

        // Treat 2% of total time as an acceptable GC budget; anything above
        // that is potential savings.
        let target_gc_time = (total_time as f64 * 0.02) as u64;
        let potential_savings = gc.total_time.saturating_sub(target_gc_time);

        insights.push(format!(
            "GC overhead is {:.1}% — reducing allocations could save {:.1}ms",
            gc_pct,
            potential_savings as f64 / 1000.0
        ));

        if let Some(hotspot) = gc.allocation_hotspots.first() {
            // Scale total potential savings by how strongly this function
            // correlates with GC samples.
            let hotspot_savings =
                (potential_savings as f64 * hotspot.gc_correlation / 100.0) as u64;

            recommendations.push(Recommendation {
                priority,
                effort: Effort::Moderate,
                issue_type: IssueType::Memory,
                title: format!("Reduce allocations in `{}`", hotspot.name),
                target: hotspot.name.clone(),
                location: hotspot.location.clone(),
                current_time_us: gc.total_time,
                estimated_savings_us: hotspot_savings,
                root_cause: format!(
                    "This function appears in {:.0}% of GC samples, indicating heavy allocation",
                    hotspot.gc_correlation
                ),
                actions: vec![
                    "Reuse objects instead of creating new ones".to_string(),
                    "Use object pools for frequently created objects".to_string(),
                    "Avoid creating closures in loops".to_string(),
                    "Pre-allocate arrays with known size".to_string(),
                    "Use TypedArrays for numeric data".to_string(),
                ],
                code_patterns: vec![
                    "new Object() / {} literals in loops".to_string(),
                    "Array.push() in tight loops (pre-allocate instead)".to_string(),
                    "String concatenation (use array.join or template literals)".to_string(),
                    "Spread operator creating copies".to_string(),
                ],
                evidence: vec![
                    format!("{:.1}% GC overhead", gc_pct),
                    format!("{} GC events", gc.sample_count),
                    format!("~{:.0}μs average GC pause", gc.avg_pause_us),
                ],
            });
        }
    }
607
    /// Flags heavy third-party packages: an overall insight when
    /// dependencies dominate (>= 40%), plus a recommendation for each of
    /// the top 3 packages consuming at least 3% of total time.
    #[expect(clippy::cast_precision_loss)]
    fn analyze_dependencies(
        analysis: &CpuAnalysis,
        total_time: u64,
        recommendations: &mut Vec<Recommendation>,
        insights: &mut Vec<String>,
    ) {
        let deps_pct = analysis.category_breakdown.percent(FrameCategory::Deps);

        if deps_pct >= 40.0 {
            insights.push(format!(
                "Dependencies consume {:.0}% of CPU — review if all are necessary",
                deps_pct
            ));
        }

        for pkg in analysis.package_stats.iter().take(3) {
            let pkg_pct = if total_time > 0 {
                (pkg.time as f64 / total_time as f64) * 100.0
            } else {
                0.0
            };

            // Ignore packages below 3% of total time.
            if pkg_pct < 3.0 {
                continue;
            }

            // Package-specific advice; savings_ratio is the assumed
            // recoverable fraction of the package's time.
            let (actions, effort, savings_ratio) = Self::get_package_advice(&pkg.package);

            recommendations.push(Recommendation {
                priority: if pkg_pct >= 15.0 {
                    Priority::High
                } else {
                    Priority::Medium
                },
                effort,
                issue_type: IssueType::Dependency,
                title: format!("Optimize `{}` usage", pkg.package),
                target: pkg.package.clone(),
                location: pkg.top_function_location.clone(),
                current_time_us: pkg.time,
                estimated_savings_us: (pkg.time as f64 * savings_ratio) as u64,
                root_cause: format!(
                    "Package `{}` consumes {:.1}% of CPU time",
                    pkg.package, pkg_pct
                ),
                actions,
                code_patterns: vec![
                    format!("import {{ ... }} from '{}'", pkg.package),
                    format!("require('{}')", pkg.package),
                ],
                evidence: vec![
                    format!("{:.1}% of total CPU time", pkg_pct),
                    format!("{:.1}% of dependency time", pkg.percent_of_deps),
                    format!("Hottest function: {}", pkg.top_function),
                ],
            });
        }
    }
667
668 fn get_package_advice(package: &str) -> (Vec<String>, Effort, f64) {
669 let pkg_lower = package.to_lowercase();
670
671 if pkg_lower.contains("lodash") {
673 return (
674 vec![
675 "Import only needed functions: `import map from 'lodash/map'`".to_string(),
676 "Consider native alternatives (Array.map, Object.keys, etc.)".to_string(),
677 "Use lodash-es for better tree-shaking".to_string(),
678 ],
679 Effort::QuickWin,
680 0.3,
681 );
682 }
683
684 if pkg_lower.contains("moment") {
685 return (
686 vec![
687 "Replace with date-fns or dayjs (10-20x smaller)".to_string(),
688 "Use native Intl.DateTimeFormat for formatting".to_string(),
689 "Avoid parsing strings repeatedly".to_string(),
690 ],
691 Effort::Moderate,
692 0.5,
693 );
694 }
695
696 if pkg_lower.contains("axios") {
697 return (
698 vec![
699 "Consider native fetch() API".to_string(),
700 "Reuse axios instances".to_string(),
701 "Check if interceptors add overhead".to_string(),
702 ],
703 Effort::Moderate,
704 0.2,
705 );
706 }
707
708 if pkg_lower.contains("babel") || pkg_lower.contains("typescript") {
709 return (
710 vec![
711 "This is build-time overhead — ensure not running in production".to_string(),
712 "Pre-compile code instead of runtime transpilation".to_string(),
713 "Check for accidental ts-node or @babel/register in prod".to_string(),
714 ],
715 Effort::QuickWin,
716 0.8,
717 );
718 }
719
720 if pkg_lower.contains("webpack")
721 || pkg_lower.contains("esbuild")
722 || pkg_lower.contains("vite")
723 {
724 return (
725 vec![
726 "Build tools should not run in production".to_string(),
727 "Check for dev dependencies imported at runtime".to_string(),
728 ],
729 Effort::QuickWin,
730 0.9,
731 );
732 }
733
734 if pkg_lower.contains("ajv") || pkg_lower.contains("joi") || pkg_lower.contains("yup") {
735 return (
736 vec![
737 "Compile schemas once, reuse validators".to_string(),
738 "Consider lighter validation for hot paths".to_string(),
739 "Skip validation in trusted internal calls".to_string(),
740 ],
741 Effort::QuickWin,
742 0.4,
743 );
744 }
745
746 (
748 vec![
749 "Check if this package is necessary".to_string(),
750 "Look for lighter alternatives".to_string(),
751 "Consider lazy-loading if not needed at startup".to_string(),
752 ],
753 Effort::Moderate,
754 0.3,
755 )
756 }
757
    /// Recommends converting deep recursion to iteration for recursive
    /// functions that take at least 3% of CPU time and reach depth >= 5.
    #[expect(clippy::cast_precision_loss)]
    fn analyze_recursion(
        analysis: &CpuAnalysis,
        total_time: u64,
        recommendations: &mut Vec<Recommendation>,
    ) {
        for rec_func in &analysis.recursive_functions {
            let rec_pct = if total_time > 0 {
                (rec_func.recursive_time as f64 / total_time as f64) * 100.0
            } else {
                0.0
            };

            // Shallow or cheap recursion is not worth restructuring.
            if rec_pct < 3.0 || rec_func.max_depth < 5 {
                continue;
            }

            recommendations.push(Recommendation {
                priority: if rec_pct >= 10.0 { Priority::High } else { Priority::Medium },
                effort: Effort::Moderate,
                issue_type: IssueType::Recursion,
                title: format!("Convert `{}` to iterative", rec_func.name),
                target: rec_func.name.clone(),
                location: rec_func.location.clone(),
                current_time_us: rec_func.recursive_time,
                // Assume about a third of the recursive time is call overhead.
                estimated_savings_us: rec_func.recursive_time / 3,
                root_cause: format!(
                    "Deep recursion (max depth {}) causes stack overhead and potential stack overflow risk",
                    rec_func.max_depth
                ),
                actions: vec![
                    "Convert to iterative algorithm with explicit stack".to_string(),
                    "Consider tail-call optimization if applicable".to_string(),
                    "Add memoization to avoid redundant recursive calls".to_string(),
                    "Limit recursion depth with an iterative fallback".to_string(),
                ],
                code_patterns: vec![
                    "function f() { ... f() ... }".to_string(),
                    "Look for tree/graph traversal".to_string(),
                ],
                evidence: vec![
                    format!("Max recursion depth: {}", rec_func.max_depth),
                    format!("{} recursive samples", rec_func.recursive_samples),
                    format!("{:.1}% of CPU time", rec_pct),
                ],
            });
        }
    }
806
807 #[expect(clippy::cast_precision_loss)]
808 fn analyze_phases(
809 analysis: &CpuAnalysis,
810 total_time: u64,
811 recommendations: &mut Vec<Recommendation>,
812 insights: &mut Vec<String>,
813 ) {
814 let Some(phases) = &analysis.phase_analysis else {
815 return;
816 };
817
818 let startup_time = phases.startup.end_us - phases.startup.start_us;
819 let startup_pct = if total_time > 0 {
820 (startup_time as f64 / total_time as f64) * 100.0
821 } else {
822 0.0
823 };
824
825 if startup_pct >= 30.0 && startup_time > 500_000 {
826 insights.push(format!(
828 "Startup phase takes {:.0}ms ({:.0}% of profile) — consider lazy loading",
829 startup_time as f64 / 1000.0,
830 startup_pct
831 ));
832
833 if let Some(top_startup_func) = phases.startup.top_functions.first() {
834 if top_startup_func.percent >= 20.0 {
835 recommendations.push(Recommendation {
836 priority: Priority::High,
837 effort: Effort::Moderate,
838 issue_type: IssueType::Startup,
839 title: format!("Defer `{}` initialization", top_startup_func.name),
840 target: top_startup_func.name.clone(),
841 location: top_startup_func.location.clone(),
842 current_time_us: top_startup_func.self_time,
843 estimated_savings_us: top_startup_func.self_time * 8 / 10, root_cause: format!(
845 "This function takes {:.0}% of startup time",
846 top_startup_func.percent
847 ),
848 actions: vec![
849 "Lazy-load this module on first use".to_string(),
850 "Move initialization to background/idle time".to_string(),
851 "Consider code-splitting this functionality".to_string(),
852 "Defer non-critical initialization".to_string(),
853 ],
854 code_patterns: vec![
855 "Top-level await or sync initialization".to_string(),
856 "Large imports at module load time".to_string(),
857 ],
858 evidence: vec![
859 format!("{:.1}% of startup time", top_startup_func.percent),
860 format!("Startup phase: {:.0}ms", startup_time as f64 / 1000.0),
861 ],
862 });
863 }
864 }
865 }
866 }
867
868 #[expect(clippy::cast_precision_loss)]
869 fn detect_patterns(
870 profile: &ProfileIR,
871 analysis: &CpuAnalysis,
872 total_time: u64,
873 recommendations: &mut Vec<Recommendation>,
874 ) {
875 let json_funcs: Vec<_> = analysis
877 .functions
878 .iter()
879 .filter(|f| {
880 let name_lower = f.name.to_lowercase();
881 name_lower.contains("json")
882 || name_lower.contains("parse")
883 || name_lower.contains("stringify")
884 || name_lower.contains("serialize")
885 })
886 .collect();
887
888 let json_time: u64 = json_funcs.iter().map(|f| f.self_time).sum();
889 let json_pct = if total_time > 0 {
890 (json_time as f64 / total_time as f64) * 100.0
891 } else {
892 0.0
893 };
894
895 if json_pct >= 5.0 {
896 recommendations.push(Recommendation {
897 priority: if json_pct >= 15.0 {
898 Priority::High
899 } else {
900 Priority::Medium
901 },
902 effort: Effort::Moderate,
903 issue_type: IssueType::Serialization,
904 title: "Optimize JSON serialization".to_string(),
905 target: "JSON operations".to_string(),
906 location: json_funcs
907 .first()
908 .map_or("(multiple)".to_string(), |f| f.location.clone()),
909 current_time_us: json_time,
910 estimated_savings_us: json_time / 2,
911 root_cause: format!(
912 "JSON parsing/serialization consumes {:.1}% of CPU time",
913 json_pct
914 ),
915 actions: vec![
916 "Use streaming JSON parsing for large payloads".to_string(),
917 "Consider binary formats (MessagePack, Protocol Buffers)".to_string(),
918 "Cache parsed results when possible".to_string(),
919 "Avoid stringify/parse roundtrips for cloning (use structuredClone)"
920 .to_string(),
921 ],
922 code_patterns: vec![
923 "JSON.parse(JSON.stringify(obj)) for cloning".to_string(),
924 "Repeated parsing of same data".to_string(),
925 "Large object serialization".to_string(),
926 ],
927 evidence: json_funcs
928 .iter()
929 .take(3)
930 .map(|f| format!("`{}` - {:.1}%", f.name, f.self_percent(total_time)))
931 .collect(),
932 });
933 }
934
935 let regex_funcs: Vec<_> = analysis
937 .functions
938 .iter()
939 .filter(|f| {
940 let name_lower = f.name.to_lowercase();
941 name_lower.contains("regexp")
942 || name_lower.contains("regex")
943 || f.name.contains("match")
944 })
945 .collect();
946
947 let regex_time: u64 = regex_funcs.iter().map(|f| f.self_time).sum();
948 let regex_pct = if total_time > 0 {
949 (regex_time as f64 / total_time as f64) * 100.0
950 } else {
951 0.0
952 };
953
954 if regex_pct >= 3.0 {
955 recommendations.push(Recommendation {
956 priority: Priority::Medium,
957 effort: Effort::QuickWin,
958 issue_type: IssueType::Algorithm,
959 title: "Optimize regular expressions".to_string(),
960 target: "RegExp operations".to_string(),
961 location: regex_funcs
962 .first()
963 .map_or("(multiple)".to_string(), |f| f.location.clone()),
964 current_time_us: regex_time,
965 estimated_savings_us: regex_time * 2 / 3,
966 root_cause: format!("Regular expressions consume {:.1}% of CPU time", regex_pct),
967 actions: vec![
968 "Compile regex once outside loops: `const re = /pattern/`".to_string(),
969 "Use simpler string methods when possible (includes, startsWith)".to_string(),
970 "Avoid capturing groups if not needed: `(?:...)` instead of `(...)`"
971 .to_string(),
972 "Consider using non-backtracking patterns".to_string(),
973 ],
974 code_patterns: vec![
975 "/pattern/.test(str) inside loops".to_string(),
976 "new RegExp() called repeatedly".to_string(),
977 "Complex patterns with backtracking".to_string(),
978 ],
979 evidence: regex_funcs
980 .iter()
981 .take(3)
982 .map(|f| format!("`{}` - {:.1}%", f.name, f.self_percent(total_time)))
983 .collect(),
984 });
985 }
986
987 for func in analysis.functions.iter().take(5) {
989 if func.self_samples > 100 && func.avg_time_per_sample() < 100.0 {
990 let name_lower = func.name.to_lowercase();
992 if name_lower.contains("get")
993 || name_lower.contains("fetch")
994 || name_lower.contains("load")
995 || name_lower.contains("query")
996 || name_lower.contains("find")
997 {
998 let _ = profile; recommendations.push(Recommendation {
1000 priority: Priority::Medium,
1001 effort: Effort::Moderate,
1002 issue_type: IssueType::Caching,
1003 title: format!("Batch `{}` calls", func.name),
1004 target: func.name.clone(),
1005 location: func.location.clone(),
1006 current_time_us: func.self_time,
1007 estimated_savings_us: func.self_time / 2,
1008 root_cause: format!(
1009 "Called {} times — potential N+1 pattern",
1010 func.self_samples
1011 ),
1012 actions: vec![
1013 "Batch multiple calls into a single operation".to_string(),
1014 "Use DataLoader pattern for automatic batching".to_string(),
1015 "Prefetch data instead of loading on demand".to_string(),
1016 "Add caching layer to avoid repeated fetches".to_string(),
1017 ],
1018 code_patterns: vec![
1019 "Loop calling getData(id) — batch to getData(ids)".to_string(),
1020 "Multiple awaits in sequence that could be parallel".to_string(),
1021 ],
1022 evidence: vec![
1023 format!("{} calls in profile", func.self_samples),
1024 format!("{:.0}μs average per call", func.avg_time_per_sample()),
1025 ],
1026 });
1027 break; }
1029 }
1030 }
1031 }
1032
1033 fn generate_investigations(analysis: &CpuAnalysis, investigations: &mut Vec<String>) {
1034 if Self::should_include_category(FrameCategory::Native, analysis)
1039 && analysis.native_time > analysis.total_time / 5
1040 {
1041 investigations.push(
1042 "High native code time (>20%) — check native addons or C++ bindings".to_string(),
1043 );
1044 }
1045
1046 if Self::should_include_category(FrameCategory::V8Internal, analysis)
1048 && analysis.category_breakdown.v8_internal > analysis.total_time / 10
1049 {
1050 investigations.push(
1051 "Significant V8 internal time (>10%) — may indicate JIT deoptimization".to_string(),
1052 );
1053 }
1054
1055 if analysis.hot_paths.len() == 1 {
1056 investigations
1057 .push("Single dominant code path — check if other paths are expected".to_string());
1058 }
1059
1060 if analysis.functions.is_empty() {
1061 investigations.push(
1062 "No significant functions found — profile may be too short or app is I/O bound"
1063 .to_string(),
1064 );
1065 }
1066 }
1067}