use clap::{Parser, Subcommand};
use kotoba_profiler::{Profiler, ProfilingConfig};
use std::path::PathBuf;
use std::time::Duration;
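
/// Command-line interface for the `kotoba-profiler` binary.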
#[derive(Parser)]
#[command(name = "kotoba-profiler")]
#[command(about = "Comprehensive performance profiling tool for KotobaDB")]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}
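
/// Subcommands supported by `kotoba-profiler`.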
#[derive(Subcommand)]
enum Commands {
    /// Run all enabled profilers against a simulated workload
    Profile {
        /// How long to profile, in seconds
        #[arg(short, long, default_value = "60")]
        duration: u64,
        /// Path to the database file used by the workload
        #[arg(long, default_value = "/tmp/kotoba_profile.db")]
        db_path: PathBuf,
        /// Directory for the generated reports
        #[arg(short, long, default_value = "profiling_reports")]
        output_dir: String,
        /// Enable CPU profiling (pass `--cpu false` to disable)
        #[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
        cpu: bool,
        /// Enable memory profiling (pass `--memory false` to disable)
        #[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
        memory: bool,
        /// Enable I/O profiling (pass `--io false` to disable)
        #[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
        io: bool,
        /// Enable query profiling (pass `--query false` to disable)
        #[arg(long, default_value_t = true, action = clap::ArgAction::Set)]
        query: bool,
        /// Sampling interval in milliseconds
        #[arg(long, default_value = "100")]
        sampling_interval_ms: u64,
    },
    /// Run CPU-only profiling and emit a flame graph
    CpuProfile {
        #[arg(short, long, default_value = "30")]
        duration: u64,
        #[arg(short, long, default_value = "cpu_flame_graph.txt")]
        output: PathBuf,
    },
    /// Run memory-only profiling
    MemoryProfile {
        #[arg(short, long, default_value = "30")]
        duration: u64,
        #[arg(short, long, default_value = "memory_reports")]
        output_dir: String,
    },
    /// Analyze a previously saved profiling report
    Analyze {
        #[arg(short, long)]
        input: PathBuf,
        #[arg(short, long, default_value = "analysis_reports")]
        output_dir: String,
    },
    /// Generate optimization recommendations
    Recommend {
        #[arg(short, long)]
        metrics: Option<PathBuf>,
        #[arg(short, long, default_value = "optimization_recommendations.txt")]
        output: PathBuf,
    },
}
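
/// Entry point: parses CLI arguments and dispatches to the selected subcommand.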
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let cli = Cli::parse();

    match cli.command {
        Commands::Profile {
            duration,
            db_path,
            output_dir,
            cpu,
            memory,
            io,
            query,
            sampling_interval_ms,
        } => {
            run_comprehensive_profiling(
                duration,
                db_path,
                output_dir,
                cpu,
                memory,
                io,
                query,
                sampling_interval_ms,
            )
            .await
        }
        Commands::CpuProfile { duration, output } => {
            run_cpu_profiling(duration, output).await
        }
        Commands::MemoryProfile { duration, output_dir } => {
            run_memory_profiling(duration, output_dir).await
        }
        Commands::Analyze { input, output_dir } => {
            run_analysis(input, output_dir).await
        }
        Commands::Recommend { metrics, output } => {
            generate_recommendations(metrics, output).await
        }
    }
}
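
/// Runs all enabled profilers against a simulated workload for `duration` seconds
/// and writes the JSON report, text summary, and optional flame graph to `output_dir`.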
async fn run_comprehensive_profiling(
    duration: u64,
    db_path: PathBuf,
    output_dir: String,
    enable_cpu: bool,
    enable_memory: bool,
    enable_io: bool,
    enable_query: bool,
    sampling_interval_ms: u64,
) -> Result<(), Box<dyn std::error::Error>> {
    println!("🚀 Starting KotobaDB Comprehensive Profiling");
    println!("==========================================");
    println!("Duration: {}s", duration);
    println!("Database: {}", db_path.display());
    println!("Output: {}", output_dir);
    println!("CPU Profiling: {}", if enable_cpu { "Enabled" } else { "Disabled" });
    println!("Memory Profiling: {}", if enable_memory { "Enabled" } else { "Disabled" });
    println!("I/O Profiling: {}", if enable_io { "Enabled" } else { "Disabled" });
    println!("Query Profiling: {}", if enable_query { "Enabled" } else { "Disabled" });
    println!("Sampling Interval: {}ms", sampling_interval_ms);
    println!();

    let config = ProfilingConfig {
        enable_cpu_profiling: enable_cpu,
        enable_memory_profiling: enable_memory,
        enable_io_profiling: enable_io,
        enable_query_profiling: enable_query,
        sampling_interval: Duration::from_millis(sampling_interval_ms),
        max_snapshots: 10000,
        flame_graph_output: true,
    };

    let mut profiler = Profiler::with_config(config);
    std::fs::create_dir_all(&output_dir)?;

    profiler.start_profiling().await?;
    println!("Running database workload simulation...");
    run_workload_simulation(duration).await;
    let report = profiler.stop_profiling().await?;

    let report_path = std::path::Path::new(&output_dir).join("profiling_report.json");
    std::fs::write(&report_path, report.to_json()?)?;
    println!("📄 Profiling report saved to: {}", report_path.display());

    let summary_path = std::path::Path::new(&output_dir).join("PROFILING_SUMMARY.txt");
    std::fs::write(&summary_path, report.summary())?;
    println!("📋 Summary saved to: {}", summary_path.display());

    if let Some(flame_graph) = report.to_flame_graph() {
        let flame_path = std::path::Path::new(&output_dir).join("cpu_flame_graph.txt");
        std::fs::write(&flame_path, flame_graph)?;
        println!("🔥 Flame graph saved to: {}", flame_path.display());
    }

    println!("\n✅ Profiling completed successfully!");
    Ok(())
}
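
/// Runs CPU-only profiling with a 10 ms sampling interval and writes the
/// resulting flame graph to `output`.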
async fn run_cpu_profiling(duration: u64, output: PathBuf) -> Result<(), Box<dyn std::error::Error>> {
println!("🔥 Starting CPU Profiling");
println!("========================");
println!("Duration: {}s", duration);
println!("Output: {}", output.display());
println!();
let mut profiler = Profiler::with_config(ProfilingConfig {
enable_cpu_profiling: true,
enable_memory_profiling: false,
enable_io_profiling: false,
enable_query_profiling: false,
sampling_interval: Duration::from_millis(10),
max_snapshots: 10000,
flame_graph_output: true,
});
profiler.start_profiling().await?;
run_workload_simulation(duration).await;
let report = profiler.stop_profiling().await?;
if let Some(flame_graph) = report.to_flame_graph() {
std::fs::write(&output, flame_graph)?;
println!("✅ Flame graph saved to: {}", output.display());
} else {
println!("❌ CPU profiling data not available");
}
Ok(())
}
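
/// Runs memory-only profiling against a memory-intensive workload and writes
/// the JSON report to `output_dir`.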
async fn run_memory_profiling(duration: u64, output_dir: String) -> Result<(), Box<dyn std::error::Error>> {
println!("💾 Starting Memory Profiling");
println!("===========================");
println!("Duration: {}s", duration);
println!("Output: {}", output_dir);
println!();
std::fs::create_dir_all(&output_dir)?;
let mut profiler = Profiler::with_config(ProfilingConfig {
enable_cpu_profiling: false,
enable_memory_profiling: true,
enable_io_profiling: false,
enable_query_profiling: false,
sampling_interval: Duration::from_millis(50),
max_snapshots: 10000,
flame_graph_output: false,
});
profiler.start_profiling().await?;
run_memory_intensive_workload(duration).await;
let report = profiler.stop_profiling().await?;
let report_path = std::path::Path::new(&output_dir).join("memory_profile.json");
std::fs::write(&report_path, report.to_json()?)?;
println!("✅ Memory profile saved to: {}", report_path.display());
Ok(())
}
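
/// Re-reads a previously saved profiling report from `input` and writes a
/// pretty-printed copy plus a text summary to `output_dir`.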
async fn run_analysis(input: PathBuf, output_dir: String) -> Result<(), Box<dyn std::error::Error>> {
println!("🔍 Analyzing Profiling Data");
println!("==========================");
println!("Input: {}", input.display());
println!("Output: {}", output_dir);
println!();
let data = std::fs::read_to_string(&input)?;
let report: crate::ProfilingReport = serde_json::from_str(&data)?;
std::fs::create_dir_all(&output_dir)?;
let analysis_path = std::path::Path::new(&output_dir).join("analysis_report.json");
std::fs::write(&analysis_path, serde_json::to_string_pretty(&report)?)?;
println!("✅ Analysis report saved to: {}", analysis_path.display());
let summary_path = std::path::Path::new(&output_dir).join("ANALYSIS_SUMMARY.txt");
std::fs::write(&summary_path, report.summary())?;
println!("📋 Analysis summary saved to: {}", summary_path.display());
Ok(())
}
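
/// Generates optimization recommendations, either from a saved system-metrics
/// file or from a built-in example `SystemAnalysis`, and writes them to `output`.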
async fn generate_recommendations(metrics: Option<PathBuf>, output: PathBuf) -> Result<(), Box<dyn std::error::Error>> {
println!("💡 Generating Optimization Recommendations");
println!("=========================================");
let system_analysis = if let Some(metrics_path) = metrics {
let data = std::fs::read_to_string(&metrics_path)?;
Some(serde_json::from_str(&data)?)
} else {
Some(crate::system_monitor::SystemAnalysis {
monitoring_duration: Duration::from_secs(300),
average_cpu_usage: 65.0,
peak_cpu_usage: 85.0,
average_memory_usage: 72.0,
peak_memory_usage: 88.0,
total_disk_read_mb: 1024.0,
total_disk_write_mb: 512.0,
total_network_rx_mb: 256.0,
total_network_tx_mb: 128.0,
resource_trends: crate::system_monitor::ResourceTrends {
cpu_trend: crate::system_monitor::Trend::Increasing,
memory_trend: crate::system_monitor::Trend::Stable,
disk_trend: crate::system_monitor::Trend::Stable,
network_trend: crate::system_monitor::Trend::Stable,
},
bottlenecks: vec![
crate::system_monitor::SystemBottleneck {
resource_type: crate::system_monitor::ResourceType::Cpu,
severity: crate::system_monitor::Severity::Medium,
description: "CPU usage trending upward".to_string(),
utilization_percent: 75.0,
duration_seconds: 120.0,
}
],
utilization_patterns: crate::system_monitor::UtilizationPatterns {
peak_hours: vec![9, 10, 11, 14, 15, 16],
cpu_spike_frequency: 0.15,
memory_growth_rate: 2.5,
io_burst_pattern: true,
network_burst_pattern: false,
},
recommendations: vec!["Monitor CPU usage trends".to_string()],
})
};
let advisor = crate::performance_advisor::PerformanceAdvisor::new();
let bottlenecks = advisor.identify_bottlenecks(
&None, &None, &None, &None, &system_analysis,
).await;
let recommendations = advisor.generate_recommendations(&bottlenecks, &system_analysis).await;
let mut content = format!("KotobaDB Optimization Recommendations\n");
content.push_str(&format!("Generated: {}\n\n", chrono::Utc::now().format("%Y-%m-%d %H:%M:%S UTC")));
if !bottlenecks.is_empty() {
content.push_str("Identified Bottlenecks:\n");
content.push_str("=====================\n");
for bottleneck in &bottlenecks {
content.push_str(&format!("• {} ({}): {}\n",
bottleneck.bottleneck_type, bottleneck.severity, bottleneck.description));
}
content.push_str("\n");
}
if !recommendations.is_empty() {
content.push_str("Optimization Recommendations:\n");
content.push_str("=============================\n");
for rec in &recommendations {
content.push_str(&format!("Priority: {:?} | Category: {:?} | Effort: {:?}\n",
rec.priority, rec.category, rec.implementation_effort));
content.push_str(&format!("Title: {}\n", rec.title));
content.push_str(&format!("Description: {}\n", rec.description));
content.push_str(&format!("Expected Impact: {:.1}%\n", rec.expected_impact * 100.0));
content.push_str("Actions:\n");
for action in &rec.actions {
content.push_str(&format!(" - {}\n", action));
}
content.push_str("\n");
}
} else {
content.push_str("No specific recommendations. System appears to be performing well.\n");
}
std::fs::write(&output, content)?;
println!("✅ Recommendations saved to: {}", output.display());
Ok(())
}
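
/// Simulates a mixed database workload (CPU-bound sorting, allocation bursts,
/// and I/O-like waits) for `duration_secs` seconds.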
async fn run_workload_simulation(duration_secs: u64) {
    use rand::Rng;

    let start_time = std::time::Instant::now();
    let mut rng = rand::thread_rng();

    println!("Running workload simulation for {} seconds...", duration_secs);

    while start_time.elapsed().as_secs() < duration_secs {
        let operation_type = rng.gen_range(0..10);
        match operation_type {
            // CPU-bound work: sort a batch of random integers.
            0..=3 => {
                let mut data: Vec<i32> = (0..1000).map(|_| rng.gen()).collect();
                data.sort();
                tokio::time::sleep(Duration::from_millis(rng.gen_range(1..5))).await;
            }
            // Allocation burst: short-lived heap buffers of varying size.
            4..=6 => {
                let mut allocations = Vec::new();
                for _ in 0..100 {
                    allocations.push(vec![0u8; rng.gen_range(1024..10240)]);
                }
                drop(allocations);
                tokio::time::sleep(Duration::from_millis(rng.gen_range(1..3))).await;
            }
            // Simulated I/O wait.
            7..=8 => {
                tokio::time::sleep(Duration::from_millis(rng.gen_range(5..20))).await;
            }
            // Simulated query latency.
            _ => {
                tokio::time::sleep(Duration::from_millis(rng.gen_range(2..10))).await;
            }
        }
    }

    println!("Workload simulation completed");
}
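
/// Simulates a memory-intensive workload that grows, churns, and partially
/// frees a set of heap allocations for `duration_secs` seconds.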
async fn run_memory_intensive_workload(duration_secs: u64) {
    use rand::Rng;

    let start_time = std::time::Instant::now();
    let mut rng = rand::thread_rng();

    println!("Running memory-intensive workload simulation...");

    let mut allocations = Vec::new();

    while start_time.elapsed().as_secs() < duration_secs {
        match rng.gen_range(0..5) {
            // Single large (1 MiB) allocation.
            0 => {
                allocations.push(vec![0u8; 1024 * 1024]);
            }
            // Many small allocations.
            1 => {
                for _ in 0..1000 {
                    allocations.push(vec![0u8; rng.gen_range(64..1024)]);
                }
            }
            // String allocations.
            2 => {
                for _ in 0..100 {
                    allocations.push(format!("Memory test string {}", rng.gen::<u64>()).into_bytes());
                }
            }
            // Partially free older allocations to create churn.
            3 => {
                if allocations.len() > 100 {
                    allocations.drain(0..50);
                }
            }
            // Build and serialize a map to exercise mixed allocation patterns.
            _ => {
                let mut map = std::collections::HashMap::new();
                for i in 0..100 {
                    map.insert(format!("key_{}", i), vec![rng.gen::<u8>(); 100]);
                }
                allocations.push(serde_json::to_vec(&map).unwrap_or_default());
            }
        }
        tokio::time::sleep(Duration::from_millis(10)).await;
    }

    drop(allocations);
    println!("Memory-intensive workload simulation completed");
}