use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use tokio::sync::Mutex;
use crate::error::Error;
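/// Central coordinator for runtime performance telemetry.
///
/// Tracks individual tool calls and higher-level operations, keeps a bounded
/// history of [`PerformanceRecord`]s, and exposes summaries, resource
/// statistics, and optimization recommendations. Shared state sits behind
/// `tokio::sync::Mutex`es so the tracker can be shared across async tasks.
///
/// A minimal usage sketch (operation names and values are illustrative):
///
/// ```ignore
/// let tracker = PerformanceTracker::new();
/// tracker.record_tool_call(1, 1_250, 0.0008).await?;
/// tracker
///     .record_operation("index_repo", 12, 5_400, true, serde_json::json!({}))
///     .await?;
/// let summary = tracker
///     .get_performance_summary(chrono::Duration::minutes(15))
///     .await?;
/// println!("efficiency: {:.2}", summary.efficiency_score);
/// ```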
pub struct PerformanceTracker {
real_time_metrics: Arc<Mutex<RealTimeMetrics>>,
performance_history: Arc<Mutex<VecDeque<PerformanceRecord>>>,
resource_tracker: Arc<Mutex<ResourceUtilizationTracker>>,
optimizer: Arc<Mutex<PerformanceOptimizer>>,
config: PerformanceTrackerConfig,
}
impl Default for PerformanceTracker {
fn default() -> Self {
Self::new()
}
}
impl PerformanceTracker {
pub fn new() -> Self {
Self {
real_time_metrics: Arc::new(Mutex::new(RealTimeMetrics::new())),
performance_history: Arc::new(Mutex::new(VecDeque::new())),
resource_tracker: Arc::new(Mutex::new(ResourceUtilizationTracker::new())),
optimizer: Arc::new(Mutex::new(PerformanceOptimizer::new())),
config: PerformanceTrackerConfig::default(),
}
}
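    /// Records a single tool call: folds it into the real-time metrics,
    /// feeds the resource tracker, and lets the optimizer inspect it.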
pub async fn record_tool_call(
&self,
tool_call_id: u32,
duration_ms: u64,
cost: f64,
) -> Result<(), Error> {
let timestamp = chrono::Utc::now();
{
let mut metrics = self.real_time_metrics.lock().await;
metrics.record_tool_call(tool_call_id, duration_ms, cost, timestamp);
}
{
let mut tracker = self.resource_tracker.lock().await;
tracker.record_tool_call(duration_ms).await;
}
{
let mut optimizer = self.optimizer.lock().await;
optimizer
.analyze_tool_call(tool_call_id, duration_ms, cost)
.await;
}
tracing::debug!(
"Recorded tool call {}: {}ms, ${:.4}",
tool_call_id,
duration_ms,
cost
);
Ok(())
}
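    /// Records a completed operation as a [`PerformanceRecord`], trims the
    /// history to `max_history_records`, and forwards the record to the
    /// real-time metrics and the optimizer.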
pub async fn record_operation(
&self,
operation_name: &str,
tool_calls_used: u32,
duration_ms: u64,
success: bool,
metadata: serde_json::Value,
) -> Result<(), Error> {
let timestamp = chrono::Utc::now();
let record = PerformanceRecord {
            // Millisecond precision keeps IDs distinct for repeats within a second.
            id: format!("op_{}_{}", operation_name, timestamp.timestamp_millis()),
operation_name: operation_name.to_string(),
timestamp,
tool_calls_used,
duration_ms,
success,
metadata,
};
{
let mut history = self.performance_history.lock().await;
history.push_back(record.clone());
if history.len() > self.config.max_history_records {
history.pop_front();
}
}
{
let mut metrics = self.real_time_metrics.lock().await;
metrics.record_operation(&record);
}
{
let mut optimizer = self.optimizer.lock().await;
optimizer.analyze_operation(&record).await;
}
tracing::info!(
"Recorded operation '{}': {} tool calls, {}ms, success: {}",
operation_name,
tool_calls_used,
duration_ms,
success
);
Ok(())
}
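    /// Returns a snapshot (clone) of the current real-time metrics.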
pub async fn get_real_time_metrics(&self) -> Result<RealTimeMetrics, Error> {
let metrics = self.real_time_metrics.lock().await;
Ok(metrics.clone())
}
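    /// Summarizes every operation recorded within the trailing
    /// `window_duration`: totals, averages, throughput, efficiency,
    /// bottlenecks, and recommendations. Returns
    /// [`PerformanceSummary::empty`] when the window holds no records.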
pub async fn get_performance_summary(
&self,
window_duration: chrono::Duration,
) -> Result<PerformanceSummary, Error> {
let now = chrono::Utc::now();
let cutoff = now - window_duration;
let history = self.performance_history.lock().await;
let recent_records: Vec<_> = history
.iter()
.filter(|record| record.timestamp >= cutoff)
.cloned()
.collect();
if recent_records.is_empty() {
return Ok(PerformanceSummary::empty());
}
let total_tool_calls: u32 = recent_records.iter().map(|r| r.tool_calls_used).sum();
let total_duration_ms: u64 = recent_records.iter().map(|r| r.duration_ms).sum();
let success_count = recent_records.iter().filter(|r| r.success).count();
let total_records = recent_records.len();
let avg_tool_calls_per_operation = total_tool_calls as f64 / total_records as f64;
let avg_duration_per_operation = total_duration_ms as f64 / total_records as f64;
let success_rate = success_count as f64 / total_records as f64;
        // Convert via seconds so sub-minute windows don't truncate to zero.
        let minutes = window_duration.num_seconds() as f64 / 60.0;
let throughput_per_minute = if minutes > 0.0 {
total_records as f64 / minutes
} else {
0.0
};
let efficiency_score = self.calculate_efficiency_score(&recent_records);
let bottlenecks = self.identify_bottlenecks(&recent_records).await;
Ok(PerformanceSummary {
time_window: window_duration,
total_operations: total_records,
total_tool_calls,
total_duration_ms,
avg_tool_calls_per_operation,
avg_duration_per_operation,
success_rate,
throughput_per_minute,
efficiency_score,
bottlenecks,
top_performing_operations: self.get_top_performing_operations(&recent_records),
recommendations: self
.generate_optimization_recommendations(&recent_records)
.await,
})
}
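    /// Returns averaged and peak resource-utilization statistics.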
pub async fn get_resource_utilization(&self) -> Result<ResourceUtilization, Error> {
let tracker = self.resource_tracker.lock().await;
Ok(tracker.get_utilization_stats())
}
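    /// Returns the optimizer's current recommendations, derived from its
    /// per-operation analysis cache.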
pub async fn get_optimization_recommendations(
&self,
) -> Result<Vec<OptimizationRecommendation>, Error> {
let optimizer = self.optimizer.lock().await;
Ok(optimizer.get_recommendations().await)
}
pub async fn reset(&self) -> Result<(), Error> {
{
let mut metrics = self.real_time_metrics.lock().await;
metrics.reset();
}
{
let mut history = self.performance_history.lock().await;
history.clear();
}
{
let mut tracker = self.resource_tracker.lock().await;
tracker.reset();
}
{
let mut optimizer = self.optimizer.lock().await;
optimizer.reset();
}
tracing::info!("Performance tracker reset");
Ok(())
}
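    /// Scores a record set in [0, 1] as a weighted blend: 40% success rate,
    /// 30% tool-call economy (100 or fewer calls per operation is ideal),
    /// and 30% speed (one minute or less per operation is ideal).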
fn calculate_efficiency_score(&self, records: &[PerformanceRecord]) -> f64 {
if records.is_empty() {
return 1.0;
}
let success_rate =
records.iter().filter(|r| r.success).count() as f64 / records.len() as f64;
let avg_tool_calls =
records.iter().map(|r| r.tool_calls_used).sum::<u32>() as f64 / records.len() as f64;
        // Fewer tool calls per operation means higher efficiency: 100 or fewer
        // calls on average scores full marks, mirroring the per-operation
        // scoring in `calculate_operation_efficiency`.
        let tool_call_efficiency = (100.0 / avg_tool_calls).min(1.0);
let avg_duration =
records.iter().map(|r| r.duration_ms).sum::<u64>() as f64 / records.len() as f64;
let time_efficiency = (60000.0 / avg_duration).min(1.0);
(success_rate * 0.4 + tool_call_efficiency * 0.3 + time_efficiency * 0.3).clamp(0.0, 1.0)
}
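    /// Flags three bottleneck classes: operations slower than twice the mean
    /// duration, operations using more than twice the mean tool calls, and
    /// an overall failure rate above 10%. Assumes `records` is non-empty;
    /// the caller filters out the empty case.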
async fn identify_bottlenecks(
&self,
records: &[PerformanceRecord],
) -> Vec<PerformanceBottleneck> {
let mut bottlenecks = Vec::new();
let avg_duration =
records.iter().map(|r| r.duration_ms).sum::<u64>() as f64 / records.len() as f64;
let slow_operations: Vec<_> = records
.iter()
.filter(|r| r.duration_ms as f64 > avg_duration * 2.0)
.collect();
if !slow_operations.is_empty() {
bottlenecks.push(PerformanceBottleneck {
type_: BottleneckType::SlowOperations,
severity: if slow_operations.len() as f64 / records.len() as f64 > 0.3 {
BottleneckSeverity::High
} else {
BottleneckSeverity::Medium
},
                description: format!(
                    "{} operations take more than twice the average duration ({}ms avg)",
                    slow_operations.len(),
                    avg_duration as u64
                ),
affected_operations: slow_operations
.iter()
.map(|r| r.operation_name.clone())
.collect(),
});
}
let avg_tool_calls =
records.iter().map(|r| r.tool_calls_used).sum::<u32>() as f64 / records.len() as f64;
let high_tool_call_ops: Vec<_> = records
.iter()
.filter(|r| r.tool_calls_used as f64 > avg_tool_calls * 2.0)
.collect();
if !high_tool_call_ops.is_empty() {
bottlenecks.push(PerformanceBottleneck {
type_: BottleneckType::ExcessiveToolCalls,
severity: if high_tool_call_ops.len() as f64 / records.len() as f64 > 0.2 {
BottleneckSeverity::High
} else {
BottleneckSeverity::Low
},
                description: format!(
                    "{} operations use more than twice the average tool calls ({:.1} avg)",
                    high_tool_call_ops.len(),
                    avg_tool_calls
                ),
affected_operations: high_tool_call_ops
.iter()
.map(|r| r.operation_name.clone())
.collect(),
});
}
let failure_rate =
records.iter().filter(|r| !r.success).count() as f64 / records.len() as f64;
if failure_rate > 0.1 {
bottlenecks.push(PerformanceBottleneck {
type_: BottleneckType::HighFailureRate,
severity: if failure_rate > 0.3 {
BottleneckSeverity::Critical
} else {
BottleneckSeverity::High
},
description: format!("High failure rate: {:.1}%", failure_rate * 100.0),
affected_operations: records
.iter()
.filter(|r| !r.success)
.map(|r| r.operation_name.clone())
.collect(),
});
}
bottlenecks
}
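    /// Groups records by operation name, scores each group, and returns the
    /// five most efficient operations.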
fn get_top_performing_operations(
&self,
records: &[PerformanceRecord],
) -> Vec<OperationPerformance> {
let mut operations: HashMap<String, Vec<&PerformanceRecord>> = HashMap::new();
for record in records {
operations
.entry(record.operation_name.clone())
.or_default()
.push(record);
}
let mut performance_data = Vec::new();
for (op_name, op_records) in operations {
let total_calls: u32 = op_records.iter().map(|r| r.tool_calls_used).sum();
let total_duration: u64 = op_records.iter().map(|r| r.duration_ms).sum();
let success_count = op_records.iter().filter(|r| r.success).count();
let total_ops = op_records.len();
performance_data.push(OperationPerformance {
operation_name: op_name,
total_executions: total_ops,
avg_tool_calls: total_calls as f64 / total_ops as f64,
avg_duration_ms: total_duration as f64 / total_ops as f64,
success_rate: success_count as f64 / total_ops as f64,
efficiency_score: self.calculate_operation_efficiency(&op_records),
});
}
        // `total_cmp` gives a total order over f64, avoiding the panic that
        // `partial_cmp(..).unwrap()` would hit on NaN scores.
        performance_data
            .sort_by(|a, b| b.efficiency_score.total_cmp(&a.efficiency_score));
performance_data.into_iter().take(5).collect()
}
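    /// Per-operation efficiency in [0, 1]: 50% success rate, 30% speed (10
    /// seconds or less is ideal), and 20% tool-call economy (50 or fewer
    /// calls is ideal).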
fn calculate_operation_efficiency(&self, records: &[&PerformanceRecord]) -> f64 {
if records.is_empty() {
return 0.0;
}
let success_rate =
records.iter().filter(|r| r.success).count() as f64 / records.len() as f64;
let avg_duration =
records.iter().map(|r| r.duration_ms).sum::<u64>() as f64 / records.len() as f64;
let avg_tool_calls =
records.iter().map(|r| r.tool_calls_used).sum::<u32>() as f64 / records.len() as f64;
let duration_score = (10000.0 / avg_duration).min(1.0);
let tool_call_score = (50.0 / avg_tool_calls).min(1.0);
success_rate * 0.5 + duration_score * 0.3 + tool_call_score * 0.2
}
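    /// Produces recommendations from window-wide averages: long durations
    /// (over 30s), heavy tool-call usage (over 50 per operation), and
    /// frequently repeated operations (5+ executions) that may benefit from
    /// result caching.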
async fn generate_optimization_recommendations(
&self,
records: &[PerformanceRecord],
) -> Vec<OptimizationRecommendation> {
let mut recommendations = Vec::new();
let avg_duration =
records.iter().map(|r| r.duration_ms).sum::<u64>() as f64 / records.len() as f64;
let avg_tool_calls =
records.iter().map(|r| r.tool_calls_used).sum::<u32>() as f64 / records.len() as f64;
if avg_duration > 30000.0 {
recommendations.push(OptimizationRecommendation {
priority: RecommendationPriority::High,
category: RecommendationCategory::Performance,
title: "Optimize Operation Duration".to_string(),
description: format!(
"Average operation duration is {:.1} seconds. Consider optimizing algorithms or parallelizing operations.",
avg_duration / 1000.0
),
estimated_impact: ImpactLevel::High,
implementation_effort: ImplementationEffort::Medium,
});
}
if avg_tool_calls > 50.0 {
recommendations.push(OptimizationRecommendation {
priority: RecommendationPriority::Medium,
category: RecommendationCategory::ToolEfficiency,
title: "Reduce Tool Call Count".to_string(),
description: format!(
"Average tool calls per operation is {:.1}. Consider batching operations or caching results.",
avg_tool_calls
),
estimated_impact: ImpactLevel::Medium,
implementation_effort: ImplementationEffort::Low,
});
}
        let mut operation_counts: HashMap<String, usize> = HashMap::new();
        for record in records {
            *operation_counts
                .entry(record.operation_name.clone())
                .or_insert(0) += 1;
        }
let frequently_used_ops: Vec<_> = operation_counts
.iter()
.filter(|(_, count)| **count >= 5)
.map(|(name, _)| name)
.collect();
if !frequently_used_ops.is_empty() {
recommendations.push(OptimizationRecommendation {
priority: RecommendationPriority::Medium,
category: RecommendationCategory::Caching,
title: "Implement Result Caching".to_string(),
description: format!(
"Operations {} are executed frequently. Consider implementing result caching to improve performance.",
frequently_used_ops.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
),
estimated_impact: ImpactLevel::High,
implementation_effort: ImplementationEffort::Low,
});
}
recommendations
}
}
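/// Rolling in-memory view of recent tool-call activity: counters, moving
/// averages over the last 100 samples, throughput, and error rate.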
#[derive(Debug, Clone)]
pub struct RealTimeMetrics {
pub current_tool_call_id: u32,
pub total_tool_calls_today: u32,
pub avg_duration_ms: f64,
pub avg_cost_per_call: f64,
pub current_throughput_per_minute: f64,
pub error_rate: f64,
pub last_update: chrono::DateTime<chrono::Utc>,
pub rolling_window: VecDeque<PerformanceSample>,
}
impl Default for RealTimeMetrics {
fn default() -> Self {
Self::new()
}
}
impl RealTimeMetrics {
pub fn new() -> Self {
Self {
current_tool_call_id: 0,
total_tool_calls_today: 0,
avg_duration_ms: 0.0,
avg_cost_per_call: 0.0,
current_throughput_per_minute: 0.0,
error_rate: 0.0,
last_update: chrono::Utc::now(),
rolling_window: VecDeque::new(),
}
}
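    /// Folds one tool call into the rolling window and recomputes the
    /// moving averages and throughput.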
pub fn record_tool_call(
&mut self,
tool_call_id: u32,
duration_ms: u64,
cost: f64,
timestamp: chrono::DateTime<chrono::Utc>,
) {
self.current_tool_call_id = tool_call_id;
self.total_tool_calls_today += 1;
self.last_update = timestamp;
        let sample = PerformanceSample {
            timestamp,
            duration_ms,
            cost,
            // Failures are reported at the operation level via
            // `record_operation`, so per-call samples are stored as successful.
            success: true,
        };
        self.rolling_window.push_back(sample);
        // 100 mirrors the default `PerformanceTrackerConfig::rolling_window_size`.
        if self.rolling_window.len() > 100 {
            self.rolling_window.pop_front();
        }
let total_samples = self.rolling_window.len() as f64;
self.avg_duration_ms = self
.rolling_window
.iter()
.map(|s| s.duration_ms as f64)
.sum::<f64>()
/ total_samples;
self.avg_cost_per_call =
self.rolling_window.iter().map(|s| s.cost).sum::<f64>() / total_samples;
if let (Some(first), Some(last)) = (self.rolling_window.front(), self.rolling_window.back())
{
            // Millisecond precision keeps bursts within a single second measurable.
            let time_span =
                (last.timestamp - first.timestamp).num_milliseconds() as f64 / 1000.0;
            if time_span > 0.0 {
                self.current_throughput_per_minute = (total_samples * 60.0) / time_span;
}
}
}
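    /// Updates the rolling error rate from an operation outcome. Per-call
    /// samples are always stored as successful, so failures enter the error
    /// rate only through this operation-level path.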
pub fn record_operation(&mut self, record: &PerformanceRecord) {
let total_ops = self.rolling_window.len() as f64 + 1.0;
let successful_ops = self.rolling_window.iter().filter(|s| s.success).count() as f64
+ if record.success { 1.0 } else { 0.0 };
self.error_rate = 1.0 - (successful_ops / total_ops);
}
pub fn reset(&mut self) {
self.current_tool_call_id = 0;
self.total_tool_calls_today = 0;
self.avg_duration_ms = 0.0;
self.avg_cost_per_call = 0.0;
self.current_throughput_per_minute = 0.0;
self.error_rate = 0.0;
self.rolling_window.clear();
}
}
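/// One tool-call observation inside the rolling window.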
#[derive(Debug, Clone)]
pub struct PerformanceSample {
timestamp: chrono::DateTime<chrono::Utc>,
duration_ms: u64,
cost: f64,
success: bool,
}
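/// Tracks per-call resource usage. Values are currently simulated from call
/// duration rather than sampled from the operating system.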
#[derive(Debug)]
struct ResourceUtilizationTracker {
memory_usage_mb: VecDeque<f64>,
cpu_usage_percent: VecDeque<f64>,
network_io_mb: VecDeque<f64>,
disk_io_mb: VecDeque<f64>,
peak_memory_mb: f64,
peak_cpu_percent: f64,
}
impl ResourceUtilizationTracker {
fn new() -> Self {
Self {
memory_usage_mb: VecDeque::new(),
cpu_usage_percent: VecDeque::new(),
network_io_mb: VecDeque::new(),
disk_io_mb: VecDeque::new(),
peak_memory_mb: 0.0,
peak_cpu_percent: 0.0,
}
}
async fn record_tool_call(&mut self, duration_ms: u64) {
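        // Simulated stand-ins derived from call duration; a real
        // implementation would sample the OS (for example via the
        // `sysinfo` crate).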
let simulated_memory = 100.0 + (duration_ms as f64 / 1000.0) * 10.0;
let simulated_cpu = 10.0 + (duration_ms as f64 / 1000.0) * 5.0;
let simulated_network = duration_ms as f64 / 1000.0 * 2.0;
let simulated_disk = duration_ms as f64 / 1000.0 * 1.0;
self.memory_usage_mb.push_back(simulated_memory);
self.cpu_usage_percent.push_back(simulated_cpu);
self.network_io_mb.push_back(simulated_network);
self.disk_io_mb.push_back(simulated_disk);
let max_samples = 50;
if self.memory_usage_mb.len() > max_samples {
self.memory_usage_mb.pop_front();
self.cpu_usage_percent.pop_front();
self.network_io_mb.pop_front();
self.disk_io_mb.pop_front();
}
self.peak_memory_mb = self.peak_memory_mb.max(simulated_memory);
self.peak_cpu_percent = self.peak_cpu_percent.max(simulated_cpu);
}
fn get_utilization_stats(&self) -> ResourceUtilization {
let avg_memory = if !self.memory_usage_mb.is_empty() {
self.memory_usage_mb.iter().sum::<f64>() / self.memory_usage_mb.len() as f64
} else {
0.0
};
let avg_cpu = if !self.cpu_usage_percent.is_empty() {
self.cpu_usage_percent.iter().sum::<f64>() / self.cpu_usage_percent.len() as f64
} else {
0.0
};
let avg_network = if !self.network_io_mb.is_empty() {
self.network_io_mb.iter().sum::<f64>() / self.network_io_mb.len() as f64
} else {
0.0
};
let avg_disk = if !self.disk_io_mb.is_empty() {
self.disk_io_mb.iter().sum::<f64>() / self.disk_io_mb.len() as f64
} else {
0.0
};
ResourceUtilization {
avg_memory_usage_mb: avg_memory,
peak_memory_usage_mb: self.peak_memory_mb,
avg_cpu_usage_percent: avg_cpu,
peak_cpu_usage_percent: self.peak_cpu_percent,
avg_network_io_mb_per_call: avg_network,
avg_disk_io_mb_per_call: avg_disk,
memory_efficiency: if self.peak_memory_mb > 0.0 {
avg_memory / self.peak_memory_mb
} else {
1.0
},
}
}
fn reset(&mut self) {
self.memory_usage_mb.clear();
self.cpu_usage_percent.clear();
self.network_io_mb.clear();
self.disk_io_mb.clear();
self.peak_memory_mb = 0.0;
self.peak_cpu_percent = 0.0;
}
}
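/// Analyzes calls and operations as they arrive and caches per-day,
/// per-operation observations used to build recommendations.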
#[derive(Debug)]
struct PerformanceOptimizer {
analysis_cache: HashMap<String, PerformanceAnalysis>,
optimization_history: VecDeque<OptimizationAction>,
}
impl PerformanceOptimizer {
fn new() -> Self {
Self {
analysis_cache: HashMap::new(),
optimization_history: VecDeque::new(),
}
}
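    /// Logs slow (over 10s) and high-cost (over $0.01) tool calls.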
async fn analyze_tool_call(&mut self, _tool_call_id: u32, duration_ms: u64, cost: f64) {
if duration_ms > 10_000 {
tracing::warn!("Slow tool call detected: {}ms", duration_ms);
}
if cost > 0.01 {
tracing::info!("High-cost tool call: ${:.4}", cost);
}
}
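    /// Caches the latest observation under an `{operation}_{date}` key.
    /// Despite the `avg_*` field names, this stores the most recent
    /// record's values rather than a running mean.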
async fn analyze_operation(&mut self, record: &PerformanceRecord) {
let analysis_key = format!(
"{}_{}",
record.operation_name,
record.timestamp.date_naive()
);
let analysis = PerformanceAnalysis {
operation_name: record.operation_name.clone(),
avg_duration_ms: record.duration_ms as f64,
avg_tool_calls: record.tool_calls_used as f64,
success_rate: if record.success { 1.0 } else { 0.0 },
last_analysis: chrono::Utc::now(),
};
self.analysis_cache.insert(analysis_key, analysis);
}
async fn get_recommendations(&self) -> Vec<OptimizationRecommendation> {
let mut recommendations = Vec::new();
for analysis in self.analysis_cache.values() {
if analysis.avg_duration_ms > 30000.0 {
recommendations.push(OptimizationRecommendation {
priority: RecommendationPriority::High,
category: RecommendationCategory::Performance,
title: format!("Optimize {} duration", analysis.operation_name),
description: format!(
"Operation '{}' averages {:.1} seconds. Consider optimization.",
analysis.operation_name,
analysis.avg_duration_ms / 1000.0
),
estimated_impact: ImpactLevel::High,
implementation_effort: ImplementationEffort::Medium,
});
}
if analysis.avg_tool_calls > 30.0 {
recommendations.push(OptimizationRecommendation {
priority: RecommendationPriority::Medium,
category: RecommendationCategory::ToolEfficiency,
title: format!("Reduce {} tool calls", analysis.operation_name),
description: format!(
"Operation '{}' uses {:.1} tool calls on average.",
analysis.operation_name, analysis.avg_tool_calls
),
estimated_impact: ImpactLevel::Medium,
implementation_effort: ImplementationEffort::Low,
});
}
}
recommendations
}
fn reset(&mut self) {
self.analysis_cache.clear();
self.optimization_history.clear();
}
}
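/// A single completed operation, as stored in the bounded history.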
#[derive(Debug, Clone)]
pub struct PerformanceRecord {
pub id: String,
pub operation_name: String,
pub timestamp: chrono::DateTime<chrono::Utc>,
pub tool_calls_used: u32,
pub duration_ms: u64,
pub success: bool,
pub metadata: serde_json::Value,
}
#[derive(Debug, Clone)]
#[allow(dead_code)]
struct PerformanceAnalysis {
operation_name: String,
avg_duration_ms: f64,
avg_tool_calls: f64,
success_rate: f64,
last_analysis: chrono::DateTime<chrono::Utc>,
}
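/// Aggregated view of all operations inside a trailing time window.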
#[derive(Debug, Clone)]
pub struct PerformanceSummary {
pub time_window: chrono::Duration,
pub total_operations: usize,
pub total_tool_calls: u32,
pub total_duration_ms: u64,
pub avg_tool_calls_per_operation: f64,
pub avg_duration_per_operation: f64,
pub success_rate: f64,
pub throughput_per_minute: f64,
pub efficiency_score: f64,
pub bottlenecks: Vec<PerformanceBottleneck>,
pub top_performing_operations: Vec<OperationPerformance>,
pub recommendations: Vec<OptimizationRecommendation>,
}
impl PerformanceSummary {
pub fn empty() -> Self {
Self {
            time_window: chrono::Duration::zero(),
total_operations: 0,
total_tool_calls: 0,
total_duration_ms: 0,
avg_tool_calls_per_operation: 0.0,
avg_duration_per_operation: 0.0,
success_rate: 1.0,
throughput_per_minute: 0.0,
efficiency_score: 1.0,
bottlenecks: Vec::new(),
top_performing_operations: Vec::new(),
recommendations: Vec::new(),
}
}
}
#[derive(Debug, Clone)]
pub struct PerformanceBottleneck {
pub type_: BottleneckType,
pub severity: BottleneckSeverity,
pub description: String,
pub affected_operations: Vec<String>,
}
#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckType {
SlowOperations,
ExcessiveToolCalls,
HighFailureRate,
ResourceContention,
NetworkLatency,
}
#[derive(Debug, Clone, PartialEq)]
pub enum BottleneckSeverity {
Low,
Medium,
High,
Critical,
}
#[derive(Debug, Clone)]
pub struct OperationPerformance {
pub operation_name: String,
pub total_executions: usize,
pub avg_tool_calls: f64,
pub avg_duration_ms: f64,
pub success_rate: f64,
pub efficiency_score: f64,
}
#[derive(Debug, Clone)]
pub struct OptimizationRecommendation {
pub priority: RecommendationPriority,
pub category: RecommendationCategory,
pub title: String,
pub description: String,
pub estimated_impact: ImpactLevel,
pub implementation_effort: ImplementationEffort,
}
#[derive(Debug, Clone, PartialEq)]
pub enum RecommendationPriority {
Low,
Medium,
High,
Critical,
}
#[derive(Debug, Clone, PartialEq)]
pub enum RecommendationCategory {
Performance,
ToolEfficiency,
Caching,
ResourceManagement,
ErrorHandling,
}
#[derive(Debug, Clone, PartialEq)]
pub enum ImpactLevel {
Low,
Medium,
High,
VeryHigh,
}
#[derive(Debug, Clone, PartialEq)]
pub enum ImplementationEffort {
Low,
Medium,
High,
}
#[derive(Debug, Clone)]
pub struct ResourceUtilization {
pub avg_memory_usage_mb: f64,
pub peak_memory_usage_mb: f64,
pub avg_cpu_usage_percent: f64,
pub peak_cpu_usage_percent: f64,
pub avg_network_io_mb_per_call: f64,
pub avg_disk_io_mb_per_call: f64,
pub memory_efficiency: f64,
}
#[derive(Debug, Clone)]
#[allow(dead_code)]
struct OptimizationAction {
timestamp: chrono::DateTime<chrono::Utc>,
action_type: String,
target: String,
expected_improvement: f64,
actual_improvement: Option<f64>,
}
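/// Tuning knobs for the tracker. Note that only `max_history_records` is
/// consumed by the implementation above; the remaining fields are currently
/// unused.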
#[derive(Debug, Clone)]
pub struct PerformanceTrackerConfig {
pub max_history_records: usize,
pub rolling_window_size: usize,
pub enable_real_time_monitoring: bool,
pub enable_resource_tracking: bool,
pub enable_optimization: bool,
pub analysis_interval_minutes: u32,
}
impl Default for PerformanceTrackerConfig {
fn default() -> Self {
Self {
max_history_records: 10000,
rolling_window_size: 100,
enable_real_time_monitoring: true,
enable_resource_tracking: true,
enable_optimization: true,
analysis_interval_minutes: 5,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_performance_tracker_creation() {
let tracker = PerformanceTracker::new();
assert!(tracker.record_tool_call(1, 1000, 0.001).await.is_ok());
}
#[tokio::test]
async fn test_real_time_metrics() {
let mut metrics = RealTimeMetrics::new();
metrics.record_tool_call(1, 2000, 0.002, chrono::Utc::now());
assert_eq!(metrics.total_tool_calls_today, 1);
assert_eq!(metrics.avg_duration_ms, 2000.0);
}
    #[tokio::test]
    async fn test_performance_summary() {
        let tracker = PerformanceTracker::new();
        assert!(tracker
            .record_operation("test_op", 10, 5000, true, serde_json::json!({}))
            .await
            .is_ok());
        let summary = tracker
            .get_performance_summary(chrono::Duration::minutes(5))
            .await
            .expect("summary should be available");
        assert_eq!(summary.total_operations, 1);
        assert_eq!(summary.total_tool_calls, 10);
        assert!((summary.success_rate - 1.0).abs() < f64::EPSILON);
    }
}