use anyhow::Result;
use std::sync::Arc;
use std::time::Duration;
use vtcode_core::core::memory_pool::{MemoryPool, global_pool};
use vtcode_core::core::optimized_agent::OptimizedAgentEngine;
use vtcode_core::llm::optimized_client::OptimizedLLMClient;
use vtcode_core::tools::ToolCallRequest;
use vtcode_core::tools::async_pipeline::{
AsyncToolPipeline, ExecutionContext, ExecutionPriority, ToolRequest,
};
use vtcode_core::tools::optimized_registry::{OptimizedToolRegistry, ToolMetadata};
#[tokio::test]
async fn test_memory_pool_performance() -> Result<()> {
    // Check out a batch of strings, return them all, and confirm a
    // subsequent checkout hands back a cleared (reusable) buffer.
    let pool = MemoryPool::new();
    let checked_out: Vec<_> = (0..100).map(|_| pool.get_string()).collect();
    for buffer in checked_out {
        pool.return_string(buffer);
    }
    // A string served after returns must be reset, not carry stale contents.
    let recycled = pool.get_string();
    assert!(recycled.is_empty());
    Ok(())
}
#[tokio::test]
async fn test_optimized_tool_registry() -> Result<()> {
    // Register a tool, look it up, execute it once, and confirm that a
    // stats entry was recorded for the execution.
    let registry = OptimizedToolRegistry::new(4);
    registry.register_tool(ToolMetadata {
        name: "test_tool".to_string(),
        description: "Test tool for optimization".to_string(),
        parameters: serde_json::json!({"type": "object"}),
        is_cached: false,
        avg_execution_time_ms: 100,
    });

    // Lookup must round-trip the metadata we just registered.
    let looked_up = registry.get_tool_metadata("test_tool");
    assert!(looked_up.is_some());
    assert_eq!(looked_up.unwrap().name, "test_tool");

    // Execution should succeed and leave exactly one stats entry behind.
    let outcome = registry
        .execute_tool_optimized("test_tool", serde_json::json!({"input": "test"}))
        .await;
    assert!(outcome.is_ok());
    assert_eq!(registry.get_stats_snapshot().len(), 1);
    Ok(())
}
#[tokio::test]
async fn test_async_tool_pipeline() -> Result<()> {
let mut pipeline = AsyncToolPipeline::new(
4, 100, 5, Duration::from_millis(100), );
pipeline.start().await?;
let mut request_ids = Vec::new();
for i in 0..10 {
let request = ToolRequest {
call: ToolCallRequest {
id: format!("test_request_{}", i),
tool_name: "test_tool".to_string(),
args: serde_json::json!({"index": i}),
metadata: None,
},
priority: ExecutionPriority::Normal,
timeout: Duration::from_secs(5),
context: ExecutionContext {
session_id: "test_session".to_string(),
user_id: None,
workspace_path: "/tmp".to_string(),
parent_request_id: None,
},
};
let request_id = pipeline.submit_request(request).await?;
request_ids.push(request_id);
}
tokio::time::sleep(Duration::from_millis(500)).await;
let metrics = pipeline.get_metrics().await;
assert!(metrics.total_requests >= 10);
pipeline.shutdown().await?;
Ok(())
}
#[tokio::test]
async fn test_async_tool_pipeline_prevents_duplicate_start() -> Result<()> {
    // Starting an already-running pipeline must be rejected with an error.
    let mut pipeline = AsyncToolPipeline::new(2, 16, 2, Duration::from_millis(50));
    pipeline.start().await?;
    assert!(pipeline.start().await.is_err());
    pipeline.shutdown().await?;
    Ok(())
}
#[tokio::test]
async fn test_async_tool_pipeline_restart_after_shutdown() -> Result<()> {
    // A full start/shutdown cycle must leave the pipeline restartable, so
    // run the cycle twice back to back.
    let mut pipeline = AsyncToolPipeline::new(2, 16, 2, Duration::from_millis(50));
    for _ in 0..2 {
        pipeline.start().await?;
        pipeline.shutdown().await?;
    }
    Ok(())
}
#[tokio::test]
async fn test_optimized_llm_client() -> Result<()> {
    // NOTE(review): argument meanings inferred from call-site values
    // (workers?, batch size?, rate?, cache size?) — confirm against
    // OptimizedLLMClient::new's signature.
    let client = OptimizedLLMClient::new(
        4, 50, 10.0, 20, );
    client.start().await?;
    // Second start must also succeed: unlike the pipeline (whose duplicate
    // start errors — see the pipeline tests), the client's start is
    // expected to be idempotent.
    client.start().await?;
    // No requests were issued, so the counter must still read zero.
    let metrics = client.get_metrics().await;
    assert_eq!(metrics.total_requests, 0);
    client.shutdown().await?;
    // Repeated shutdown must be tolerated as well.
    client.shutdown().await?;
    // And the client must be restartable after a full shutdown.
    client.start().await?;
    client.shutdown().await?;
    Ok(())
}
#[tokio::test]
async fn test_optimized_agent_engine() -> Result<()> {
    // Smoke test: constructing the engine over a shared pipeline must not
    // panic. The engine itself is unused beyond construction.
    let pipeline = Arc::new(AsyncToolPipeline::new(4, 100, 5, Duration::from_millis(100)));
    let _engine = OptimizedAgentEngine::new("test_session".to_string(), pipeline);
    Ok(())
}
#[tokio::test]
async fn test_global_memory_pool() -> Result<()> {
    // Hit the shared pool from ten concurrent tasks to confirm it can be
    // used safely across task boundaries.
    let pool = global_pool();
    let tasks: Vec<_> = (0..10)
        .map(|i| {
            let pool = Arc::clone(&pool);
            tokio::spawn(async move {
                let mut buffer = pool.get_string();
                buffer.push_str(&format!("test_{}", i));
                tokio::time::sleep(Duration::from_millis(10)).await;
                pool.return_string(buffer);
            })
        })
        .collect();
    // Propagate any task panic as a test failure.
    for task in tasks {
        task.await?;
    }
    Ok(())
}
#[tokio::test]
async fn test_optimization_integration() -> Result<()> {
    // End-to-end pass over the optimization stack: global pool, tool
    // registry, and async pipeline cooperating in one flow.
    let pool = global_pool();
    let scratch = pool.get_string();

    let registry = OptimizedToolRegistry::new(2);
    registry.register_tool(ToolMetadata {
        name: "integration_test".to_string(),
        description: "Integration test tool".to_string(),
        parameters: serde_json::json!({}),
        is_cached: true,
        avg_execution_time_ms: 50,
    });

    let mut pipeline = AsyncToolPipeline::new(2, 50, 3, Duration::from_millis(50));
    pipeline.start().await?;

    let request = ToolRequest {
        call: ToolCallRequest {
            id: "integration_test_request".to_string(),
            tool_name: "integration_test".to_string(),
            args: serde_json::json!({}),
            metadata: None,
        },
        priority: ExecutionPriority::High,
        timeout: Duration::from_secs(1),
        context: ExecutionContext {
            session_id: "integration_test_session".to_string(),
            user_id: Some("test_user".to_string()),
            workspace_path: "/tmp".to_string(),
            parent_request_id: None,
        },
    };
    let request_id = pipeline.submit_request(request).await?;
    assert!(!request_id.is_empty());

    // Let the pipeline process the request before reading metrics.
    tokio::time::sleep(Duration::from_millis(200)).await;
    let pipeline_metrics = pipeline.get_metrics().await;
    assert!(pipeline_metrics.total_requests > 0);

    pool.return_string(scratch);
    pipeline.shutdown().await?;
    Ok(())
}
#[tokio::test]
async fn test_performance_under_load() -> Result<()> {
    use std::time::Instant;

    // Throughput smoke test: 100 requests across 20 tools, mixed priorities
    // and sessions, must all be accepted and processed within a loose budget.
    let start_time = Instant::now();
    let registry = OptimizedToolRegistry::new(8);
    let mut pipeline = AsyncToolPipeline::new(8, 200, 10, Duration::from_millis(50));
    pipeline.start().await?;

    // Register 20 tools with varied caching and timing characteristics.
    for i in 0..20 {
        registry.register_tool(ToolMetadata {
            name: format!("load_test_tool_{}", i),
            description: format!("Load test tool {}", i),
            parameters: serde_json::json!({}),
            is_cached: i % 3 == 0,
            avg_execution_time_ms: 25 + (i * 5) as u64,
        });
    }

    // Submit 100 requests, every tenth one at high priority. submit_request
    // returns a request id (previously misnamed `handles` — these are ids,
    // not JoinHandles; submission is fire-and-forget).
    let mut request_ids = Vec::with_capacity(100);
    for i in 0..100 {
        let request = ToolRequest {
            call: ToolCallRequest {
                id: format!("load_test_{}", i),
                tool_name: format!("load_test_tool_{}", i % 20),
                args: serde_json::json!({"iteration": i}),
                metadata: None,
            },
            priority: if i % 10 == 0 {
                ExecutionPriority::High
            } else {
                ExecutionPriority::Normal
            },
            timeout: Duration::from_secs(2),
            context: ExecutionContext {
                session_id: format!("load_test_session_{}", i % 5),
                user_id: Some(format!("user_{}", i % 10)),
                workspace_path: "/tmp".to_string(),
                parent_request_id: None,
            },
        };
        request_ids.push(pipeline.submit_request(request).await?);
    }
    // Every submission must have been accepted.
    assert_eq!(request_ids.len(), 100);

    // Allow the workers to drain the queue before checking metrics.
    tokio::time::sleep(Duration::from_secs(2)).await;
    let total_time = start_time.elapsed();
    let metrics = pipeline.get_metrics().await;
    assert!(metrics.total_requests >= 100);
    // NOTE(review): wall-clock assertions can be flaky on loaded CI
    // machines; the 5s budget includes the fixed 2s drain sleep above.
    assert!(total_time < Duration::from_secs(5));
    assert!(metrics.avg_execution_time_ms < 1000.0);

    println!("Load test completed in {:?}", total_time);
    println!("Pipeline metrics: {:?}", metrics);
    pipeline.shutdown().await?;
    Ok(())
}
#[tokio::test]
async fn benchmark_memory_allocations() -> Result<()> {
    use std::hint::black_box;
    use std::time::Instant;

    let iterations = 10000;

    // Baseline: a fresh String + Vec allocation on every iteration.
    let start = Instant::now();
    for _ in 0..iterations {
        let mut s = String::new();
        s.push_str("test data");
        let v: Vec<String> = vec![s];
        // black_box keeps the optimizer from eliding the allocations.
        black_box(v);
    }
    let without_pool = start.elapsed();

    // Pooled: buffers are checked out, used, and returned for reuse.
    let pool = global_pool();
    let start = Instant::now();
    for _ in 0..iterations {
        let mut s = pool.get_string();
        s.push_str("test data");
        let mut v = pool.get_vec();
        // Move the string in, then pop it back out so BOTH buffers can be
        // returned. The previous `v.push(s.clone())` paid an extra heap
        // allocation per iteration that the baseline didn't, skewing the
        // comparison against the pool.
        v.push(s);
        let s = v.pop().expect("string was just pushed");
        black_box((&s, &v));
        pool.return_string(s);
        pool.return_vec(v);
    }
    let with_pool = start.elapsed();

    println!("Without pool: {:?}", without_pool);
    println!("With pool: {:?}", with_pool);
    Ok(())
}