#![cfg(feature = "ahp")]
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use a3s_code_core::agent::{AgentConfig, AgentLoop};
use a3s_code_core::ahp::{AhpHookExecutor, AhpTransport};
use a3s_code_core::llm::OpenAiClient;
use a3s_code_core::tools::ToolExecutor;
/// Build a throwaway `ToolContext` rooted at `/tmp` for the tests below.
fn test_tool_context() -> a3s_code_core::tools::ToolContext {
    let workdir: PathBuf = "/tmp".into();
    a3s_code_core::tools::ToolContext::new(workdir)
}
/// Read MiniMax connection settings from the environment.
///
/// Returns `(api_key, base_url, model)`. The API key is mandatory — the test
/// panics with a clear message when it is absent; base URL and model fall
/// back to the public endpoint and default model respectively.
fn get_test_config() -> (String, String, String) {
    let api_key = std::env::var("MINIMAX_API_KEY")
        .expect("MINIMAX_API_KEY environment variable not set");
    let base_url = std::env::var("MINIMAX_BASE_URL")
        .unwrap_or_else(|_| String::from("https://api.minimax.io/v1/"));
    let model = std::env::var("MINIMAX_MODEL")
        .unwrap_or_else(|_| String::from("MiniMax-M2.7-highspeed"));
    (api_key, base_url, model)
}
/// Micro-benchmark for local intent detection: warms up, then times a fixed
/// number of detection calls and reports the per-op average in microseconds.
#[test]
#[ignore]
fn test_intent_detection_performance() {
    let (api_key, base_url, model) = get_test_config();
    let client = OpenAiClient::new(api_key.into(), model).with_base_url(base_url);
    let tools = Arc::new(ToolExecutor::new("/tmp".to_string()));
    let agent = AgentLoop::new(
        Arc::new(client),
        tools,
        test_tool_context(),
        AgentConfig::default(),
    );
    let prompt = "Where is the main function in auth.rs? Explain how the login works and verify the test cases.";
    // Warm-up pass so lazy initialization doesn't skew the timed loop.
    for _ in 0..100 {
        let _ = agent.detect_context_perception_intent(prompt, "test", "/workspace");
    }
    let iterations = 1000;
    let start = Instant::now();
    (0..iterations).for_each(|_| {
        let _ = agent.detect_context_perception_intent(prompt, "test", "/workspace");
    });
    let elapsed = start.elapsed();
    println!("\n=== Intent Detection Performance ===");
    println!("Prompt: {}", prompt);
    println!("Iterations: {}", iterations);
    println!("Total time: {:?}", elapsed);
    println!(
        "Average: {:.3} µs/op",
        elapsed.as_micros() as f64 / iterations as f64
    );
}
/// End-to-end check of context perception when an AHP hook harness is
/// attached. Falls back to intent detection only when no harness connects.
///
/// Fixes in this revision:
/// - `&r.text[..200]` could panic if byte 200 fell inside a multi-byte UTF-8
///   character in the LLM response; truncation now backs up to a char boundary.
/// - Removed the artificial always-`Ok` `transport_result` wrapper whose `Err`
///   arm was unreachable dead code.
#[test]
#[ignore]
fn test_context_perception_with_ahp_harness() {
    let (api_key, base_url, model) = get_test_config();
    let client = OpenAiClient::new(api_key.into(), model).with_base_url(base_url);
    let tool_executor = Arc::new(ToolExecutor::new("/tmp".to_string()));
    let rt = tokio::runtime::Runtime::new().unwrap();
    println!("Creating AHP transport...");
    // NOTE(review): `echo` is a stand-in process, not a real harness binary —
    // presumably this exercises the connect/handshake failure path; confirm.
    let transport = AhpTransport::Stdio {
        program: "echo".to_string(),
        args: vec![],
    };
    println!("Transport created, connecting to harness...");
    // 5000 ms handshake timeout; on failure we continue without hooks so the
    // intent-detection half of the test still runs.
    let hook_engine: Option<Arc<dyn a3s_code_core::hooks::HookExecutor>> =
        match rt.block_on(AhpHookExecutor::new_with_config(transport, 5000)) {
            Ok(ahp) => {
                println!("Connected to harness!");
                Some(Arc::new(ahp) as Arc<dyn a3s_code_core::hooks::HookExecutor>)
            }
            Err(e) => {
                println!("Failed to create AHP executor: {}", e);
                None
            }
        };
    let has_harness = hook_engine.is_some();
    let config = AgentConfig {
        hook_engine,
        ..Default::default()
    };
    let agent = AgentLoop::new(Arc::new(client), tool_executor, test_tool_context(), config);
    let prompt = "Where is the main function defined? Explain how the auth module works.";
    println!("\n=== Context Perception Test ===");
    println!("Prompt: {}", prompt);
    let intent = agent.detect_context_perception_intent(prompt, "test-session", "/workspace");
    println!("Detected intent: {:?}", intent.map(|i| i.intent));
    if has_harness {
        println!("Running full agent with harness...");
        let start = Instant::now();
        let result = rt.block_on(agent.execute_with_session(&[], prompt, Some("test"), None, None));
        let elapsed = start.elapsed();
        match result {
            Ok(r) => {
                // Truncate on a char boundary: slicing at a raw byte offset
                // panics when it splits a multi-byte UTF-8 character.
                let preview = if r.text.len() > 200 {
                    let mut end = 200;
                    while !r.text.is_char_boundary(end) {
                        end -= 1;
                    }
                    format!("{}...", &r.text[..end])
                } else {
                    r.text.clone()
                };
                println!("Success!");
                println!("Response: {}", preview);
                println!("Tokens: {:?}", r.usage);
                println!("Total time: {:?}", elapsed);
            }
            Err(e) => println!("Error: {}", e),
        }
    } else {
        println!("No harness configured, skipping full test");
    }
}
/// Times a full agent run without context providers and prints the result.
///
/// Fix in this revision: `&r.text[..100]` could panic if byte 100 fell inside
/// a multi-byte UTF-8 character in the LLM response; truncation now backs up
/// to a char boundary.
#[test]
#[ignore]
fn test_performance_comparison() {
    let (api_key, base_url, model) = get_test_config();
    let client1 =
        OpenAiClient::new(api_key.clone().into(), model.clone()).with_base_url(base_url.clone());
    // NOTE(review): the "WITH context providers" half of the comparison is not
    // implemented — `_client2` is built but never used. Wire it up (or drop it)
    // when the second agent configuration exists.
    let _client2 = OpenAiClient::new(api_key.into(), model).with_base_url(base_url);
    let tool_executor = Arc::new(ToolExecutor::new("/tmp".to_string()));
    let prompt = "Explain the codebase structure of this project.";
    let config_no_ctx = AgentConfig::default();
    let agent_no_ctx = AgentLoop::new(
        Arc::new(client1),
        tool_executor.clone(),
        test_tool_context(),
        config_no_ctx,
    );
    let rt = tokio::runtime::Runtime::new().unwrap();
    println!("\n=== Performance Comparison ===");
    println!("Prompt: {}", prompt);
    println!("\nRunning WITHOUT context providers...");
    let start = Instant::now();
    let result =
        rt.block_on(agent_no_ctx.execute_with_session(&[], prompt, Some("test"), None, None));
    let without_time = start.elapsed();
    match result {
        Ok(r) => {
            // Truncate on a char boundary: slicing at a raw byte offset
            // panics when it splits a multi-byte UTF-8 character.
            let preview = if r.text.len() > 100 {
                let mut end = 100;
                while !r.text.is_char_boundary(end) {
                    end -= 1;
                }
                format!("{}...", &r.text[..end])
            } else {
                r.text.clone()
            };
            println!("Time: {:?}", without_time);
            println!("Tokens: {:?}", r.usage);
            println!("Response preview: {}", preview);
        }
        Err(e) => println!("Error: {}", e),
    }
}
/// Table-driven check that each prompt maps to its expected intent label.
/// Prints OK/FAIL per case and asserts that no case failed.
#[test]
#[ignore]
fn test_all_intent_types() {
    let (api_key, base_url, model) = get_test_config();
    let client = OpenAiClient::new(api_key.into(), model).with_base_url(base_url);
    let tool_executor = Arc::new(ToolExecutor::new("/tmp".to_string()));
    let config = AgentConfig::default();
    let agent = AgentLoop::new(Arc::new(client), tool_executor, test_tool_context(), config);
    // (prompt, expected intent label)
    let test_cases = vec![
        ("Where is the main function?", "locate"),
        ("Find all files related to auth", "locate"),
        ("Locate the config file", "locate"),
        ("How does authentication work?", "understand"),
        ("What does this code do?", "understand"),
        ("Explain the login flow", "understand"),
        ("Remember what we discussed earlier?", "retrieve"),
        ("What was the previous approach?", "retrieve"),
        ("What files are in this project?", "explore"),
        ("Show me the project structure", "explore"),
        ("Why did the build fail?", "reason"),
        ("Why is this code structured this way?", "reason"),
        ("Verify this code is correct", "validate"),
        ("Check if the tests pass", "validate"),
        ("What's the difference between A and B?", "compare"),
        ("Compare these two approaches", "compare"),
        ("Show me the status of the task", "track"),
        ("What's the progress?", "track"),
    ];
    println!("\n=== Intent Detection Test ===");
    let mut passed = 0;
    let mut failed = 0;
    for (prompt, expected) in test_cases {
        // Cap the echoed prompt at 40 bytes (all prompts here are ASCII).
        let shown = &prompt[..prompt.len().min(40)];
        let detected = agent.detect_context_perception_intent(prompt, "test-session", "/workspace");
        match detected {
            Some(found) if found.intent == expected => {
                println!("OK: '{}' -> {}", shown, expected);
                passed += 1;
            }
            other => {
                let got = match &other {
                    Some(found) => format!("got '{}'", found.intent),
                    None => String::from("got None"),
                };
                println!("FAIL: '{}' -> expected '{}', {}", shown, expected, got);
                failed += 1;
            }
        }
    }
    println!("\nResults: {} passed, {} failed", passed, failed);
    assert_eq!(failed, 0, "Some intent detection tests failed");
}