#[cfg(test)]
mod tests {
    use ambi::types::config::OpenAIEngineConfig;
    use ambi::{Agent, ChatRunner, LLMEngineConfig};

    /// Smoke test against an OpenAI-compatible chat-completions endpoint.
    ///
    /// Connection parameters come from the environment so the same test can
    /// target a local server (e.g. Ollama) or the hosted OpenAI API:
    /// - `TEST_BASE_URL`   (default: `https://api.openai.com/v1`)
    /// - `TEST_MODEL_NAME` (default: `gpt-4o-mini`)
    /// - `TEST_API_KEY`    (default: `"test"`, a placeholder — local
    ///   OpenAI-compatible servers typically ignore the key)
    ///
    /// NOTE(review): the function name says "ollama_local" but the defaults
    /// point at the hosted OpenAI API — confirm which target is intended.
    #[tokio::test]
    async fn test_ollama_local_api() {
        // unwrap_or_else keeps the default-String allocation lazy
        // (clippy::or_fun_call); plain unwrap_or would allocate the default
        // even when the env var is set.
        let base_url = std::env::var("TEST_BASE_URL")
            .unwrap_or_else(|_| "https://api.openai.com/v1".to_string());
        let model_name =
            std::env::var("TEST_MODEL_NAME").unwrap_or_else(|_| "gpt-4o-mini".to_string());
        let api_key = std::env::var("TEST_API_KEY").unwrap_or_else(|_| "test".to_string());

        let cfg = OpenAIEngineConfig {
            api_key,
            base_url,
            model_name,
            temp: 0.7,
            top_p: 0.9,
        };

        // expect() with context so a failing run reports *which* step failed
        // instead of a bare unwrap panic.
        let mut agent = Agent::make(LLMEngineConfig::OpenAI(cfg))
            .await
            .expect("failed to construct agent from engine config");
        let res = ChatRunner::chat(&mut agent, "who are you")
            .await
            .expect("chat request failed");
        println!("{res}");
    }
}