//! Example: wire a weather tool into an `ambi` agent backed by the OpenAI API
//! and run a single chat turn.

use ambi::llm::providers::openai_api::config::OpenAIEngineConfig;
use ambi::types::ToolErr;
use ambi::{macros::tool, Agent, AgentState, ChatRunner, LLMEngineConfig};
use anyhow::Result;
use serde::Serialize;
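
/// Serializable result produced by the weather tool.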
#[derive(Serialize)]
pub struct WeatherOutput {
    temp: f32,
    condition: String,
}
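
/// Weather lookup exposed to the model as `check_city_weather`. The `#[tool]`
/// macro generates the `GetWeatherTool` type that `main` registers on the agent.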
#[tool(name = "check_city_weather", timeout = 10, idempotent)]
async fn get_weather(
    city: String,
    days: Option<u32>,
) -> core::result::Result<WeatherOutput, ToolErr> {
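    // Default the query window to one day when the caller omits `days`.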
    let _query_days = days.unwrap_or(1);
    println!("Checking weather for {} ...", city);
    Ok(WeatherOutput {
        temp: 22.5,
        condition: "Sunny".into(),
    })
}

#[tokio::main]
async fn main() -> Result<()> {
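    // Read the API key from the environment, falling back to a placeholder.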
    let api_key =
        std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "your-default-api-key".to_string());
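    // Configure an OpenAI-backed engine: endpoint, model, and sampling parameters.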
    let engine_config = LLMEngineConfig::OpenAI(OpenAIEngineConfig {
        api_key,
        base_url: "https://api.openai.com/v1".to_string(),
        model_name: "gpt-4o-mini".to_string(),
        temp: 0.7,
        top_p: 0.9,
    });
    let chat_runner = ChatRunner::default();
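    // `GetWeatherTool` is generated by the `#[tool]` macro above.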
    let agent = Agent::make(engine_config).await?.tool(GetWeatherTool)?;
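    // Shared conversation state for this session.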
    let agent_state = AgentState::new_shared("session-id");
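    // Run a single chat turn; the model can invoke the weather tool before replying.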
    let res = chat_runner
        .chat(&agent, &agent_state, "What's the weather in Beijing?")
        .await?;
    println!("Assistant: {}", res);
    Ok(())
}