use ambi::types::config::OpenAIEngineConfig;
use ambi::{Agent, AgentState, ChatRunner, LLMEngineConfig};
use anyhow::Result;
use futures::StreamExt;
use std::io::Write;
use std::sync::{Arc, Mutex};
#[tokio::main]
async fn main() -> Result<()> {
    // Build an OpenAI-backed engine config.
    // NOTE(review): "mock-key" is a placeholder — a real run needs a valid
    // API key (e.g. read from an environment variable); confirm intent.
    let engine_config = LLMEngineConfig::OpenAI(OpenAIEngineConfig {
        api_key: "mock-key".to_string(),
        base_url: "https://api.openai.com/v1".to_string(),
        model_name: "gpt-4o-mini".to_string(),
        temp: 0.7,
        top_p: 0.9,
    });

    let chat_runner = ChatRunner;
    let agent = Agent::make(engine_config).await?;
    // Conversation state wrapped in Arc<Mutex<_>> to match the
    // `&Arc<Mutex<AgentState>>` argument `chat_stream` takes below.
    let agent_state = Arc::new(Mutex::new(AgentState::new()));

    // Method-call syntax instead of the fully-qualified
    // `ChatRunner::chat_stream(&chat_runner, …)` form — same call, idiomatic.
    let mut res_stream = chat_runner
        .chat_stream(&agent, &agent_state, "Who are you and what can you do?")
        .await?;

    // Stream the response token-by-token; flush stdout so partial lines
    // appear immediately rather than waiting for a newline.
    while let Some(chunk) = res_stream.next().await {
        match chunk {
            Ok(text) => {
                print!("{}", text);
                let _ = std::io::stdout().flush();
            }
            // Previously `if let Ok(text)` silently dropped errors, leaving
            // the loop to poll a broken stream with no diagnostic. Surface
            // the error and stop consuming instead.
            Err(e) => {
                eprintln!("\nstream error: {}", e);
                break;
            }
        }
    }
    println!();
    Ok(())
}