use anyhow::Result;
use std::io::Write;
use tokio_stream::StreamExt;
use ambi::llm::chat_template::ChatTemplateType;
use ambi::Agent;
use ambi::{LLMEngineConfig, OpenAIEngineConfig};
/// Install a terminal logger at `Debug` level for the whole process.
///
/// The result of `TermLogger::init` is deliberately discarded: installing a
/// global logger fails if one is already set, and that is harmless here.
fn init_logger() {
    use simplelog::{ColorChoice, Config, LevelFilter, TermLogger, TerminalMode};

    let config = Config::default();
    let _ = TermLogger::init(LevelFilter::Debug, config, TerminalMode::Mixed, ColorChoice::Auto);
}
#[tokio::main]
async fn main() -> Result<()> {
init_logger();
let system_prompt = "You are a helpful and harmless AI assistant.";
let api_key =
std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "your-default-api-key".to_string());
let engine_config = LLMEngineConfig::OpenAI(OpenAIEngineConfig {
api_key,
base_url: "https://api.openai.com/v1".to_string(), model_name: "gpt-4o-mini".to_string(),
temp: 0.7,
top_p: 0.9,
});
let mut agent = Agent::make(engine_config)
.await?
.template(ChatTemplateType::Chatml)
.preamble(system_prompt);
let mut res_stream = agent
.chat_stream("Who are you and what can you do?")
.await
.map_err(|_| anyhow::anyhow!("Failed to create chat stream"))?;
while let Some(chunk) = res_stream.next().await {
if let Ok(text) = chunk {
print!("{}", text);
let _ = std::io::stdout().flush(); }
}
println!();
Ok(())
}