use anyhow::Result;
use ambi::llm::providers::openai_api::OpenAIEngineConfig;
use ambi::llm::ChatTemplateType;
use ambi::LLMEngineConfig;
use ambi::{Agent, ChatPipeline};

#[tokio::main]
async fn main() -> Result<()> {
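    // System prompt that defines the assistant's persona.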
    let system_prompt = "You are a helpful and harmless AI assistant.";

    // Read the API key from the environment, falling back to a placeholder.
    let api_key =
        std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "your-default-api-key".to_string());
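
    // Configure the OpenAI-compatible backend: endpoint, model, and sampling parameters.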
    let engine_config = LLMEngineConfig::OpenAI(OpenAIEngineConfig {
        api_key,
        base_url: "https://api.openai.com/v1".to_string(),
        model_name: "gpt-4o-mini".to_string(),
        temp: 0.7,
        top_p: 0.9,
    });
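
    // Build the agent with the ChatML template and the system prompt as its preamble.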
    let mut agent = Agent::make(engine_config)
        .await?
        .template(ChatTemplateType::Chatml)
        .preamble(system_prompt);
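
    // Send a single chat turn and print the model's reply.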
    let res = agent
        .chat("Who are you and what can you do?")
        .await
        .map_err(|_| anyhow::anyhow!("chat request failed"))?;
    println!("{}", res);

    Ok(())
}