use std::sync::Arc;

use ambi::llm::ChatTemplateType;
use ambi::types::config::OpenAIEngineConfig;
use ambi::{Agent, AgentState, ChatRunner, LLMEngineConfig};
use anyhow::Result;
use tokio::sync::RwLock;
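
// Minimal end-to-end example: configure an OpenAI-backed engine, build an
// ambi agent, and run a single chat turn. Set OPENAI_API_KEY before running
// (e.g. `OPENAI_API_KEY=sk-... cargo run`); otherwise the placeholder key
// below is sent and the API will reject the request.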
#[tokio::main]
async fn main() -> Result<()> {
    // System prompt that seeds the agent's persona.
    let system_prompt = "You are a helpful and harmless AI assistant.";

    // Read the API key from the environment, falling back to a placeholder.
    let api_key =
        std::env::var("OPENAI_API_KEY").unwrap_or_else(|_| "your-default-api-key".to_string());

    // Point the engine at OpenAI's API with a small, inexpensive model.
    let engine_config = LLMEngineConfig::OpenAI(OpenAIEngineConfig {
        api_key,
        base_url: "https://api.openai.com/v1".to_string(),
        model_name: "gpt-4o-mini".to_string(),
        temp: 0.7,
        top_p: 0.9,
    });

    // Build the agent on the configured engine, using the ChatML template
    // and the system prompt as its preamble.
    let chat_runner = ChatRunner;
    let agent = Agent::make(engine_config)
        .await?
        .template(ChatTemplateType::Chatml)
        .preamble(system_prompt);

    // Conversation state, wrapped in Arc<RwLock<..>> for shared async access.
    let agent_state = Arc::new(RwLock::new(AgentState::new()));

    // Run one chat turn and print the model's reply.
    let res = chat_runner
        .chat(&agent, &agent_state, "Who are you and what can you do?")
        .await?;
    println!("{}", res);

    Ok(())
}