use ambi::error::Result;
use ambi::llm::LLMEngineTrait;
use ambi::types::LLMRequest;
use ambi::{impl_as_any, Agent, LLMEngineConfig};
use async_trait::async_trait;
/// Example custom LLM engine for a fictional in-house API.
///
/// The key is stored but never read by this stub implementation
/// (hence the leading underscore silencing the dead-field lint).
#[derive(Debug)]
struct MyCompanyEngine {
    _api_key: String,
}
#[async_trait]
impl LLMEngineTrait for MyCompanyEngine {
    impl_as_any!();

    /// Returns a fixed, canned reply regardless of the request contents.
    async fn chat(&self, _request: LLMRequest) -> Result<String> {
        let reply = String::from("I am a custom AI assistant.");
        Ok(reply)
    }

    /// Emits the canned reply as two chunks over the channel.
    /// Send failures (receiver dropped) are deliberately ignored —
    /// streaming is best-effort here.
    async fn chat_stream(
        &self,
        _request: LLMRequest,
        tx: tokio::sync::mpsc::Sender<Result<String>>,
    ) {
        for chunk in ["Streaming ", "response..."] {
            let _ = tx.send(Ok(chunk.to_string())).await;
        }
    }

    /// This engine keeps no conversation state, so reset is a no-op.
    fn reset_context(&self) {}
}
/// Entry point: constructs the custom engine, hands it to the agent
/// factory, and applies a system prompt as the agent preamble.
#[tokio::main]
async fn main() -> Result<()> {
    let engine = MyCompanyEngine {
        _api_key: "secret-api-key".to_string(),
    };

    // Agent::make is async and fallible; errors propagate via `?`.
    let _agent = Agent::make(LLMEngineConfig::Custom(Box::new(engine)))
        .await?
        .preamble("You are a helpful and harmless AI assistant.");

    Ok(())
}