// tiny_loop/llm.rs
1mod openai;
2
3use crate::types::{Message, ToolDefinition};
4use async_trait::async_trait;
5
6pub use openai::*;
7
/// Callback for streaming LLM responses.
///
/// Invoked once per chunk of text as the response arrives. Boxed `FnMut`
/// so implementations can store it and call it repeatedly across chunks;
/// `Send` so it can be moved into async tasks on multi-threaded executors.
pub type StreamCallback = Box<dyn FnMut(String) + Send>;
10
/// LLM provider trait for making API calls.
///
/// Implementors wrap a concrete backend (see the `openai` module re-exported
/// from this file). `Send + Sync` bounds allow a provider to be shared across
/// async tasks (e.g. behind an `Arc`).
#[async_trait]
pub trait LLMProvider: Send + Sync {
    /// Call the LLM with messages and available tools, returning the assistant's response.
    ///
    /// If `stream_callback` is provided, the LLM will be invoked in streaming mode,
    /// calling the callback for each chunk of the response as it arrives.
    ///
    /// # Parameters
    /// - `messages`: conversation history to send, oldest first
    ///   (NOTE(review): ordering assumed from convention — confirm against callers).
    /// - `tools`: tool definitions the model may choose to invoke.
    /// - `stream_callback`: optional sink for incremental output; `None` means
    ///   a single non-streaming request. `&mut StreamCallback` (a `&mut Box<dyn FnMut>`)
    ///   lets the caller retain ownership of the callback across calls.
    ///
    /// # Errors
    /// Returns `anyhow::Error` on failure; the concrete failure modes
    /// (network, API, parsing) are implementation-specific.
    async fn call(
        &self,
        messages: &[Message],
        tools: &[ToolDefinition],
        stream_callback: Option<&mut StreamCallback>,
    ) -> anyhow::Result<Message>;
}