agent_runtime/llm/mod.rs

use async_trait::async_trait;
use tokio::sync::mpsc;

pub mod mock; // Always available for testing
pub mod provider;
pub mod types;

pub use mock::{MockLlmClient, MockResponse, MockToolCall};
pub use provider::{LlamaClient, OpenAIClient};
pub use types::{ChatMessage, ChatRequest, ChatResponse, Role};

/// Result type for LLM operations
pub type LlmResult<T> = Result<T, LlmError>;

/// Errors that can occur during LLM operations
#[derive(Debug, thiserror::Error)]
pub enum LlmError {
    #[error("API error: {0}")]
    ApiError(String),

    #[error("Network error: {0}")]
    NetworkError(String),

    #[error("Invalid request: {0}")]
    InvalidRequest(String),

    #[error("Rate limit exceeded")]
    RateLimitExceeded,

    #[error("Authentication failed: {0}")]
    AuthenticationFailed(String),

    #[error("Response parsing error: {0}")]
    ParseError(String),
}
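
// A hypothetical helper, not part of this module's API, sketching how a
// caller might classify `LlmError` variants for retry logic. The policy
// below (retry only transient network failures and rate limits) is an
// assumption for illustration, not something this module prescribes.
#[allow(dead_code)]
fn is_retryable(err: &LlmError) -> bool {
    matches!(err, LlmError::NetworkError(_) | LlmError::RateLimitExceeded)
}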

/// Generic trait for LLM chat clients
#[async_trait]
pub trait ChatClient: Send + Sync {
    /// Send a chat completion request and await the full response
    async fn chat(&self, request: ChatRequest) -> LlmResult<ChatResponse>;

    /// Stream a chat completion request, delivering incremental text
    /// chunks over `tx` and returning the final response once the
    /// stream completes
    async fn chat_stream(
        &self,
        request: ChatRequest,
        tx: mpsc::Sender<String>,
    ) -> LlmResult<ChatResponse>;
}
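
// Illustrative usage sketch (not part of the module): drive a streaming
// call generically over any `ChatClient`, assuming only the trait signature
// above. The helper name is hypothetical, and `ChatRequest` construction is
// left to the caller since its fields are defined in `types`.
#[allow(dead_code)]
async fn stream_to_stdout<C: ChatClient>(
    client: &C,
    request: ChatRequest,
) -> LlmResult<ChatResponse> {
    // Bounded channel: the client pushes incremental text chunks on `tx`.
    let (tx, mut rx) = mpsc::channel::<String>(32);

    // Run producer and consumer concurrently so a full channel never blocks
    // the client. The receive loop ends when `tx` is dropped at the end of
    // `chat_stream`.
    let (response, ()) = tokio::join!(client.chat_stream(request, tx), async {
        while let Some(chunk) = rx.recv().await {
            print!("{chunk}");
        }
    });

    response
}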