agent_core/client/
mod.rs

//! LLM client with provider-agnostic interface.
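//!
//! # Example
//!
//! A minimal usage sketch, assuming the crate root is `agent_core` and that
//! [`providers`] exposes an `AnthropicProvider` with a `new` constructor;
//! both are illustrative assumptions, so the snippet is marked `ignore`.
//!
//! ```ignore
//! use agent_core::client::LLMClient;
//! use agent_core::client::models::{Message, MessageOptions};
//! use agent_core::client::providers::AnthropicProvider;
//!
//! async fn demo() -> Result<(), agent_core::client::error::LlmError> {
//!     // Hypothetical constructor; substitute the real one from `providers`.
//!     let provider = Box::new(AnthropicProvider::new("api-key"));
//!     let client = LLMClient::new(provider)?;
//!
//!     // `Message::user` and `MessageOptions::default` are assumed helpers.
//!     let messages = vec![Message::user("Hello!")];
//!     let reply = client.send_message(&messages, &MessageOptions::default()).await?;
//!     println!("{reply:?}");
//!     Ok(())
//! }
//! ```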

/// Error types for LLM operations.
pub mod error;
/// HTTP client with TLS and retry logic.
pub mod http;
/// Message and request/response models.
pub mod models;
/// LLM provider implementations (Anthropic, OpenAI).
pub mod providers;
/// Provider trait definition.
pub mod traits;

use futures::Stream;
use std::pin::Pin;

use error::LlmError;
use http::HttpClient;
use models::{Message, MessageOptions, StreamEvent};
use traits::LlmProvider;

/// Provider-agnostic LLM client that delegates requests to the configured provider.
pub struct LLMClient {
    http_client: HttpClient,
    provider: Box<dyn LlmProvider + Send + Sync>,
}

impl LLMClient {
    /// Create a new LLM client with the specified provider.
    ///
    /// Fails if the underlying [`HttpClient`] cannot be constructed.
    pub fn new(provider: Box<dyn LlmProvider + Send + Sync>) -> Result<Self, LlmError> {
        Ok(Self {
            http_client: HttpClient::new()?,
            provider,
        })
    }

    /// Send a message and wait for the complete response.
    pub async fn send_message(
        &self,
        messages: &[Message],
        options: &MessageOptions,
    ) -> Result<Message, LlmError> {
        self.provider.send_msg(&self.http_client, messages, options).await
    }

    /// Send a message and receive a stream of response events.
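    ///
    /// # Example
    ///
    /// A sketch of draining the stream with [`futures::StreamExt`]; how each
    /// [`StreamEvent`] should be handled depends on its variants, which are
    /// not shown here, so the snippet is marked `ignore`.
    ///
    /// ```ignore
    /// use futures::StreamExt;
    ///
    /// let mut stream = client.send_message_stream(&messages, &options).await?;
    /// while let Some(event) = stream.next().await {
    ///     // Placeholder handling; match on the real `StreamEvent` variants.
    ///     println!("event: {:?}", event?);
    /// }
    /// ```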
    pub async fn send_message_stream(
        &self,
        messages: &[Message],
        options: &MessageOptions,
    ) -> Result<Pin<Box<dyn Stream<Item = Result<StreamEvent, LlmError>> + Send>>, LlmError> {
        self.provider.send_msg_stream(&self.http_client, messages, options).await
    }
}