// agent_air_runtime/client/mod.rs
1//! LLM client with provider-agnostic interface.
2
3/// Error types for LLM operations.
4pub mod error;
5/// HTTP client with TLS and retry logic.
6pub mod http;
7/// Message and request/response models.
8pub mod models;
9/// LLM provider implementations (Anthropic, OpenAI).
10pub mod providers;
11/// Provider trait definition.
12pub mod traits;
13
14use futures::Stream;
15use std::pin::Pin;
16
17use error::LlmError;
18use http::HttpClient;
19use models::{Message, MessageOptions, StreamEvent};
20use traits::LlmProvider;
21
22/// This is the main LLM Client.
23pub struct LLMClient {
24    http_client: HttpClient,
25    provider: Box<dyn LlmProvider + Send + Sync>,
26}
27
28impl LLMClient {
29    /// Create a new LLM client with the specified provider.
30    pub fn new(provider: Box<dyn LlmProvider + Send + Sync>) -> Result<Self, LlmError> {
31        Ok(Self {
32            http_client: HttpClient::new()?,
33            provider,
34        })
35    }
36
37    /// Send a message and wait for the complete response.
38    pub async fn send_message(
39        &self,
40        messages: &[Message],
41        options: &MessageOptions,
42    ) -> Result<Message, LlmError> {
43        self.provider
44            .send_msg(&self.http_client, messages, options)
45            .await
46    }
47
48    /// Send a message and receive a stream of response events.
49    pub async fn send_message_stream(
50        &self,
51        messages: &[Message],
52        options: &MessageOptions,
53    ) -> Result<Pin<Box<dyn Stream<Item = Result<StreamEvent, LlmError>> + Send>>, LlmError> {
54        self.provider
55            .send_msg_stream(&self.http_client, messages, options)
56            .await
57    }
58}