// a3s_code_core/llm/mod.rs
1//! LLM client abstraction layer
2//!
3//! Provides a unified interface for interacting with LLM providers
4//! (Anthropic Claude, OpenAI, Zhipu AI GLM, and OpenAI-compatible providers).
5
6pub mod anthropic;
7pub mod factory;
8pub mod http;
9pub mod openai;
10mod types;
11pub mod zhipu;
12
13// Re-export public types
14pub use anthropic::AnthropicClient;
15pub use factory::{create_client_with_config, LlmConfig};
16pub use http::{
17    clear_http_metrics_callback, default_http_client, set_http_metrics_callback, HttpClient,
18    HttpMetricsCallback, HttpMetricsRecord, HttpResponse, StreamingHttpResponse,
19};
20pub use openai::OpenAiClient;
21pub use types::*;
22pub use zhipu::ZhipuClient;
23
24use anyhow::Result;
25use async_trait::async_trait;
26use tokio::sync::mpsc;
27use tokio_util::sync::CancellationToken;
28
29/// LLM client trait
30#[async_trait]
31pub trait LlmClient: Send + Sync {
32    /// Complete a conversation (non-streaming)
33    async fn complete(
34        &self,
35        messages: &[Message],
36        system: Option<&str>,
37        tools: &[ToolDefinition],
38    ) -> Result<LlmResponse>;
39
40    /// Complete a conversation with streaming
41    /// Returns a receiver for streaming events.
42    /// The cancel_token is checked during the HTTP request; if cancelled, the request is aborted.
43    async fn complete_streaming(
44        &self,
45        messages: &[Message],
46        system: Option<&str>,
47        tools: &[ToolDefinition],
48        cancel_token: CancellationToken,
49    ) -> Result<mpsc::Receiver<StreamEvent>>;
50}
// Include test modules — these reference internal types via crate paths.
// The #[path] attribute pulls in the sibling `tests.rs` file under a
// distinct module name, compiled only for `cargo test` builds.
#[cfg(test)]
#[path = "tests.rs"]
mod tests_file;