//! `wesichain_llm` — LLM client integrations (`wesichain_llm/lib.rs`).
1mod ollama;
2
3// OpenAI-compatible client (always available)
4pub mod openai_compatible;
5
6// Provider-specific clients (feature-gated)
7pub mod providers;
8
9pub use ollama::{ollama_stream_events, OllamaClient};
10pub use wesichain_core::{LlmRequest, LlmResponse, Message, Role, ToolCall, ToolSpec};
11
12// Re-export generic client
13pub use openai_compatible::{
14    ChatCompletionRequest, OpenAiCompatibleBuilder, OpenAiCompatibleClient,
15};
16
17// Re-export provider clients
18#[cfg(feature = "openai")]
19pub use providers::openai::OpenAiClient;
20
21#[cfg(feature = "deepseek")]
22pub use providers::deepseek::DeepSeekClient;
23
24#[cfg(feature = "google")]
25pub use providers::google::GoogleClient;
26
27#[cfg(feature = "azure")]
28pub use providers::azure::AzureOpenAiClient;
29
30#[cfg(feature = "mistral")]
31pub use providers::mistral::MistralClient;
32
33use wesichain_core::Runnable;
34
35#[deprecated(
36    note = "Use Runnable<LlmRequest, LlmResponse> directly or ToolCallingLlm for advanced features"
37)]
38pub trait Llm: Runnable<LlmRequest, LlmResponse> {}
39
40#[allow(deprecated)]
41impl<T> Llm for T where T: Runnable<LlmRequest, LlmResponse> {}