llm_connector/lib.rs
//! # llm-connector
//!
//! Next-generation Rust library for LLM protocol abstraction.
//!
//! Supports 5 protocols: OpenAI, Anthropic, Aliyun, Zhipu, Ollama.
//! Clean architecture with clear Protocol/Provider separation.
//!
//! ## Quick Start
//!
//! ### OpenAI Protocol
//! ```rust,no_run
//! use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // OpenAI
//!     let client = LlmClient::openai("sk-...")?;
//!
//!     let request = ChatRequest {
//!         model: "gpt-4".to_string(),
//!         messages: vec![Message::text(Role::User, "Hello!")],
//!         ..Default::default()
//!     };
//!
//!     let response = client.chat(&request).await?;
//!     println!("Response: {}", response.content);
//!     Ok(())
//! }
//! ```
//!
//! ### Anthropic Protocol
//! ```rust,no_run
//! use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = LlmClient::anthropic("sk-ant-...")?;
//!     let request = ChatRequest {
//!         model: "claude-3-5-sonnet-20241022".to_string(),
//!         messages: vec![Message::text(Role::User, "Hello!")],
//!         ..Default::default()
//!     };
//!
//!     let response = client.chat(&request).await?;
//!     println!("Response: {}", response.content);
//!     Ok(())
//! }
//! ```
//!
//! ### Aliyun Protocol (DashScope)
//! ```rust,no_run
//! use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = LlmClient::aliyun("sk-...")?;
//!     let request = ChatRequest {
//!         model: "qwen-turbo".to_string(),
//!         messages: vec![Message::text(Role::User, "Hello!")],
//!         ..Default::default()
//!     };
//!
//!     let response = client.chat(&request).await?;
//!     println!("Response: {}", response.content);
//!     Ok(())
//! }
//! ```
//!
//! ### Ollama Protocol (Local)
//! ```rust,no_run
//! use llm_connector::{LlmClient, Provider, types::{ChatRequest, Message, Role}};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Default: localhost:11434
//!     let client = LlmClient::ollama()?;
//!
//!     // Custom URL
//!     let client = LlmClient::ollama_with_base_url("http://192.168.1.100:11434")?;
//!
//!     let request = ChatRequest {
//!         model: "llama3.2".to_string(),
//!         messages: vec![Message::text(Role::User, "Hello!")],
//!         ..Default::default()
//!     };
//!
//!     let response = client.chat(&request).await?;
//!     println!("Response: {}", response.content);
//!
//!     // Ollama special features
//!     if let Some(ollama) = client.as_ollama() {
//!         let models = ollama.models().await?;
//!         println!("Available models: {:?}", models);
//!     }
//!
//!     Ok(())
//! }
//! ```
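//!
//! ### Zhipu Protocol (GLM)
//! A sketch for the Zhipu protocol: the `LlmClient::zhipu` constructor and the
//! `glm-4` model name below mirror the other providers and are assumptions,
//! so the example is marked `ignore`.
//! ```rust,ignore
//! use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Hypothetical constructor, analogous to LlmClient::openai/aliyun.
//!     let client = LlmClient::zhipu("sk-...")?;
//!     let request = ChatRequest {
//!         model: "glm-4".to_string(),
//!         messages: vec![Message::text(Role::User, "Hello!")],
//!         ..Default::default()
//!     };
//!
//!     let response = client.chat(&request).await?;
//!     println!("Response: {}", response.content);
//!     Ok(())
//! }
//! ```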
//!
//! ## Installation
//!
//! Add to your `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! llm-connector = "0.2"
//! tokio = { version = "1", features = ["full"] }
//! ```
//!
//! Optional features:
//! ```toml
//! llm-connector = { version = "0.2", features = ["streaming"] }
//! ```
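//!
//! ## Streaming (feature = "streaming")
//!
//! With the `streaming` feature enabled, streaming types such as `ChatStream`
//! and `StreamChunk` are re-exported. The sketch below assumes a hypothetical
//! `chat_stream` method that yields chunks; the method name and chunk shape
//! are assumptions for illustration, so the example is marked `ignore`.
//! ```rust,ignore
//! use futures::StreamExt;
//! use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let client = LlmClient::openai("sk-...")?;
//!     let request = ChatRequest {
//!         model: "gpt-4".to_string(),
//!         messages: vec![Message::text(Role::User, "Hello!")],
//!         ..Default::default()
//!     };
//!
//!     // Hypothetical streaming call; prints chunks as they arrive.
//!     let mut stream = client.chat_stream(&request).await?;
//!     while let Some(chunk) = stream.next().await {
//!         println!("{:?}", chunk);
//!     }
//!     Ok(())
//! }
//! ```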

// Core modules (V2 Architecture - Default)
pub mod client;
pub mod config;
pub mod core;
pub mod error;
pub mod protocols;
pub mod providers;
pub mod types;

// Server-Sent Events (SSE) utilities
pub mod sse;

// Re-exports for convenience (V2 Architecture)
pub use client::LlmClient;
pub use config::ProviderConfig;
pub use error::LlmConnectorError;
pub use types::{ChatRequest, ChatResponse, Choice, Message, Usage, Role};

// Re-export core traits
pub use core::{Protocol, Provider, GenericProvider, HttpClient};

// Re-export standard protocols
pub use protocols::{OpenAIProtocol, AnthropicProtocol};

// Re-export proprietary protocols (from providers)
pub use providers::{AliyunProtocol, ZhipuProtocol};

// Re-export providers
pub use providers::{
    OpenAIProvider, AliyunProvider, AnthropicProvider, ZhipuProvider, OllamaProvider,
    // Convenience functions
    openai, aliyun, anthropic, zhipu, ollama,
};

#[cfg(feature = "streaming")]
pub use types::{
    ChatStream, Delta, StreamingChoice, StreamingResponse,
    StreamingFormat, StreamingConfig, OllamaStreamChunk, OllamaMessage, OllamaChatStream,
    StreamFormat, StreamChunk, UniversalChatStream
};
156