§llm-connector
Next-generation Rust library for LLM protocol abstraction.
Supports five protocols: OpenAI, Anthropic, Aliyun, Zhipu, and Ollama. The architecture keeps a clear separation between Protocols and Providers.
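Because every client exposes the same chat interface, provider-specific code can be confined to the constructor call. The sketch below is illustrative and built only from the constructors and types shown in the quick start that follows:

use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

// One helper serves any provider: only the way the client is constructed differs.
async fn ask(client: &LlmClient, model: &str) -> Result<(), Box<dyn std::error::Error>> {
    let request = ChatRequest {
        model: model.to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };
    let response = client.chat(&request).await?;
    println!("{model}: {}", response.content);
    Ok(())
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let openai = LlmClient::openai("sk-...", "https://api.openai.com/v1")?;
    let ollama = LlmClient::ollama("http://localhost:11434")?;

    ask(&openai, "gpt-4").await?;
    ask(&ollama, "llama3.2").await?;
    Ok(())
}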
§Quick Start
§OpenAI Protocol
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // OpenAI
    let client = LlmClient::openai("sk-...", "https://api.openai.com/v1")?;

    let request = ChatRequest {
        model: "gpt-4".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);

    Ok(())
}

§Anthropic Protocol
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = LlmClient::anthropic("sk-ant-...", "https://api.anthropic.com")?;

    let request = ChatRequest {
        model: "claude-3-5-sonnet-20241022".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);

    Ok(())
}

§Aliyun Protocol (DashScope)
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = LlmClient::aliyun("sk-...", "https://dashscope.aliyuncs.com")?;

    let request = ChatRequest {
        model: "qwen-turbo".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);

    Ok(())
}

§Ollama Protocol (Local)
use llm_connector::{LlmClient, Provider, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Default local instance: http://localhost:11434
    let client = LlmClient::ollama("http://localhost:11434")?;
    // Or point at a custom host:
    // let client = LlmClient::ollama("http://192.168.1.100:11434")?;

    let request = ChatRequest {
        model: "llama3.2".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);

    // Ollama-specific features
    if let Some(ollama) = client.as_ollama() {
        let models = ollama.models().await?;
        println!("Available models: {:?}", models);
    }

    Ok(())
}

§Installation
Add to your Cargo.toml:
[dependencies]
llm-connector = "0.2"
tokio = { version = "1", features = ["full"] }

Optional features:
llm-connector = { version = "0.2", features = ["streaming"] }
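With the streaming feature enabled, the crate re-exports stream types such as ChatStream, StreamChunk, and Delta (listed under the re-exports below). The following is a hypothetical sketch, not the crate's confirmed API: the chat_stream method name, the chunk's Debug output, and the use of futures_util::StreamExt are all assumptions; check the client and types modules for the actual streaming surface.

use futures_util::StreamExt; // assumption: the chat stream implements futures::Stream
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = LlmClient::openai("sk-...", "https://api.openai.com/v1")?;
    let request = ChatRequest {
        model: "gpt-4".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    // Hypothetical method name: a streaming counterpart to `chat` that yields
    // incremental chunks instead of a single response.
    let mut stream = client.chat_stream(&request).await?;
    while let Some(chunk) = stream.next().await {
        // Assumption: each item is a Result over a chunk type with a Debug impl.
        println!("{:?}", chunk?);
    }

    Ok(())
}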
§Re-exports
pub use client::LlmClient;
pub use config::ProviderConfig;
pub use error::LlmConnectorError;
pub use types::{ChatRequest, ChatResponse, Choice, FunctionCall, JsonSchemaSpec, Message, ResponseFormat, ResponsesRequest, ResponsesResponse, ResponsesStreamEvent, ResponsesUsage, Role, Tool, ToolCall, ToolChoice, Usage};
pub use core::{GenericProvider, HttpClient, Protocol, Provider};
pub use protocols::{AliyunProtocol, AnthropicProtocol, GoogleProtocol, OllamaProtocol, OpenAIProtocol, ZhipuProtocol};
pub use providers::{aliyun, anthropic, ollama, openai, xinference, zhipu, AliyunProvider, AnthropicProvider, OllamaProvider, OpenAIProvider, ZhipuProvider};
pub use types::{AnthropicSseAdapter, ChatStream, Delta, OllamaChatStream, OllamaMessage, OllamaStreamChunk, ResponsesStream, StreamChunk, StreamFormat, StreamingChoice, StreamingConfig, StreamingFormat, StreamingResponse, UniversalChatStream};
§Modules
- builder: Builder pattern for LlmClient
- client: V2 Unified Client - next-generation LLM client interface
- config: Configuration management for LLM providers
- core: V2 architecture core module
- error: Error types for llm-connector (a brief handling sketch follows this list)
- protocols: Protocol module - public standard protocols
- providers: V2 service provider module
- sse: Server-Sent Events (SSE) streaming utilities
- types: Core types for llm-connector
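Failures from client construction and chat calls surface as LlmConnectorError, which is re-exported at the crate root. A minimal handling sketch, assuming only what the quick-start examples imply (the error converts into Box<dyn std::error::Error> and therefore implements Display):

use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() {
    // Construction and chat calls can both fail; match on the result instead of
    // using `?` when you want to log the error and continue.
    let client = match LlmClient::openai("sk-...", "https://api.openai.com/v1") {
        Ok(client) => client,
        Err(e) => {
            eprintln!("failed to build client: {e}");
            return;
        }
    };

    let request = ChatRequest {
        model: "gpt-4".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    match client.chat(&request).await {
        Ok(response) => println!("Response: {}", response.content),
        Err(e) => eprintln!("chat request failed: {e}"),
    }
}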