§llm-connector
Next-generation Rust library for LLM protocol abstraction.
Supports five protocols: OpenAI, Anthropic, Aliyun (DashScope), Zhipu, and Ollama. Clean architecture with a clear Protocol/Provider separation.
§Quick Start
§OpenAI Protocol
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // OpenAI
    let client = LlmClient::openai("sk-...")?;

    let request = ChatRequest {
        model: "gpt-4".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);
    Ok(())
}
§Anthropic Protocol
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = LlmClient::anthropic("sk-ant-...")?;

    let request = ChatRequest {
        model: "claude-3-5-sonnet-20241022".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);
    Ok(())
}
§Aliyun Protocol (DashScope)
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = LlmClient::aliyun("sk-...")?;

    let request = ChatRequest {
        model: "qwen-turbo".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);
    Ok(())
}
§Ollama Protocol (Local)
use llm_connector::{LlmClient, Provider, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Default endpoint: http://localhost:11434
    let client = LlmClient::ollama()?;

    // Or with a custom URL:
    // let client = LlmClient::ollama_with_base_url("http://192.168.1.100:11434")?;

    let request = ChatRequest {
        model: "llama3.2".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);

    // Ollama-specific features
    if let Some(ollama) = client.as_ollama() {
        let models = ollama.models().await?;
        println!("Available models: {:?}", models);
    }

    Ok(())
}
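§Zhipu Protocol
Zhipu is listed among the supported protocols but has no example above. The following is a minimal sketch, assuming a LlmClient::zhipu(...) constructor analogous to the other providers and a GLM model name; both the constructor name and the model name are assumptions, not taken from this documentation.

use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hypothetical constructor, assumed by analogy with openai()/anthropic()/aliyun().
    let client = LlmClient::zhipu("your-api-key")?;

    let request = ChatRequest {
        model: "glm-4".to_string(), // model name is an assumption
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    let response = client.chat(&request).await?;
    println!("Response: {}", response.content);
    Ok(())
}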
§Installation
Add to your Cargo.toml:
[dependencies]
llm-connector = "0.2"
tokio = { version = "1", features = ["full"] }

Optional features:
llm-connector = { version = "0.2", features = ["streaming"] }
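The streaming API itself is not shown on this page. The sketch below assumes a hypothetical chat_stream method that returns a futures Stream of incremental chunks exposing a content field; the method name, the chunk shape, and the futures dependency are all assumptions to illustrate how the streaming feature might be consumed.

use futures::StreamExt; // assumed dependency for consuming the stream
use llm_connector::{LlmClient, types::{ChatRequest, Message, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = LlmClient::openai("sk-...")?;
    let request = ChatRequest {
        model: "gpt-4".to_string(),
        messages: vec![Message::text(Role::User, "Hello!")],
        ..Default::default()
    };

    // `chat_stream` is a hypothetical method name; check the crate's docs for the
    // actual API exposed by the `streaming` feature.
    let mut stream = client.chat_stream(&request).await?;
    while let Some(chunk) = stream.next().await {
        print!("{}", chunk?.content); // `content` field on the chunk is assumed
    }
    Ok(())
}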
§Re-exports
pub use client::LlmClient;
pub use config::ProviderConfig;
pub use error::LlmConnectorError;
pub use types::ChatRequest;
pub use types::ChatResponse;
pub use types::Choice;
pub use types::Message;
pub use types::Usage;
pub use types::Role;
pub use core::Protocol;
pub use core::Provider;
pub use core::GenericProvider;
pub use core::HttpClient;
pub use protocols::OpenAIProtocol;
pub use protocols::AnthropicProtocol;
pub use providers::AliyunProtocol;
pub use providers::ZhipuProtocol;
pub use providers::OpenAIProvider;
pub use providers::AliyunProvider;
pub use providers::AnthropicProvider;
pub use providers::ZhipuProvider;
pub use providers::OllamaProvider;
pub use providers::openai;
pub use providers::aliyun;
pub use providers::anthropic;
pub use providers::zhipu;
pub use providers::ollama;