pub struct UnifiedLLMClient { /* private fields */ }
Expand description
Unified client for multi-provider LLM operations.
UnifiedLLMClient is the primary interface for using multi-llm. It wraps
all supported providers behind a single LlmProvider interface, allowing
you to switch providers without changing your application code.
§Quick Start
use multi_llm::{unwrap_response, UnifiedLLMClient, LLMConfig, UnifiedMessage, UnifiedLLMRequest, LlmProvider};
// Create client from environment variables
let client = UnifiedLLMClient::from_env()?;
// Build a request
let request = UnifiedLLMRequest::new(vec![
UnifiedMessage::system("You are a helpful assistant."),
UnifiedMessage::user("What's the capital of France?"),
]);
// Execute the request
let response = unwrap_response!(client.execute_llm(request, None, None).await?);
println!("Response: {}", response.content);
§From Configuration
use multi_llm::{UnifiedLLMClient, LLMConfig, OpenAIConfig, DefaultLLMParams};
let config = LLMConfig {
provider: Box::new(OpenAIConfig {
api_key: Some("sk-...".to_string()),
default_model: "gpt-4-turbo-preview".to_string(),
..Default::default()
}),
default_params: DefaultLLMParams::default(),
};
let client = UnifiedLLMClient::from_config(config)?;
§Tool Calling
use multi_llm::{unwrap_response, UnifiedLLMClient, UnifiedMessage, UnifiedLLMRequest, RequestConfig, Tool, ToolChoice, LlmProvider};
// Define a tool
let weather_tool = Tool {
name: "get_weather".to_string(),
description: "Get current weather".to_string(),
parameters: serde_json::json!({
"type": "object",
"properties": {
"city": {"type": "string"}
},
"required": ["city"]
}),
};
let request = UnifiedLLMRequest::new(vec![
UnifiedMessage::user("What's the weather in Paris?"),
]);
let config = RequestConfig {
tools: vec![weather_tool],
tool_choice: Some(ToolChoice::Auto),
..Default::default()
};
let response = unwrap_response!(client.execute_llm(request, None, Some(config)).await?);
// Check for tool calls
if !response.tool_calls.is_empty() {
for call in &response.tool_calls {
println!("Tool call: {} with {}", call.name, call.arguments);
// Execute tool and continue conversation...
}
}
§Supported Providers
| Provider | Config Type | API Key Required |
|---|---|---|
| Anthropic | AnthropicConfig | Yes |
| OpenAI | OpenAIConfig | Yes |
| Ollama | OllamaConfig | No (local) |
| LM Studio | LMStudioConfig | No (local) |
Implementations§
Source§impl UnifiedLLMClient
impl UnifiedLLMClient
Source§
pub fn create(
provider_name: &str,
model: String,
config: LLMConfig,
) -> LlmResult<Self>
pub fn create( provider_name: &str, model: String, config: LLMConfig, ) -> LlmResult<Self>
Factory method to create UnifiedLLMClient with all parameters. This is the primary constructor for production use.
§Errors
Returns LlmError::UnsupportedProvider if the provider name is not recognized.
Supported providers are: “anthropic”, “openai”, “lmstudio”, “ollama”.
Returns LlmError::ConfigurationError if:
- The provider configuration type doesn’t match the provider name
- Required configuration fields are missing (e.g., API key for OpenAI/Anthropic)
- Configuration validation fails (e.g., invalid base URL format)
Source§
pub fn from_env() -> LlmResult<Self>
pub fn from_env() -> LlmResult<Self>
Create a client using environment variables for configuration
§Errors
Returns LlmError::ConfigurationError if:
- Required environment variables are missing
- Environment variable values are invalid or malformed
- Provider configuration validation fails
Source§
pub fn from_config(config: LLMConfig) -> LlmResult<Self>
pub fn from_config(config: LLMConfig) -> LlmResult<Self>
Create a client from an LLMConfig (backward compatibility)
§Errors
Returns LlmError::UnsupportedProvider if the provider name in the config is not recognized.
Returns LlmError::ConfigurationError if:
- Provider configuration validation fails
- Required provider-specific settings are missing
Trait Implementations§
Source§impl LlmProvider for UnifiedLLMClient
Implement LlmProvider for UnifiedLLMClient
Just delegates to the underlying provider - providers already handle events feature correctly
impl LlmProvider for UnifiedLLMClient
Implement LlmProvider for UnifiedLLMClient. Just delegates to the underlying provider — providers already handle the events feature correctly.