pub struct AiClient { /* private fields */ }
Unified AI client, providing a cross-provider interface to AI services.
Usage example:
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Switch model provider by changing the Provider value
    let client = AiClient::new(Provider::Groq)?;
    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: ai_lib::types::common::Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );
    // Note: Set GROQ_API_KEY environment variable for actual API calls
    // Optional: Set AI_PROXY_URL environment variable to use a proxy server
    // let response = client.chat_completion(request).await?;
    println!(
        "Client created successfully with provider: {}",
        client.provider_name()
    );
    println!("Request prepared for model: {}", request.model);
    Ok(())
}

§Proxy Configuration
Configure a proxy server by setting the AI_PROXY_URL environment variable:

export AI_PROXY_URL=http://proxy.example.com:8080

Supported proxy formats:
- HTTP proxy: http://proxy.example.com:8080
- HTTPS proxy: https://proxy.example.com:8080
- With authentication: http://user:pass@proxy.example.com:8080
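For a quick sanity check, this sketch relies on the automatic proxy detection described above; it assumes AI_PROXY_URL has been exported in the shell before running, and uses Groq purely as an example provider:

use ai_lib::{AiClient, Provider};

// Run with: AI_PROXY_URL=http://proxy.example.com:8080 cargo run
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The proxy is picked up automatically from AI_PROXY_URL.
    let client = AiClient::new(Provider::Groq)?;
    println!("provider: {}", client.provider_name());
    Ok(())
}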
§Implementations
impl AiClient
pub fn default_chat_model(&self) -> String
Get the effective default chat model for this client (honors a custom override).
pub fn new(provider: Provider) -> Result<Self, AiLibError>
Create a new AI client for the given provider.
pub fn builder(provider: Provider) -> AiClientBuilder
Create a new AI client builder.
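A minimal sketch of the builder path; AiClientBuilder's methods are not listed on this page, so the build() finalizer shown here is an assumption:

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // `build()` is an assumed finalizer on AiClientBuilder; consult the
    // AiClientBuilder docs for the actual configuration methods.
    let client = AiClient::builder(Provider::Groq).build()?;
    println!("provider: {}", client.provider_name());
    Ok(())
}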
pub fn new_with_metrics(
    provider: Provider,
    metrics: Arc<dyn Metrics>,
) -> Result<Self, AiLibError>
Create an AiClient with an injected metrics implementation.
pub fn with_options(
    provider: Provider,
    opts: ConnectionOptions,
) -> Result<Self, AiLibError>
Create a client with minimal explicit options (base_url/proxy/timeout). Fields left as None in ConnectionOptions fall back to environment variables (e.g., OPENAI_API_KEY, AI_PROXY_URL, AI_TIMEOUT_SECS). Set disable_proxy: true to prevent automatic proxy detection from AI_PROXY_URL.
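A sketch of explicit configuration. The field names (base_url, timeout, disable_proxy) are inferred from the description above, and the struct is assumed to be exhaustively constructible with a Default impl; verify against the ConnectionOptions docs:

use ai_lib::{AiClient, ConnectionOptions, Provider};
use std::time::Duration;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Field names and types are assumptions inferred from the docs above.
    let opts = ConnectionOptions {
        base_url: Some("https://api.groq.com".to_string()),
        timeout: Some(Duration::from_secs(30)),
        disable_proxy: true, // skip automatic AI_PROXY_URL detection
        ..Default::default() // assumes a Default impl; unset fields use env vars
    };
    let client = AiClient::with_options(Provider::Groq, opts)?;
    println!("provider: {}", client.provider_name());
    Ok(())
}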
pub fn connection_options(&self) -> Option<&ConnectionOptions>
Get the connection options this client was created with, if any.
pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self
Set the metrics implementation on this client.
pub fn model_resolver(&self) -> Arc<ModelResolver>
Access the model resolver (advanced customization).
pub async fn chat_completion(
    &self,
    request: ChatCompletionRequest,
) -> Result<ChatCompletionResponse, AiLibError>
Send a chat completion request.
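A round-trip sketch built only from items documented on this page (requires the provider's API key, e.g. GROQ_API_KEY, to be set):

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    // build_simple_request (documented further down this page) uses the
    // provider's default chat model.
    let request = client.build_simple_request("Hello");
    let _response = client.chat_completion(request).await?;
    Ok(())
}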
pub async fn chat_completion_stream(
    &self,
    request: ChatCompletionRequest,
) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>
Send a streaming chat completion request.
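A consumption sketch. It assumes the futures crate is available for StreamExt (the returned stream is Unpin, so next() can be called directly); the chunk's fields are not documented here, so the example only counts chunks:

use ai_lib::{AiClient, Provider};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = client.build_simple_request("Stream a short reply");
    let mut stream = client.chat_completion_stream(request).await?;

    // Each item is a Result<ChatCompletionChunk, AiLibError>.
    let mut chunks = 0usize;
    while let Some(chunk) = stream.next().await {
        let _chunk = chunk?;
        chunks += 1;
    }
    println!("received {chunks} chunks");
    Ok(())
}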
pub async fn chat_completion_stream_with_cancel(
    &self,
    request: ChatCompletionRequest,
) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>
Send a streaming chat completion request with a cancellation handle.
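A cancellation sketch; CancelHandle's API is not shown on this page, so the cancel() method name is an assumption:

use ai_lib::{AiClient, Provider};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = client.build_simple_request("Long generation");
    let (mut stream, cancel) = client.chat_completion_stream_with_cancel(request).await?;

    // Stop after the first chunk; `cancel.cancel()` is an assumed method name.
    if let Some(first) = stream.next().await {
        let _ = first?;
        cancel.cancel();
    }
    Ok(())
}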
pub async fn chat_completion_batch(
    &self,
    requests: Vec<ChatCompletionRequest>,
    concurrency_limit: Option<usize>,
) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>
Send a batch of chat completion requests, optionally capping concurrency.
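A batch sketch; the return type carries per-request Results, so one failed request does not discard the others:

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let requests = vec![
        client.build_simple_request("First prompt"),
        client.build_simple_request("Second prompt"),
    ];
    // At most 2 requests in flight at once.
    let results = client.chat_completion_batch(requests, Some(2)).await?;
    for result in results {
        match result {
            Ok(_response) => println!("ok"),
            Err(e) => eprintln!("request failed: {e}"),
        }
    }
    Ok(())
}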
pub async fn chat_completion_batch_smart(
    &self,
    requests: Vec<ChatCompletionRequest>,
) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>
Smart batch processing: like chat_completion_batch, but without an explicit concurrency limit.
pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>
Get the list of models supported by the current provider.
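A listing sketch using only items documented on this page:

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    for model in client.list_models().await? {
        println!("{model}");
    }
    Ok(())
}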
pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>
Switch this client to a different AI model provider.
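A sketch of switching in place; Provider::OpenAI is an assumed variant name (only Provider::Groq appears on this page, though the with_options docs mention OPENAI_API_KEY):

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AiClient::new(Provider::Groq)?;
    // `Provider::OpenAI` is an assumption; substitute any supported variant.
    client.switch_provider(Provider::OpenAI)?;
    println!("now using: {}", client.provider_name());
    Ok(())
}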
pub fn provider_name(&self) -> &str
Get the active provider name reported by the underlying strategy.
pub fn build_simple_request<S: Into<String>>(
    &self,
    prompt: S,
) -> ChatCompletionRequest
Convenience helper: construct a request with the provider’s default chat model.
pub fn build_simple_request_with_model<S: Into<String>>(
    &self,
    prompt: S,
    model: S,
) -> ChatCompletionRequest
Convenience helper: construct a request with an explicitly specified chat model.
pub fn build_multimodal_request<S: Into<String>>(
    &self,
    prompt: S,
) -> Result<ChatCompletionRequest, AiLibError>
Convenience helper: construct a request with the provider’s default multimodal model.
pub fn build_multimodal_request_with_model<S: Into<String>>(
    &self,
    prompt: S,
    model: S,
) -> ChatCompletionRequest
Convenience helper: construct a request with an explicitly specified multimodal model.
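These builders pair with chat_completion; a sketch reusing the placeholder model name from the usage example above:

use ai_lib::{AiClient, Provider};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    // Uses the provider's default chat model.
    let _simple = client.build_simple_request("Hello");
    // Explicit model; "test-model" is the placeholder from the usage example.
    let _explicit = client.build_simple_request_with_model("Hello", "test-model");
    // Default multimodal model; fallible, per the Result in its signature.
    let _multimodal = client.build_multimodal_request("Describe this image")?;
    Ok(())
}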
pub async fn quick_chat_text<P: Into<String>>(
    provider: Provider,
    prompt: P,
) -> Result<String, AiLibError>
One-shot helper: create a client for the provider, send a single user prompt, and return the response text.
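For example (requires the provider's API key in the environment):

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // One call covers client construction, the request, and text extraction.
    let text = AiClient::quick_chat_text(Provider::Groq, "Say hello").await?;
    println!("{text}");
    Ok(())
}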
pub async fn quick_chat_text_with_model<P: Into<String>, M: Into<String>>(
    provider: Provider,
    prompt: P,
    model: M,
) -> Result<String, AiLibError>
One-shot helper with an explicit model.
pub async fn quick_multimodal_text<P: Into<String>>(
    provider: Provider,
    prompt: P,
) -> Result<String, AiLibError>
One-shot multimodal helper.
pub async fn quick_multimodal_text_with_model<P: Into<String>, M: Into<String>>(
    provider: Provider,
    prompt: P,
    model: M,
) -> Result<String, AiLibError>
One-shot multimodal helper with an explicit model.
pub async fn quick_chat_text_with_options<P: Into<String>>(
    provider: Provider,
    prompt: P,
    options: ModelOptions,
) -> Result<String, AiLibError>
One-shot helper with model options.
pub async fn upload_file(&self, path: &str) -> Result<String, AiLibError>
Upload a local file.
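A sketch; whether the returned String is a file ID or a URL is not specified on this page, so it is printed opaquely:

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    // `./image.png` is a placeholder path for illustration.
    let file_ref = client.upload_file("./image.png").await?;
    println!("uploaded: {file_ref}");
    Ok(())
}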