pub struct AiClient { /* private fields */ }
Unified AI client
Usage example:
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Switch model provider by changing the Provider value
    let client = AiClient::new(Provider::Groq)?;

    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: ai_lib::types::common::Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );

    // Note: set the GROQ_API_KEY environment variable for actual API calls
    // Optional: set the AI_PROXY_URL environment variable to use a proxy server
    // let response = client.chat_completion(request).await?;

    println!("Client created successfully with provider: {:?}", client.current_provider());
    println!("Request prepared for model: {}", request.model);
    Ok(())
}
§Proxy Configuration
Configure a proxy server by setting the AI_PROXY_URL environment variable:

export AI_PROXY_URL=http://proxy.example.com:8080

Supported proxy formats:
- HTTP proxy: http://proxy.example.com:8080
- HTTPS proxy: https://proxy.example.com:8080
- With authentication: http://user:pass@proxy.example.com:8080
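As an alternative to the environment variable, a proxy can also be supplied explicitly through the client builder (see AiClient::builder below). A minimal sketch:

use ai_lib::{AiClient, Provider};

// Pass a proxy explicitly instead of relying on AI_PROXY_URL.
let client = AiClient::builder(Provider::Groq)
    .with_proxy(Some("http://proxy.example.com:8080"))
    .build()?;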
Implementations§
impl AiClient
pub fn default_chat_model(&self) -> String
Get the effective default chat model for this client (honors a custom override set via AiClientBuilder).
pub fn new(provider: Provider) -> Result<Self, AiLibError>
pub fn with_options(
    provider: Provider,
    opts: ConnectionOptions,
) -> Result<Self, AiLibError>
Create a client with minimal explicit options (base_url/proxy/timeout). Not all providers support overrides; unsupported providers ignore unspecified fields gracefully.
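A minimal sketch, assuming ConnectionOptions exposes optional base_url, proxy, and timeout fields and implements Default; the exact field names and import path are assumptions and should be checked against the ConnectionOptions docs:

use std::time::Duration;
use ai_lib::{AiClient, Provider};
// Assumption: ConnectionOptions is importable from the crate root or a config module.

// Hypothetical field layout for ConnectionOptions (not confirmed by this page).
let opts = ConnectionOptions {
    base_url: Some("https://custom.groq.com".to_string()),
    proxy: Some("http://proxy.example.com:8080".to_string()),
    timeout: Some(Duration::from_secs(30)),
    ..Default::default()
};
let client = AiClient::with_options(Provider::Groq, opts)?;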
pub fn connection_options(&self) -> Option<&ConnectionOptions>
pub fn builder(provider: Provider) -> AiClientBuilder
Create a new AI client builder
The builder pattern allows more flexible client configuration:
- Automatic environment variable detection
- Support for custom base_url and proxy
- Support for custom timeout and connection pool configuration
§Arguments
- provider - The AI model provider to use
§Returns
- AiClientBuilder - Builder instance
§Example
use ai_lib::{AiClient, Provider};

// Simplest usage - automatic environment variable detection
let client = AiClient::builder(Provider::Groq).build()?;

// Custom base_url and proxy
let client = AiClient::builder(Provider::Groq)
    .with_base_url("https://custom.groq.com")
    .with_proxy(Some("http://proxy.example.com:8080"))
    .build()?;
pub fn new_with_metrics(
    provider: Provider,
    metrics: Arc<dyn Metrics>,
) -> Result<Self, AiLibError>
Create an AiClient with an injected metrics implementation.
pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self
Set the metrics implementation on the client.
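A sketch of metrics injection, assuming a user-defined type (here hypothetically named MyMetrics) that implements the crate's Metrics trait:

use std::sync::Arc;
// Assumption: the Metrics trait is importable from ai_lib (exact path not shown on this page).

// `MyMetrics` is a hypothetical user-defined implementation of the Metrics trait.
let metrics: Arc<dyn Metrics> = Arc::new(MyMetrics::default());

// Inject at construction time...
let client = AiClient::new_with_metrics(Provider::Groq, metrics.clone())?;
// ...or attach to an already-constructed client.
let client = AiClient::new(Provider::Groq)?.with_metrics(metrics);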
pub async fn chat_completion(
    &self,
    request: ChatCompletionRequest,
) -> Result<ChatCompletionResponse, AiLibError>
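Sending a prepared request looks like the following sketch; it uses the build_simple_request helper documented below and requires the provider's API key (e.g. GROQ_API_KEY) to be set:

let client = AiClient::new(Provider::Groq)?;
let request = client.build_simple_request("Hello");
let response = client.chat_completion(request).await?;
println!("{}", response.choices[0].message.content.as_text());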
pub async fn chat_completion_stream(
    &self,
    request: ChatCompletionRequest,
) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>
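A streaming sketch: iterating the boxed stream assumes the futures crate's StreamExt is available, and the chunk field layout (choices / delta / content) is an assumption modeled on the non-streaming response, not confirmed by this page:

use futures::StreamExt;

let request = client.build_simple_request("Hello");
let mut stream = client.chat_completion_stream(request).await?;
while let Some(chunk) = stream.next().await {
    let chunk = chunk?;
    // Assumption: ChatCompletionChunk exposes OpenAI-style `choices[..].delta.content`.
    for choice in &chunk.choices {
        if let Some(text) = &choice.delta.content {
            print!("{text}");
        }
    }
}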
pub async fn chat_completion_stream_with_cancel(
    &self,
    request: ChatCompletionRequest,
) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>
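The cancellable variant additionally returns a CancelHandle; a sketch of the destructuring (the handle's exact methods are not shown on this page):

let request = client.build_simple_request("Hello");
let (mut stream, cancel_handle) = client.chat_completion_stream_with_cancel(request).await?;
// Consume `stream` as with chat_completion_stream; keep `cancel_handle` around
// to stop the stream early (see the CancelHandle docs for its methods).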
pub async fn chat_completion_batch(
    &self,
    requests: Vec<ChatCompletionRequest>,
    concurrency_limit: Option<usize>,
) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>
Batch chat completion requests
§Arguments
- requests - List of chat completion requests
- concurrency_limit - Maximum concurrent request count (None means unlimited)
§Returns
- Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
§Example
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;

    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello".to_string()),
                function_call: None,
            }],
        ),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("How are you?".to_string()),
                function_call: None,
            }],
        ),
    ];

    // Limit concurrency to 5
    let responses = client.chat_completion_batch(requests, Some(5)).await?;
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
    Ok(())
}
pub async fn chat_completion_batch_smart(
    &self,
    requests: Vec<ChatCompletionRequest>,
) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>
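Usage mirrors chat_completion_batch without an explicit concurrency limit; how the concurrency is chosen internally is not documented on this page. A sketch, with `client` and `requests` prepared as in the chat_completion_batch example above:

let responses = client.chat_completion_batch_smart(requests).await?;
for (i, response) in responses.iter().enumerate() {
    match response {
        Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
        Err(e) => println!("Request {} failed: {}", i, e),
    }
}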
pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>
Get the list of supported models.
§Returns
- Result<Vec<String>, AiLibError> - Returns the model list on success, error on failure
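A minimal sketch (requires provider credentials in the environment):

let client = AiClient::new(Provider::Groq)?;
let models = client.list_models().await?;
for model in &models {
    println!("{model}");
}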
pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>
Switch AI model provider
§Arguments
- provider - New provider
§Returns
- Result<(), AiLibError> - Returns () on success, error on failure
§Example
use ai_lib::{AiClient, Provider};

let mut client = AiClient::new(Provider::Groq)?;
// Switch from Groq to Groq (demonstrating switch functionality)
client.switch_provider(Provider::Groq)?;
pub fn current_provider(&self) -> Provider
Get current provider
pub fn build_simple_request<S: Into<String>>(
    &self,
    prompt: S,
) -> ChatCompletionRequest
Convenience helper: construct a request with the provider’s default chat model. This does NOT send the request. Uses custom default model if set via AiClientBuilder, otherwise uses provider default.
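For example, building a request and then sending it explicitly:

let client = AiClient::new(Provider::Groq)?;
// Construct only; nothing is sent yet.
let request = client.build_simple_request("Hello, world!");
// Send it when ready.
let response = client.chat_completion(request).await?;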
pub fn build_simple_request_with_model<S: Into<String>>(
    &self,
    prompt: S,
    model: S,
) -> ChatCompletionRequest
Convenience helper: construct a request with an explicitly specified chat model. This does NOT send the request.
pub fn build_multimodal_request<S: Into<String>>(
    &self,
    prompt: S,
) -> Result<ChatCompletionRequest, AiLibError>
Convenience helper: construct a request with the provider’s default multimodal model. This does NOT send the request. Uses custom default model if set via AiClientBuilder, otherwise uses provider default.
pub fn build_multimodal_request_with_model<S: Into<String>>(
    &self,
    prompt: S,
    model: S,
) -> ChatCompletionRequest
Convenience helper: construct a request with an explicitly specified multimodal model. This does NOT send the request.
pub async fn quick_chat_text<P: Into<String>>(
    provider: Provider,
    prompt: P,
) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using the default chat model, and return plain text content (first choice).
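A one-line sketch (requires the provider's API key, e.g. GROQ_API_KEY, in the environment):

let text = AiClient::quick_chat_text(Provider::Groq, "Say hello in one sentence.").await?;
println!("{text}");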
pub async fn quick_chat_text_with_model<P: Into<String>, M: Into<String>>(
    provider: Provider,
    prompt: P,
    model: M,
) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using an explicitly specified chat model, and return plain text content (first choice).
pub async fn quick_multimodal_text<P: Into<String>>(
    provider: Provider,
    prompt: P,
) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using the default multimodal model, and return plain text content (first choice).
pub async fn quick_multimodal_text_with_model<P: Into<String>, M: Into<String>>(
    provider: Provider,
    prompt: P,
    model: M,
) -> Result<String, AiLibError>
One-shot helper: create a client for provider, send a single user prompt using an explicitly specified multimodal model, and return plain text content (first choice).
pub async fn quick_chat_text_with_options<P: Into<String>>(
    provider: Provider,
    prompt: P,
    options: ModelOptions,
) -> Result<String, AiLibError>
One-shot helper with model options: create a client for provider, send a single user prompt using the specified model options, and return plain text content (first choice).