Struct AiClient 

Source
pub struct AiClient { /* private fields */ }

Unified AI client

Usage example:

use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Switch model provider by changing Provider value
    let client = AiClient::new(Provider::Groq)?;
     
    let request = ChatCompletionRequest::new(
        "test-model".to_string(),
        vec![Message {
            role: Role::User,
            content: ai_lib::types::common::Content::Text("Hello".to_string()),
            function_call: None,
        }],
    );
     
    // Note: Set GROQ_API_KEY environment variable for actual API calls
    // Optional: Set AI_PROXY_URL environment variable to use proxy server
    // let response = client.chat_completion(request).await?;
     
    println!("Client created successfully with provider: {:?}", client.current_provider());
    println!("Request prepared for model: {}", request.model);
     
    Ok(())
}

§Proxy Configuration

Configure proxy server by setting the AI_PROXY_URL environment variable:

export AI_PROXY_URL=http://proxy.example.com:8080

Supported proxy formats:

  • HTTP proxy: http://proxy.example.com:8080
  • HTTPS proxy: https://proxy.example.com:8080
  • With authentication: http://user:pass@proxy.example.com:8080
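
The proxy can also be configured programmatically before the client is created, since the variable is read from the process environment. A minimal sketch, assuming AI_PROXY_URL is consulted when the client is constructed:

use ai_lib::{AiClient, Provider};

// Illustration only: set the proxy for this process, then create the client.
std::env::set_var("AI_PROXY_URL", "http://proxy.example.com:8080");
let client = AiClient::new(Provider::Groq)?;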

Implementations§

Source§

impl AiClient

Source

pub fn default_chat_model(&self) -> String

Get the effective default chat model for this client (honors custom override)
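
§Example

A minimal sketch showing how the effective default model can be inspected:

use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
// Returns the custom override if one was set via the builder, otherwise the provider default.
println!("Default chat model: {}", client.default_chat_model());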

Source

pub fn new(provider: Provider) -> Result<Self, AiLibError>

Create a new AI client

§Arguments
  • provider - The AI model provider to use
§Returns
  • Result<Self, AiLibError> - Client instance on success, error on failure
§Example
use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
Source

pub fn with_options( provider: Provider, opts: ConnectionOptions, ) -> Result<Self, AiLibError>

Create a client with minimal explicit options (base_url/proxy/timeout). Not all providers support these overrides; providers that don't will ignore the unsupported fields gracefully.
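
§Example

A sketch only; the import path and the ConnectionOptions field names (base_url, proxy, timeout) are assumptions inferred from the description above and should be checked against the struct's documentation:

use ai_lib::{AiClient, Provider, ConnectionOptions};
use std::time::Duration;

// Field names below are assumed, not verified against the crate.
let opts = ConnectionOptions {
    base_url: Some("https://custom.groq.com".to_string()),
    proxy: Some("http://proxy.example.com:8080".to_string()),
    timeout: Some(Duration::from_secs(30)),
    ..Default::default()
};
let client = AiClient::with_options(Provider::Groq, opts)?;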

Source

pub fn connection_options(&self) -> Option<&ConnectionOptions>

Source

pub fn builder(provider: Provider) -> AiClientBuilder

Create a new AI client builder

The builder pattern allows more flexible client configuration:

  • Automatic environment variable detection
  • Support for custom base_url and proxy
  • Support for custom timeout and connection pool configuration
§Arguments
  • provider - The AI model provider to use
§Returns
  • AiClientBuilder - Builder instance
§Example
use ai_lib::{AiClient, Provider};

// Simplest usage - automatic environment variable detection
let client = AiClient::builder(Provider::Groq).build()?;

// Custom base_url and proxy
let client = AiClient::builder(Provider::Groq)
    .with_base_url("https://custom.groq.com")
    .with_proxy(Some("http://proxy.example.com:8080"))
    .build()?;
Source

pub fn new_with_metrics( provider: Provider, metrics: Arc<dyn Metrics>, ) -> Result<Self, AiLibError>

Create AiClient with injected metrics implementation

Source

pub fn with_metrics(self, metrics: Arc<dyn Metrics>) -> Self

Set metrics implementation on client

Source

pub async fn chat_completion( &self, request: ChatCompletionRequest, ) -> Result<ChatCompletionResponse, AiLibError>

Send chat completion request

§Arguments
  • request - Chat completion request
§Returns
  • Result<ChatCompletionResponse, AiLibError> - Response on success, error on failure
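§Example

A minimal sketch (assumes the GROQ_API_KEY environment variable is set):

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = client.build_simple_request("Hello");
    let response = client.chat_completion(request).await?;
    println!("{}", response.choices[0].message.content.as_text());
    Ok(())
}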
Source

pub async fn chat_completion_stream( &self, request: ChatCompletionRequest, ) -> Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError>

Streaming chat completion request

§Arguments
  • request - Chat completion request
§Returns
  • Result<Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, AiLibError> - Stream of response chunks on success, error on failure
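§Example

A sketch of consuming the stream. It assumes the futures crate (for StreamExt) is available as a dependency, and the chunk field names (choices[0].delta.content, OpenAI-style) are assumptions to verify against ChatCompletionChunk:

use ai_lib::{AiClient, Provider};
use futures::StreamExt; // assumed to be available as a dependency

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = client.build_simple_request("Tell me a short story");
    let mut stream = client.chat_completion_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        let chunk = chunk?;
        // Chunk field names are assumed; check the ChatCompletionChunk docs.
        if let Some(delta) = &chunk.choices[0].delta.content {
            print!("{}", delta);
        }
    }
    Ok(())
}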
Source

pub async fn chat_completion_stream_with_cancel( &self, request: ChatCompletionRequest, ) -> Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError>

Streaming chat completion request with cancel control

§Arguments
  • request - Chat completion request
§Returns
  • Result<(Box<dyn Stream<Item = Result<ChatCompletionChunk, AiLibError>> + Send + Unpin>, CancelHandle), AiLibError> - Returns the streaming response and a cancel handle on success
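§Example

A sketch only; the CancelHandle API (a cancel() method) and the use of futures::StreamExt are assumptions and should be checked against their documentation:

use ai_lib::{AiClient, Provider};
use futures::StreamExt; // assumed to be available as a dependency

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let request = client.build_simple_request("Write a long story");
    let (mut stream, cancel) = client.chat_completion_stream_with_cancel(request).await?;

    let mut received = 0;
    while let Some(chunk) = stream.next().await {
        let _chunk = chunk?;
        received += 1;
        if received >= 5 {
            // Assumed method name: stop the stream early via the cancel handle.
            cancel.cancel();
            break;
        }
    }
    Ok(())
}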
Source

pub async fn chat_completion_batch( &self, requests: Vec<ChatCompletionRequest>, concurrency_limit: Option<usize>, ) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Batch chat completion requests

§Arguments
  • requests - List of chat completion requests
  • concurrency_limit - Maximum concurrent request count (None means unlimited)
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
§Example
use ai_lib::{AiClient, Provider, ChatCompletionRequest, Message, Role};
use ai_lib::types::common::Content;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
     
    let requests = vec![
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("Hello".to_string()),
                function_call: None,
            }],
        ),
        ChatCompletionRequest::new(
            "llama3-8b-8192".to_string(),
            vec![Message {
                role: Role::User,
                content: Content::Text("How are you?".to_string()),
                function_call: None,
            }],
        ),
    ];
     
    // Limit concurrency to 5
    let responses = client.chat_completion_batch(requests, Some(5)).await?;
     
    for (i, response) in responses.iter().enumerate() {
        match response {
            Ok(resp) => println!("Request {}: {}", i, resp.choices[0].message.content.as_text()),
            Err(e) => println!("Request {} failed: {}", i, e),
        }
    }
     
    Ok(())
}
Source

pub async fn chat_completion_batch_smart( &self, requests: Vec<ChatCompletionRequest>, ) -> Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError>

Smart batch processing: automatically chooses a processing strategy based on the number of requests

§Arguments
  • requests - List of chat completion requests
§Returns
  • Result<Vec<Result<ChatCompletionResponse, AiLibError>>, AiLibError> - Returns response results for all requests
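§Example

A minimal sketch; the processing strategy (sequential vs. concurrent) is chosen internally based on the request count:

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let requests = vec![
        client.build_simple_request("Hello"),
        client.build_simple_request("How are you?"),
    ];
    let responses = client.chat_completion_batch_smart(requests).await?;
    println!("Got {} results", responses.len());
    Ok(())
}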
Source

pub async fn list_models(&self) -> Result<Vec<String>, AiLibError>

Get list of supported models

§Returns
  • Result<Vec<String>, AiLibError> - Returns model list on success, error on failure
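§Example

A minimal sketch listing the models reported by the current provider:

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = AiClient::new(Provider::Groq)?;
    let models = client.list_models().await?;
    for model in models {
        println!("{}", model);
    }
    Ok(())
}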
Source

pub fn switch_provider(&mut self, provider: Provider) -> Result<(), AiLibError>

Switch AI model provider

§Arguments
  • provider - New provider
§Returns
  • Result<(), AiLibError> - Returns () on success, error on failure
§Example
use ai_lib::{AiClient, Provider};

let mut client = AiClient::new(Provider::Groq)?;
// Switch provider (switching to the same provider here just demonstrates the call)
client.switch_provider(Provider::Groq)?;
Source

pub fn current_provider(&self) -> Provider

Get current provider

Source

pub fn build_simple_request<S: Into<String>>( &self, prompt: S, ) -> ChatCompletionRequest

Convenience helper: construct a request with the provider’s default chat model. This does NOT send the request. Uses custom default model if set via AiClientBuilder, otherwise uses provider default.
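
§Example

A minimal sketch; note that only a request object is built, nothing is sent:

use ai_lib::{AiClient, Provider};

let client = AiClient::new(Provider::Groq)?;
let request = client.build_simple_request("Summarize this paragraph");
// The model is the effective default chat model; send it later with chat_completion.
println!("Prepared request for model: {}", request.model);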

Source

pub fn build_simple_request_with_model<S: Into<String>>( &self, prompt: S, model: S, ) -> ChatCompletionRequest

Convenience helper: construct a request with an explicitly specified chat model. This does NOT send the request.

Source

pub fn build_multimodal_request<S: Into<String>>( &self, prompt: S, ) -> Result<ChatCompletionRequest, AiLibError>

Convenience helper: construct a request with the provider’s default multimodal model. This does NOT send the request. Uses custom default model if set via AiClientBuilder, otherwise uses provider default.

Source

pub fn build_multimodal_request_with_model<S: Into<String>>( &self, prompt: S, model: S, ) -> ChatCompletionRequest

Convenience helper: construct a request with an explicitly specified multimodal model. This does NOT send the request.

Source

pub async fn quick_chat_text<P: Into<String>>( provider: Provider, prompt: P, ) -> Result<String, AiLibError>

One-shot helper: create a client for provider, send a single user prompt using the default chat model, and return plain text content (first choice).
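
§Example

A minimal sketch (assumes the GROQ_API_KEY environment variable is set):

use ai_lib::{AiClient, Provider};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let text = AiClient::quick_chat_text(Provider::Groq, "Hello").await?;
    println!("{}", text);
    Ok(())
}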

Source

pub async fn quick_chat_text_with_model<P: Into<String>, M: Into<String>>( provider: Provider, prompt: P, model: M, ) -> Result<String, AiLibError>

One-shot helper: create a client for provider, send a single user prompt using an explicitly specified chat model, and return plain text content (first choice).

Source

pub async fn quick_multimodal_text<P: Into<String>>( provider: Provider, prompt: P, ) -> Result<String, AiLibError>

One-shot helper: create a client for provider, send a single user prompt using the default multimodal model, and return plain text content (first choice).

Source

pub async fn quick_multimodal_text_with_model<P: Into<String>, M: Into<String>>( provider: Provider, prompt: P, model: M, ) -> Result<String, AiLibError>

One-shot helper: create a client for provider, send a single user prompt using an explicitly specified multimodal model, and return plain text content (first choice).

Source

pub async fn quick_chat_text_with_options<P: Into<String>>( provider: Provider, prompt: P, options: ModelOptions, ) -> Result<String, AiLibError>

One-shot helper with model options: create a client for provider, send a single user prompt using specified model options, and return plain text content (first choice).

Auto Trait Implementations§

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> PolicyExt for T
where T: ?Sized,

Source§

fn and<P, B, E>(self, other: P) -> And<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow only if self and other return Action::Follow. Read more
Source§

fn or<P, B, E>(self, other: P) -> Or<T, P>
where T: Policy<B, E>, P: Policy<B, E>,

Create a new Policy that returns Action::Follow if either self or other returns Action::Follow. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

Source§

fn vzip(self) -> V

Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

impl<T> ErasedDestructor for T
where T: 'static,