pub struct ChatRequestBuilder<'a> { /* private fields */ }

Expand description
Builder for chat requests.
Implementations

Source §
impl<'a> ChatRequestBuilder<'a>
impl<'a> ChatRequestBuilder<'a>
Source
pub fn messages(self, messages: Vec<Message>) -> ChatRequestBuilder<'a>
pub fn messages(self, messages: Vec<Message>) -> ChatRequestBuilder<'a>
Add messages to the conversation.
Source
pub fn temperature(self, temp: f64) -> ChatRequestBuilder<'a>
pub fn temperature(self, temp: f64) -> ChatRequestBuilder<'a>
Set temperature.
Source
pub fn max_tokens(self, max: u32) -> ChatRequestBuilder<'a>
pub fn max_tokens(self, max: u32) -> ChatRequestBuilder<'a>
Set max tokens.
Source
pub fn stream(self) -> ChatRequestBuilder<'a>
pub fn stream(self) -> ChatRequestBuilder<'a>
Enable streaming.
Source
pub fn tools(self, tools: Vec<ToolDefinition>) -> ChatRequestBuilder<'a>
pub fn tools(self, tools: Vec<ToolDefinition>) -> ChatRequestBuilder<'a>
Set tools for function calling.
Source
pub fn tool_choice(self, tool_choice: Value) -> ChatRequestBuilder<'a>
pub fn tool_choice(self, tool_choice: Value) -> ChatRequestBuilder<'a>
Set tool_choice (OpenAI-style).
Source
pub fn tools_json(self, tools: Vec<Value>) -> ChatRequestBuilder<'a>
pub fn tools_json(self, tools: Vec<Value>) -> ChatRequestBuilder<'a>
Set tools from raw JSON values (e.g., from existing JSON Schema tool definitions).
Convenience method for integrating with tool systems that produce serde_json::Value.
Values that fail to deserialize into ToolDefinition are skipped.
Source
pub fn model(self, model: impl Into<String>) -> ChatRequestBuilder<'a>
pub fn model(self, model: impl Into<String>) -> ChatRequestBuilder<'a>
Override the model for this request.
When set, this overrides the client’s default model. Useful for single-client multi-model usage (e.g., same API key with different models).
Example
let client = AiClient::new("openai/gpt-4o").await?;
let resp = client.chat()
.messages(msgs)
.model("gpt-4o-mini") // Use different model for this request
.execute()
.await?;

Source
pub fn response_format(self, cfg: JsonModeConfig) -> ChatRequestBuilder<'a>
pub fn response_format(self, cfg: JsonModeConfig) -> ChatRequestBuilder<'a>
Enable structured output using JSON mode configuration (OpenAI-style response_format).
Source
pub async fn execute_stream(
self,
) -> Result<Pin<Box<dyn Stream<Item = Result<StreamingEvent, Error>> + Send>>, Error>
pub async fn execute_stream( self, ) -> Result<Pin<Box<dyn Stream<Item = Result<StreamingEvent, Error>> + Send>>, Error>
Execute the request and return a stream of events.
Source
pub async fn execute_stream_with_cancel_and_stats(
self,
) -> Result<(Pin<Box<dyn Stream<Item = Result<StreamingEvent, Error>> + Send>>, CancelHandle, CallStats), Error>
pub async fn execute_stream_with_cancel_and_stats( self, ) -> Result<(Pin<Box<dyn Stream<Item = Result<StreamingEvent, Error>> + Send>>, CancelHandle, CallStats), Error>
Execute the request and return a cancellable stream of events plus per-call stats.
Streaming semantics:
- retry/fallback may happen only before any event is emitted to the caller
- once an event is emitted, we will not retry automatically to avoid duplicate output
Source
pub async fn execute_stream_with_cancel(
self,
) -> Result<(Pin<Box<dyn Stream<Item = Result<StreamingEvent, Error>> + Send>>, CancelHandle), Error>
pub async fn execute_stream_with_cancel( self, ) -> Result<(Pin<Box<dyn Stream<Item = Result<StreamingEvent, Error>> + Send>>, CancelHandle), Error>
Execute the request and return a cancellable stream of events.
Returns a stream and a CancelHandle. Call cancel_handle.cancel() to stop
the stream early (e.g., when the user abandons the request).
Example
let (mut stream, cancel_handle) = client.chat()
.messages(msgs)
.stream()
.execute_stream_with_cancel()
.await?;
// In another task or on user cancel:
cancel_handle.cancel();
while let Some(event) = stream.next().await {
match event? {
StreamingEvent::StreamEnd { .. } => break,
ev => process(ev),
}
}

Source
pub async fn execute(self) -> Result<UnifiedResponse, Error>
pub async fn execute(self) -> Result<UnifiedResponse, Error>
Execute the request and return the complete response.