pub struct ChatRequest {
pub model: String,
pub messages: Vec<Message>,
pub temperature: Option<f32>,
pub top_p: Option<f32>,
pub max_tokens: Option<u32>,
pub stream: Option<bool>,
pub stop: Option<Vec<String>>,
pub presence_penalty: Option<f32>,
pub frequency_penalty: Option<f32>,
pub logit_bias: Option<HashMap<String, f32>>,
pub user: Option<String>,
pub seed: Option<u64>,
pub tools: Option<Vec<Tool>>,
pub tool_choice: Option<ToolChoice>,
pub response_format: Option<ResponseFormat>,
pub enable_thinking: Option<bool>,
}
Chat completion request
Fields
model: String
    Model identifier (e.g., "openai/gpt-4", "deepseek/deepseek-chat")

messages: Vec<Message>
    List of messages in the conversation

temperature: Option<f32>
    Sampling temperature (0.0 to 2.0)

top_p: Option<f32>
    Nucleus sampling parameter (0.0 to 1.0)

max_tokens: Option<u32>
    Maximum number of tokens to generate

stream: Option<bool>
    Whether to stream the response

stop: Option<Vec<String>>
    Stop sequences

presence_penalty: Option<f32>
    Presence penalty (-2.0 to 2.0)

frequency_penalty: Option<f32>
    Frequency penalty (-2.0 to 2.0)

logit_bias: Option<HashMap<String, f32>>
    Logit bias (maps token identifiers to bias values)

user: Option<String>
    User identifier

seed: Option<u64>
    Random seed for deterministic outputs

tools: Option<Vec<Tool>>
    Tools available to the model

tool_choice: Option<ToolChoice>
    Tool choice strategy

response_format: Option<ResponseFormat>
    Response format specification

enable_thinking: Option<bool>
    Enable thinking/reasoning mode (provider-specific).
    For Aliyun: enables reasoning content for hybrid models like qwen-plus.
    For other providers: may be ignored.
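Note that stop, logit_bias, user, and seed have no builder method in the impl below; since every field is pub, they can be set directly on the struct. A minimal sketch, assuming a hypothetical Message::user constructor and a placeholder crate path:

```rust
use llm_client::{ChatRequest, Message}; // placeholder crate path

let mut request = ChatRequest::new_with_messages(
    "deepseek/deepseek-chat",
    vec![Message::user("Hello!")], // hypothetical constructor
);
// No builder methods exist for these fields, but they are public:
request.seed = Some(42);                       // deterministic outputs
request.stop = Some(vec!["\n\n".to_string()]); // stop sequences
request.user = Some("user-1234".to_string());  // end-user identifier
```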
Implementations
impl ChatRequest
pub fn new_with_messages(model: impl Into<String>, messages: Vec<Message>) -> Self
    Create a new chat request with a model identifier and initial messages
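A minimal usage sketch (Message::user is a hypothetical constructor; substitute whatever the crate's Message API actually provides):

```rust
let request = ChatRequest::new_with_messages(
    "openai/gpt-4",
    vec![Message::user("What is the capital of France?")], // hypothetical
);
```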
pub fn with_messages(self, messages: Vec<Message>) -> Self
    Set the messages for the request

pub fn add_message(self, message: Message) -> Self
    Add a single message to the request
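with_messages replaces the message list wholesale, while add_message appends one message; both consume and return Self, so they chain. A sketch with hypothetical Message::system and Message::user constructors:

```rust
let request = ChatRequest::new_with_messages("openai/gpt-4", vec![])
    .with_messages(vec![Message::system("You are a helpful assistant.")])
    .add_message(Message::user("Summarize this paragraph."));
```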
pub fn with_temperature(self, temperature: f32) -> Self
    Set the sampling temperature

pub fn with_top_p(self, top_p: f32) -> Self
    Set the top_p (nucleus sampling) parameter

pub fn with_max_tokens(self, max_tokens: u32) -> Self
    Set the maximum number of tokens to generate

pub fn with_stream(self, stream: bool) -> Self
    Set whether the response should be streamed

pub fn with_presence_penalty(self, penalty: f32) -> Self
    Set the presence penalty

pub fn with_frequency_penalty(self, penalty: f32) -> Self
    Set the frequency penalty
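The scalar setters above compose into a single builder chain. A sketch with values inside the documented ranges (Message::user is again hypothetical):

```rust
let messages = vec![Message::user("Write a haiku about Rust.")];
let request = ChatRequest::new_with_messages("openai/gpt-4", messages)
    .with_temperature(0.8)        // 0.0 to 2.0
    .with_top_p(0.95)             // 0.0 to 1.0
    .with_max_tokens(1024)
    .with_stream(true)
    .with_presence_penalty(0.5)   // -2.0 to 2.0
    .with_frequency_penalty(0.3); // -2.0 to 2.0
```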
pub fn with_tools(self, tools: Vec<Tool>) -> Self
    Set the tools available to the model

pub fn with_tool_choice(self, tool_choice: ToolChoice) -> Self
    Set the tool choice strategy
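Tool and ToolChoice construction is not documented on this page, so the sketch below assumes both values were built elsewhere:

```rust
// `tools: Vec<Tool>` and `tool_choice: ToolChoice` are assumed to have been
// constructed with whatever API the crate actually provides.
let request = ChatRequest::new_with_messages("openai/gpt-4", messages)
    .with_tools(tools)
    .with_tool_choice(tool_choice);
```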
pub fn with_response_format(self, format: ResponseFormat) -> Self
    Set the response format
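Likewise for ResponseFormat, whose constructors are not shown here; json_format below stands in for however the crate expresses a structured-output format:

```rust
let request = ChatRequest::new_with_messages("openai/gpt-4", messages)
    .with_response_format(json_format); // assumed ResponseFormat value
```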
pub fn with_enable_thinking(self, enable: bool) -> Self
    Enable thinking/reasoning mode.
    For Aliyun: enables reasoning content for hybrid models.
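A sketch targeting an Aliyun hybrid model (the exact identifier string should follow the routing convention documented for the model field above):

```rust
let request = ChatRequest::new_with_messages("qwen-plus", messages)
    .with_enable_thinking(true); // other providers may ignore this flag
```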
Trait Implementations
impl Clone for ChatRequest

fn clone(&self) -> ChatRequest
    Returns a copy of the value
fn clone_from(&mut self, source: &Self)
    Performs copy-assignment from source
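Because every builder method takes self by value, Clone is the natural way to reuse a configured request as a template. A sketch:

```rust
let base = ChatRequest::new_with_messages("openai/gpt-4", messages)
    .with_temperature(0.2);
let short = base.clone().with_max_tokens(256); // template left intact
let creative = base.with_temperature(1.2);     // consumes `base`
```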