pub struct CreateChatCompletionRequest_Variant2 {
pub messages: Vec<ChatCompletionRequestMessage>,
pub model: ModelIdsShared,
pub modalities: Option<ResponseModalities>,
pub reasoning_effort: Option<ReasoningEffort>,
pub max_completion_tokens: Option<i64>,
pub frequency_penalty: Option<f64>,
pub presence_penalty: Option<f64>,
pub web_search_options: Option<CreateChatCompletionRequest_Variant2_WebSearchOptions>,
pub top_logprobs: Option<i64>,
pub response_format: Option<CreateChatCompletionRequest_Variant2_ResponseFormat>,
pub audio: Option<CreateChatCompletionRequest_Variant2_Audio>,
pub store: Option<bool>,
pub stream: Option<bool>,
pub stop: Option<StopConfiguration>,
pub logit_bias: Option<CreateChatCompletionRequest_Variant2_LogitBias>,
pub logprobs: Option<bool>,
pub max_tokens: Option<i64>,
pub n: Option<i64>,
pub prediction: Option<PredictionContent>,
pub seed: Option<i64>,
pub stream_options: Option<ChatCompletionStreamOptions>,
pub tools: Option<Vec<ChatCompletionTool>>,
pub tool_choice: Option<ChatCompletionToolChoiceOption>,
pub parallel_tool_calls: Option<ParallelToolCalls>,
pub function_call: Option<CreateChatCompletionRequest_Variant2_FunctionCall>,
pub functions: Option<Vec<ChatCompletionFunctions>>,
}
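A rough sketch of how a request might be assembled. The Default impl, the string conversion for ModelIdsShared, and the contents of the messages vector are assumptions for illustration; only Clone is shown among this type's trait implementations.

// Hypothetical construction sketch. Only `messages` and `model` are
// non-Option (required) fields; everything else can be left unset.
let request = CreateChatCompletionRequest_Variant2 {
    model: ModelIdsShared::from("gpt-4o"),   // assumption: From<&str> exists
    messages: vec![/* ChatCompletionRequestMessage values */],
    max_completion_tokens: Some(1024),       // cap on visible + reasoning tokens
    seed: Some(42),                          // best-effort determinism (Beta)
    stream: Some(false),
    ..Default::default()                     // assumption: a Default impl exists
};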
Fields
messages: Vec<ChatCompletionRequestMessage>
A list of messages comprising the conversation so far.
model: ModelIdsShared
Model ID used to generate the response, like gpt-4o or o3.
modalities: Option<ResponseModalities>
reasoning_effort: Option<ReasoningEffort>
max_completion_tokens: Option<i64>
An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
frequency_penalty: Option<f64>
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
presence_penalty: Option<f64>
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
web_search_options: Option<CreateChatCompletionRequest_Variant2_WebSearchOptions>
top_logprobs: Option<i64>
An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. logprobs must be set to true if this parameter is used.
response_format: Option<CreateChatCompletionRequest_Variant2_ResponseFormat>
audio: Option<CreateChatCompletionRequest_Variant2_Audio>
store: Option<bool>
Whether or not to store the output of this chat completion request for use in our model distillation or evals products.
stream: Option<bool>
If set to true, the model response data will be streamed to the client as it is generated using server-sent events.
stop: Option<StopConfiguration>
logit_bias: Option<CreateChatCompletionRequest_Variant2_LogitBias>
Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
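At the HTTP layer, logit_bias is a JSON object mapping token IDs (as strings) to bias values; how the generated CreateChatCompletionRequest_Variant2_LogitBias wrapper is built is not shown on this page, so the sketch below illustrates only the wire shape.

use std::collections::HashMap;

// Wire-shape sketch only (assumption: the wrapper type maps onto this).
// Keys are tokenizer token IDs as strings; values are biases in [-100, 100].
let mut bias: HashMap<String, i32> = HashMap::new();
bias.insert("50256".to_string(), -100); // effectively ban this token ID
bias.insert("1234".to_string(), 25);    // make this token ID more likely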
logprobs: Option<bool>
Whether to return log probabilities of the output tokens or not.
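Taken together with top_logprobs, a request for per-token log probabilities might look like the following sketch (same Default assumption as in the construction example above):

// Sketch: logprobs must be true for top_logprobs to take effect.
let req = CreateChatCompletionRequest_Variant2 {
    logprobs: Some(true),
    top_logprobs: Some(5), // integer in 0..=20
    ..Default::default()   // assumption: a Default impl exists
};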
max_tokens: Option<i64>
The maximum number of tokens that can be generated in the chat completion. This value is now deprecated in favor of max_completion_tokens.
n: Option<i64>
How many chat completion choices to generate for each input message.
prediction: Option<PredictionContent>
seed: Option<i64>
This feature is in Beta. If specified, the system will make a best effort to sample deterministically, so that repeated requests with the same seed and parameters should return the same result.
stream_options: Option<ChatCompletionStreamOptions>
tools: Option<Vec<ChatCompletionTool>>
A list of tools the model may call.
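The underlying HTTP API expects each tool as a function definition with a JSON Schema for its parameters; the crate's ChatCompletionTool type presumably mirrors this shape (an assumption, since its fields are not shown on this page). A sketch of the wire format using serde_json:

use serde_json::json;

// Wire-format sketch of one tool definition; the get_weather
// function and its schema are hypothetical, for illustration only.
let tool = json!({
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": { "city": { "type": "string" } },
            "required": ["city"]
        }
    }
});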
tool_choice: Option<ChatCompletionToolChoiceOption>
parallel_tool_calls: Option<ParallelToolCalls>
function_call: Option<CreateChatCompletionRequest_Variant2_FunctionCall>
Deprecated in favor of tool_choice.
functions: Option<Vec<ChatCompletionFunctions>>
Deprecated in favor of tools.
Trait Implementations
impl Clone for CreateChatCompletionRequest_Variant2
fn clone(&self) -> CreateChatCompletionRequest_Variant2
Returns a copy of the value.
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source.