pub struct CreateChatCompletionRequest {Show 31 fields
pub metadata: Option<Metadata>,
pub temperature: Option<f64>,
pub top_p: Option<f64>,
pub user: Option<String>,
pub service_tier: Option<ServiceTier>,
pub messages: Vec<ChatCompletionRequestMessage>,
pub model: ModelIdsShared,
pub modalities: Option<ResponseModalities>,
pub reasoning_effort: Option<ReasoningEffort>,
pub max_completion_tokens: Option<i64>,
pub frequency_penalty: Option<f64>,
pub presence_penalty: Option<f64>,
pub web_search_options: Option<CreateChatCompletionRequest_Variant2_WebSearchOptions>,
pub top_logprobs: Option<i64>,
pub response_format: Option<CreateChatCompletionRequest_Variant2_ResponseFormat>,
pub audio: Option<CreateChatCompletionRequest_Variant2_Audio>,
pub store: Option<bool>,
pub stream: Option<bool>,
pub stop: Option<StopConfiguration>,
pub logit_bias: Option<CreateChatCompletionRequest_LogitBias>,
pub logprobs: Option<bool>,
pub max_tokens: Option<i64>,
pub n: Option<i64>,
pub prediction: Option<PredictionContent>,
pub seed: Option<i64>,
pub stream_options: Option<ChatCompletionStreamOptions>,
pub tools: Option<Vec<ChatCompletionTool>>,
pub tool_choice: Option<ChatCompletionToolChoiceOption>,
pub parallel_tool_calls: Option<ParallelToolCalls>,
pub function_call: Option<CreateChatCompletionRequest_Variant2_FunctionCall>,
pub functions: Option<Vec<ChatCompletionFunctions>>,
}
Fields

metadata: Option<Metadata>
temperature: Option<f64>
What sampling temperature to use, between 0 and 2.
top_p: Option<f64>
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
user: Option<String>
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
service_tier: Option<ServiceTier>
messages: Vec<ChatCompletionRequestMessage>
A list of messages comprising the conversation so far.
model: ModelIdsShared
Model ID used to generate the response, like gpt-4o or o3.
modalities: Option<ResponseModalities>
reasoning_effort: Option<ReasoningEffort>
max_completion_tokens: Option<i64>
An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
frequency_penalty: Option<f64>
Number between -2.0 and 2.0.
presence_penalty: Option<f64>
Number between -2.0 and 2.0.
web_search_options: Option<CreateChatCompletionRequest_Variant2_WebSearchOptions>
top_logprobs: Option<i64>
An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability.
response_format: Option<CreateChatCompletionRequest_Variant2_ResponseFormat>
audio: Option<CreateChatCompletionRequest_Variant2_Audio>
store: Option<bool>
Whether or not to store the output of this chat completion request for use in our model distillation or evals products.
stream: Option<bool>
If set to true, the model response data will be streamed to the client as it is generated using server-sent events.
stop: Option<StopConfiguration>
logit_bias: Option<CreateChatCompletionRequest_LogitBias>
Modify the likelihood of specified tokens appearing in the completion.
logprobs: Option<bool>
Whether to return log probabilities of the output tokens or not.
max_tokens: Option<i64>
The maximum number of tokens that can be generated in the chat completion.
n: Option<i64>
How many chat completion choices to generate for each input message.
prediction: Option<PredictionContent>
seed: Option<i64>
This feature is in Beta.
stream_options: Option<ChatCompletionStreamOptions>
tools: Option<Vec<ChatCompletionTool>>
A list of tools the model may call.
tool_choice: Option<ChatCompletionToolChoiceOption>
parallel_tool_calls: Option<ParallelToolCalls>
function_call: Option<CreateChatCompletionRequest_Variant2_FunctionCall>
functions: Option<Vec<ChatCompletionFunctions>>
Deprecated in favor of tools.
Trait Implementations

impl Clone for CreateChatCompletionRequest

fn clone(&self) -> CreateChatCompletionRequest
Returns a duplicate of the value.

fn clone_from(&mut self, source: &Self) (stable since 1.0.0)
Performs copy-assignment from source. Read more