pub struct CreateChatCompletionRequest {
pub max_tokens: Option<i64>,
pub messages: Vec<Message>,
pub model: String,
pub reasoning_format: Option<String>,
pub stream: bool,
pub stream_options: Option<ChatCompletionStreamOptions>,
pub tools: Vec<ChatCompletionTool>,
}
Expand description
CreateChatCompletionRequest
JSON schema
{
"type": "object",
"required": [
"messages",
"model"
],
"properties": {
"max_tokens": {
"description": "An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.\n",
"type": "integer"
},
"messages": {
"description": "A list of messages comprising the conversation so far.\n",
"type": "array",
"items": {
"$ref": "#/definitions/Message"
},
"minItems": 1
},
"model": {
"description": "Model ID to use",
"type": "string"
},
"reasoning_format": {
"description": "The format of the reasoning content. Can be `raw` or `parsed`.\nWhen specified as raw some reasoning models will output <think /> tags. When specified as parsed the model will output the reasoning under `reasoning` or `reasoning_content` attribute.\n",
"type": "string"
},
"stream": {
"description": "If set to true, the model response data will be streamed to the client as it is generated using [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format).\n",
"default": false,
"type": "boolean"
},
"stream_options": {
"$ref": "#/definitions/ChatCompletionStreamOptions"
},
"tools": {
"description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.\n",
"type": "array",
"items": {
"$ref": "#/definitions/ChatCompletionTool"
}
}
}
}
Fields§
§max_tokens: Option<i64>An upper bound for the number of tokens that can be generated for a completion, including visible output tokens and reasoning tokens.
messages: Vec<Message>A list of messages comprising the conversation so far.
model: StringModel ID to use
reasoning_format: Option<String>The format of the reasoning content. Can be raw or parsed.
When specified as raw some reasoning models will output <think /> tags. When specified as parsed the model will output the reasoning under reasoning or reasoning_content attribute.
stream: boolIf set to true, the model response data will be streamed to the client as it is generated using server-sent events.
stream_options: Option<ChatCompletionStreamOptions>
§tools: Vec<ChatCompletionTool>A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
Trait Implementations§
Source§impl Clone for CreateChatCompletionRequest
impl Clone for CreateChatCompletionRequest
Source§fn clone(&self) -> CreateChatCompletionRequest
fn clone(&self) -> CreateChatCompletionRequest
1.0.0 (const: unstable) · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more