// openai_struct/models/assistants_api_response_format_option.rs
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

use crate::{ResponseFormatJsonObject, ResponseFormatJsonSchema, ResponseFormatText};
/// AssistantsApiResponseFormatOption : Specifies the format that the model must output. Compatible with [GPT-4o](/docs/models#gpt-4o), [GPT-4 Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. Setting to `{ \"type\": \"json_schema\", \"json_schema\": {...} }` enables Structured Outputs which ensures the model will match your supplied JSON schema. Learn more in the [Structured Outputs guide](/docs/guides/structured-outputs). Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which ensures the message the model generates is valid JSON. **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.

#[allow(unused_imports)]
use serde_json::Value;

/// # on openapi.yaml
///
/// ```yaml
/// AssistantsApiResponseFormatOption:
///   description: >
///     Specifies the format that the model must output. Compatible with
///     [GPT-4o](/docs/models#gpt-4o), [GPT-4
///     Turbo](/docs/models#gpt-4-turbo-and-gpt-4), and all GPT-3.5 Turbo models
///     since `gpt-3.5-turbo-1106`.
///
///
///     Setting to `{ "type": "json_schema", "json_schema": {...} }` enables
///     Structured Outputs which ensures the model will match your supplied JSON
///     schema. Learn more in the [Structured Outputs
///     guide](/docs/guides/structured-outputs).
///
///
///     Setting to `{ "type": "json_object" }` enables JSON mode, which ensures
///     the message the model generates is valid JSON.
///
///
///     **Important:** when using JSON mode, you **must** also instruct the
///     model to produce JSON yourself via a system or user message. Without
///     this, the model may generate an unending stream of whitespace until the
///     generation reaches the token limit, resulting in a long-running and
///     seemingly "stuck" request. Also note that the message content may be
///     partially cut off if `finish_reason="length"`, which indicates the
///     generation exceeded `max_tokens` or the conversation exceeded the max
///     context length.
///   oneOf:
///     - type: string
///       description: |
///         `auto` is the default value
///       enum:
///         - auto
///       x-stainless-const: true
///     - $ref: "#/components/schemas/ResponseFormatText"
///     - $ref: "#/components/schemas/ResponseFormatJsonObject"
///     - $ref: "#/components/schemas/ResponseFormatJsonSchema"
/// ```
///
/// # Representation notes
///
/// The enum is internally tagged on the JSON `type` field
/// (`#[serde(tag = "type")]`), so each variant (de)serializes as a JSON
/// object of the shape `{ "type": "<tag>", ... }` — assuming the inner
/// `ResponseFormat*` structs do not themselves serialize a conflicting
/// `type` field (TODO confirm against their definitions).
///
/// NOTE(review): the OpenAPI `oneOf` above also permits the bare string
/// `"auto"`, which this enum currently cannot represent — an internally
/// tagged serde enum only handles JSON objects. Confirm whether callers
/// need to send/receive `"auto"`; supporting it would require an untagged
/// outer layer or a custom Serialize/Deserialize impl, which changes the
/// wire handling of this type and is deliberately not done here.
#[derive(Debug, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum AssistantsApiResponseFormatOption {
    /// Serialized as `{ "type": "text", ... }` (see `ResponseFormatText`).
    #[serde(rename = "text")]
    Text(ResponseFormatText),
    /// Serialized as `{ "type": "json_object", ... }` — JSON mode
    /// (see `ResponseFormatJsonObject`).
    #[serde(rename = "json_object")]
    Object(ResponseFormatJsonObject),
    /// Serialized as `{ "type": "json_schema", ... }` — Structured Outputs
    /// constrained by a caller-supplied schema (see `ResponseFormatJsonSchema`).
    #[serde(rename = "json_schema")]
    Schema(ResponseFormatJsonSchema),
}