openai_struct/models/prediction_content.rs

/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

/// PredictionContent : Static predicted output content, such as the content of a text file that is being regenerated.

#[allow(unused_imports)]
use serde_json::Value;

/// # on openapi.yaml
///
/// ```yaml
/// PredictionContent:
///   type: object
///   title: Static Content
///   description: >
///     Static predicted output content, such as the content of a text file that
///     is
///
///     being regenerated.
///   required:
///     - type
///     - content
///   properties:
///     type:
///       type: string
///       enum:
///         - content
///       description: |
///         The type of the predicted content you want to provide. This type is
///         currently always `content`.
///       x-stainless-const: true
///     content:
///       description: >
///         The content that should be matched when generating a model response.
///
///         If generated tokens would match this content, the entire model
///         response
///
///         can be returned much more quickly.
///       oneOf:
///         - type: string
///           title: Text content
///           description: |
///             The content used for a Predicted Output. This is often the
///             text of a file you are regenerating with minor changes.
///         - type: array
///           description:
///             An array of content parts with a defined type. Supported options
///             differ based on the [model](/docs/models) being used to generate
///             the response. Can contain text inputs.
///           title: Array of content parts
///           items:
///             $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText"
///           minItems: 1
/// ```
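///
/// # Example
///
/// A minimal construction sketch (not compiled as a doc test). It assumes the
/// model types are re-exported at the crate root, as the `crate::` path in
/// `PredictionContentContent` suggests; adjust the `use` line if your import
/// paths differ.
///
/// ```ignore
/// use openai_struct::{PredictionContent, PredictionContentContent};
///
/// // Predict the full text of a file the model is expected to regenerate.
/// let prediction = PredictionContent {
///     content: PredictionContentContent::Text("fn main() {}".to_string()),
///     _type: "content".to_string(),
/// };
///
/// // Thanks to `#[serde(rename = "type")]`, this serializes as:
/// // {"content":"fn main() {}","type":"content"}
/// ```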
#[derive(Debug, Serialize, Deserialize)]
pub struct PredictionContent {
    /// The content that should be matched when generating a model response. If generated tokens would match this content, the entire model response can be returned much more quickly.
    #[serde(rename = "content")]
    pub content: PredictionContentContent,
    /// The type of the predicted content you want to provide. This type is currently always `content`.
    #[serde(rename = "type")]
    pub _type: String,
}

/// # on openapi.yaml
///
/// ```yaml
/// oneOf:
///   - type: string
///     title: Text content
///     description: |
///       The content used for a Predicted Output. This is often the
///       text of a file you are regenerating with minor changes.
///   - type: array
///     description:
///       An array of content parts with a defined type. Supported options
///       differ based on the [model](/docs/models) being used to generate
///       the response. Can contain text inputs.
///     title: Array of content parts
///     items:
///       $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText"
///     minItems: 1
/// ```
#[derive(Debug, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PredictionContentContent {
    Text(String),
    Array(Vec<crate::ChatCompletionRequestMessageContentPartText>),
}
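
// A small serde round-trip sketch for the untagged `content` field, kept behind
// `#[cfg(test)]` so it only builds with `cargo test`. It relies only on items
// defined in this module plus the crate's existing `serde_json` dependency: a
// `Text` variant serializes to a bare JSON string (no enum wrapper), and a
// plain JSON string deserializes back into `Text`.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn text_content_serializes_untagged() {
        let prediction = PredictionContent {
            content: PredictionContentContent::Text("fn main() {}".to_string()),
            _type: "content".to_string(),
        };
        let json = serde_json::to_string(&prediction).expect("serialization should succeed");
        // Field order follows the struct declaration: `content`, then `type`.
        assert_eq!(json, r#"{"content":"fn main() {}","type":"content"}"#);
    }

    #[test]
    fn string_content_deserializes_to_text_variant() {
        let json = r#"{"content":"hello world","type":"content"}"#;
        let prediction: PredictionContent =
            serde_json::from_str(json).expect("deserialization should succeed");
        match prediction.content {
            PredictionContentContent::Text(text) => assert_eq!(text, "hello world"),
            PredictionContentContent::Array(_) => panic!("expected the Text variant"),
        }
    }
}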