// openai_struct/models/create_chat_completion_stream_response.rs
/*
 * OpenAI API
 *
 * The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
 *
 * OpenAPI spec version: 2.3.0
 *
 * Generated by: https://github.com/swagger-api/swagger-codegen.git
 */

/// CreateChatCompletionStreamResponse : Represents a streamed chunk of a chat completion response returned by the model, based on the provided input. [Learn more](/docs/guides/streaming-responses).

13#[allow(unused_imports)]
14use serde_json::Value;
15
16#[derive(Debug, Serialize, Deserialize)]
17pub struct CreateChatCompletionStreamResponse {
18 /// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {\"include_usage\": true}`.
19 #[serde(rename = "choices")]
20 pub choices: Vec<crate::models::CreateChatCompletionStreamResponseChoices>,
21 /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
22 #[serde(rename = "created")]
23 pub created: i32,
24 /// A unique identifier for the chat completion. Each chunk has the same ID.
25 #[serde(rename = "id")]
26 pub id: String,
27 /// The model to generate the completion.
28 #[serde(rename = "model")]
29 pub model: String,
30 /// The object type, which is always `chat.completion.chunk`.
31 #[serde(rename = "object")]
32 pub object: String,
33 #[serde(rename = "service_tier")]
34 pub service_tier: Option<crate::models::ServiceTier>,
35 /// This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
36 #[serde(rename = "system_fingerprint")]
37 pub system_fingerprint: Option<String>,
38 /// An optional field that will only be present when you set `stream_options: {\"include_usage\": true}` in your request. When present, it contains a null value **except for the last chunk** which contains the token usage statistics for the entire request. **NOTE:** If the stream is interrupted or cancelled, you may not receive the final usage chunk which contains the total token usage for the request.
39 #[serde(rename = "usage")]
40 pub usage: Option<crate::models::CompletionUsage>,
41}
43impl Default for CreateChatCompletionStreamResponse {
44 fn default() -> CreateChatCompletionStreamResponse {
45 Self {
46 choices: vec![],
47 created: 0,
48 id: "".to_string(),
49 model: "".to_string(),
50 object: "".to_string(),
51 service_tier: None,
52 system_fingerprint: None,
53 usage: None,
54 }
55 }
56}