/*
* OpenAI API
*
* The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
*
* The version of the OpenAPI document: 2.3.0
*
* Generated by: https://openapi-generator.tech
*/
use crate::models;
use serde::{Deserialize, Serialize};
/// CreateChatCompletionStreamResponse : Represents a streamed chunk of a chat completion response returned by the model, based on the provided input. [Learn more](/docs/guides/streaming-responses).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, bon::Builder)]
pub struct CreateChatCompletionStreamResponse {
/// A unique identifier for the chat completion. Each chunk has the same ID.
#[serde(rename = "id")]
pub id: String,
/// A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {\"include_usage\": true}`.
#[serde(rename = "choices")]
pub choices: Vec<models::CreateChatCompletionStreamResponseChoicesInner>,
/// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
#[serde(rename = "created")]
pub created: i32,
/// The model to generate the completion.
#[serde(rename = "model")]
pub model: String,
/// The service tier used for processing the request.
// `double_option` distinguishes a missing `service_tier` key (outer `None`)
// from an explicit JSON `null` (inner `None`) during deserialization.
#[serde(
rename = "service_tier",
default,
with = "::serde_with::rust::double_option",
skip_serializing_if = "Option::is_none"
)]
pub service_tier: Option<Option<models::ServiceTier>>,
/// This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
#[serde(rename = "system_fingerprint", skip_serializing_if = "Option::is_none")]
pub system_fingerprint: Option<String>,
/// The object type, which is always `chat.completion.chunk`.
#[serde(rename = "object")]
pub object: Object,
/// Token usage statistics for the request. Only present on the final chunk
/// when usage reporting is enabled; boxed to keep this struct small.
#[serde(rename = "usage", skip_serializing_if = "Option::is_none")]
pub usage: Option<Box<models::CompletionUsage>>,
}
impl CreateChatCompletionStreamResponse {
    /// Builds a streamed chat-completion chunk from the required fields.
    ///
    /// Represents a streamed chunk of a chat completion response returned by the
    /// model, based on the provided input. [Learn more](/docs/guides/streaming-responses).
    /// All optional fields (`service_tier`, `system_fingerprint`, `usage`) start
    /// out as `None`.
    pub fn new(
        id: String,
        choices: Vec<models::CreateChatCompletionStreamResponseChoicesInner>,
        created: i32,
        model: String,
        object: Object,
    ) -> CreateChatCompletionStreamResponse {
        Self {
            id,
            choices,
            created,
            model,
            object,
            service_tier: None,
            system_fingerprint: None,
            usage: None,
        }
    }
}
/// The object type, which is always `chat.completion.chunk`.
// `#[derive(Default)]` with `#[default]` on the variant replaces the previous
// hand-written `impl Default for Object`, which duplicated what the derive
// expresses (stable since Rust 1.62). `Object::default()` is unchanged.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)]
pub enum Object {
    /// The only value this enum can hold for a streaming chunk.
    #[default]
    #[serde(rename = "chat.completion.chunk")]
    ChatCompletionChunk,
}
impl std::fmt::Display for CreateChatCompletionStreamResponse {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match serde_json::to_string(self) {
Ok(s) => write!(f, "{}", s),
Err(_) => Err(std::fmt::Error),
}
}
}