async_llm/response/chat.rs

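//! Chat completion response types: [`ChatResponse`] for regular requests and
//! [`ChatResponseStream`] for streamed chunks.
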
use serde::{Deserialize, Serialize};

use crate::{
    types::{ChatChoice, ChatChoiceStream, CompletionUsage, CompletionUsageStream},
    Error, Printable,
};

use super::Respondable;

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatResponse {
    /// A unique identifier for the chat completion.
    pub id: Option<String>,

    /// A list of chat completion choices. Can be more than one if `n` is greater than 1.
    pub choices: Vec<ChatChoice>,

    /// The Unix timestamp (in seconds) of when the chat completion was created.
    pub created: Option<u32>,

    /// The model used for the chat completion.
    pub model: Option<String>,

    /// The service tier used for processing the request.
    pub service_tier: Option<String>,

    /// This fingerprint represents the backend configuration that the model runs with.
    ///
    /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
    pub system_fingerprint: Option<String>,

    /// The object type, which is always `chat.completion`.
    pub object: Option<String>,

    /// Usage statistics for the completion request.
    pub usage: Option<CompletionUsage>,
}

#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct ChatResponseStream {
    /// A unique identifier for the chat completion. Each chunk has the same ID.
    pub id: Option<String>,

    /// A list of chat completion choices. Can contain more than one element if `n` is greater than 1. Can also be empty for the last chunk if you set `stream_options: {"include_usage": true}`.
    pub choices: Vec<ChatChoiceStream>,

    /// The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp.
    pub created: Option<u32>,

    /// The model used to generate the completion.
    pub model: Option<String>,

    /// The service tier used for processing the request.
    pub service_tier: Option<String>,

    /// This fingerprint represents the backend configuration that the model runs with.
    ///
    /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
    pub system_fingerprint: Option<String>,

    /// The object type, which is always `chat.completion.chunk`.
    pub object: Option<String>,

    /// Usage statistics for the completion request.
    pub usage: Option<CompletionUsageStream>,
}

impl Respondable for ChatResponse {
    fn is_success(&self) -> bool {
        true
    }
}

impl Printable for ChatResponse {
    fn to_string_pretty(&self) -> Result<String, Error> {
        Ok(serde_json::to_string_pretty(self)?)
    }
}

impl Respondable for ChatResponseStream {
    fn is_success(&self) -> bool {
        true
    }
}

impl Printable for ChatResponseStream {
    fn to_string_pretty(&self) -> Result<String, Error> {
        Ok(serde_json::to_string_pretty(self)?)
    }
}
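
// A minimal sketch of how these types can be exercised, assuming serde's
// default handling of absent `Option` fields (they deserialize to `None`).
// The JSON payloads and the id/model values below are hypothetical examples,
// not captured API responses.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn deserializes_chat_response() {
        // Hypothetical completion payload with an empty `choices` array.
        let json = r#"{
            "id": "chatcmpl-123",
            "object": "chat.completion",
            "created": 1700000000,
            "model": "gpt-4o-mini",
            "choices": []
        }"#;

        let response: ChatResponse =
            serde_json::from_str(json).expect("valid ChatResponse JSON");
        assert_eq!(response.id.as_deref(), Some("chatcmpl-123"));
        assert_eq!(response.object.as_deref(), Some("chat.completion"));
        assert!(response.choices.is_empty());
        assert!(response.usage.is_none());
        assert!(response.is_success());
        // `Printable` renders the response back as pretty-printed JSON.
        assert!(response.to_string_pretty().is_ok());
    }

    #[test]
    fn deserializes_chat_response_stream_chunk() {
        // Hypothetical streaming chunk; every chunk in a stream shares the
        // same `id` and `created` values.
        let json = r#"{
            "id": "chatcmpl-123",
            "object": "chat.completion.chunk",
            "created": 1700000000,
            "model": "gpt-4o-mini",
            "choices": []
        }"#;

        let chunk: ChatResponseStream =
            serde_json::from_str(json).expect("valid ChatResponseStream JSON");
        assert_eq!(chunk.object.as_deref(), Some("chat.completion.chunk"));
        assert!(chunk.is_success());
        assert!(chunk.to_string_pretty().is_ok());
    }
}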