openai_interface/chat/response.rs

pub mod streaming {
    use std::str::FromStr;

    use serde::Deserialize;

    use crate::errors::ResponseError;

    #[derive(Debug, Deserialize)]
    pub struct ChatCompletionChunk {
        /// A unique identifier for the chat completion.
        pub id: String,
        /// A list of chat completion choices. Can be more than one
        /// if `n` is greater than 1. Can also be empty for the last chunk if you set
        /// `stream_options: {"include_usage": true}`.
        pub choices: Vec<CompletionChunkChoice>,
        /// The Unix timestamp (in seconds) of when the chat completion was created.
        /// Each chunk has the same timestamp.
        pub created: u64,
        /// The model used for the chat completion.
        pub model: String,
        /// The object type, which is always `chat.completion.chunk`.
        pub object: ChatCompletionChunkObject,
        /// The service tier used for processing the request.
        pub service_tier: Option<ServiceTier>,
        /// This fingerprint represents the backend configuration that the model runs with.
        /// Can be used in conjunction with the `seed` request parameter to understand when
        /// backend changes have been made that might impact determinism.
        pub system_fingerprint: Option<String>,
        /// An optional field that will only be present when you set
        /// `stream_options: {"include_usage": true}` in your request. When present, it
        /// contains a null value **except for the last chunk** which contains the token
        /// usage statistics for the entire request.
        ///
        /// **NOTE:** If the stream is interrupted or cancelled, you may not receive the
        /// final usage chunk which contains the total token usage for the request.
        pub usage: Option<CompletionUsage>,
    }

    #[derive(Debug, Deserialize)]
    pub enum ChatCompletionChunkObject {
        #[serde(rename = "chat.completion.chunk")]
        ChatCompletionChunk,
    }

    /// The service tier used for processing the request.
    ///
    /// This enum represents the different service tiers that can be specified when
    /// making a request to the API. Each tier corresponds to different performance
    /// characteristics and pricing models.
    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "lowercase")]
    pub enum ServiceTier {
        /// Automatically select the service tier based on project settings.
        Auto,
        /// Use the default service tier with standard pricing and performance.
        Default,
        /// Use the flex service tier for flexible processing requirements.
        Flex,
        /// Use the scale service tier for scalable processing needs.
        Scale,
        /// Use the priority service tier for high-priority requests.
        Priority,
    }

    #[derive(Debug, Deserialize)]
    pub struct CompletionChunkChoice {
        /// A chat completion delta generated by streamed model responses.
        pub delta: ChoiceDelta,
        /// The index of the choice in the list of choices.
        pub index: u32,
        /// Log probability information for the choice.
        pub logprobs: Option<ChoiceLogprobs>,
        /// The reason the model stopped generating tokens.
        ///
        /// This will be `stop` if the model hit a natural stop point or a provided stop
        /// sequence, `length` if the maximum number of tokens specified in the request was
        /// reached, `content_filter` if content was omitted due to a flag from our content
        /// filters, `tool_calls` if the model called a tool, or `function_call`
        /// (deprecated) if the model called a function.
        pub finish_reason: Option<FinishReason>,
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum FinishReason {
        /// The maximum number of tokens specified in the request was reached.
        Length,
        /// The model hit a natural stop point or a provided stop sequence.
        Stop,
        /// Content was omitted due to a flag from our content filters.
        ContentFilter,
        /// The model called a function (deprecated).
        FunctionCall,
        /// The model called a tool.
        ToolCalls,
        /// DeepSeek-specific: generation was interrupted because of insufficient
        /// system resources. Documented only in DeepSeek's API manual.
        InsufficientSystemResource,
    }

    #[derive(Debug, Deserialize)]
    pub struct ChoiceDelta {
        /// The contents of the chunk message.
        #[serde(flatten)]
        pub content: Option<CompletionContent>,
        /// Deprecated and replaced by `tool_calls`.
        ///
        /// The name and arguments of a function that should be called, as generated by the
        /// model.
        pub function_call: Option<ChoiceDeltaFunctionCall>,
        /// The refusal message generated by the model.
        pub refusal: Option<String>,
        /// The role of the author of this message.
        pub role: Option<CompletionRole>,
        /// A list of tool calls generated by the model, such as function calls.
        pub tool_calls: Option<Vec<ChoiceDeltaToolCall>>,
    }

    #[derive(Debug, Deserialize)]
    pub struct ChoiceDeltaToolCallFunction {
        /// The arguments to call the function with, as generated by the model in JSON
        /// format. Note that the model does not always generate valid JSON, and may
        /// hallucinate parameters not defined by your function schema. Validate the
        /// arguments in your code before calling your function.
        pub arguments: Option<String>,
        /// The name of the function to call.
        pub name: Option<String>,
    }

    #[derive(Debug, Deserialize)]
    pub struct ChoiceDeltaFunctionCall {
        /// The arguments to call the function with, as generated by the model in JSON
        /// format. Note that the model does not always generate valid JSON, and may
        /// hallucinate parameters not defined by your function schema. Validate the
        /// arguments in your code before calling your function.
        pub arguments: Option<String>,
        /// The name of the function to call.
        pub name: Option<String>,
    }

    #[derive(Debug, Deserialize)]
    pub struct ChoiceDeltaToolCall {
        /// The index of the tool call in the list of tool calls.
        pub index: usize,
        /// The ID of the tool call.
        pub id: Option<String>,
        /// The function that the model called.
        pub function: Option<ChoiceDeltaToolCallFunction>,
        /// The type of the tool. Currently, only `function` is supported.
        #[serde(rename = "type")]
        pub type_: Option<ChoiceDeltaToolCallType>,
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum ChoiceDeltaToolCallType {
        Function,
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum CompletionRole {
        Assistant,
        Developer,
        System,
        Tool,
        User,
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum CompletionContent {
        Content(String),
        /// For deepseek-reasoner model only.
        ReasoningContent(String),
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum ChoiceLogprobs {
        Content(Vec<LogprobeContent>),
        /// For deepseek-reasoner model only.
        ReasoningContent(Vec<LogprobeContent>),
    }

    /// A list of message content tokens with log probability information.
    #[derive(Debug, Deserialize)]
    pub struct LogprobeContent {
        pub token: String,
        pub logprob: f32,
        pub bytes: Option<Vec<u8>>,
        pub top_logprobs: Vec<TopLogprob>,
    }

    /// List of the most likely tokens and their log probability at this token
    /// position. In rare cases, there may be fewer than the number of requested
    /// `top_logprobs` returned.
    #[derive(Debug, Deserialize)]
    pub struct TopLogprob {
        pub token: String,
        pub logprob: f32,
        pub bytes: Option<Vec<u8>>,
    }

    #[derive(Debug, Deserialize)]
    pub struct CompletionUsage {
        /// Number of tokens in the generated completion.
        pub completion_tokens: usize,
        /// Number of tokens in the prompt.
        pub prompt_tokens: usize,

        // These two fields appear to be DeepSeek-specific.
        /// Number of tokens in the prompt that hit the context cache.
        pub prompt_cache_hit_tokens: Option<usize>,
        /// Number of tokens in the prompt that miss the context cache.
        pub prompt_cache_miss_tokens: Option<usize>,

        /// Total number of tokens used in the request (prompt + completion).
        pub total_tokens: usize,
        /// Breakdown of tokens used in a completion.
        pub completion_tokens_details: Option<CompletionTokensDetails>,
        /// Breakdown of tokens used in the prompt.
        pub prompt_tokens_details: Option<PromptTokensDetails>,
    }

    #[derive(Debug, Deserialize)]
    pub struct CompletionTokensDetails {
        /// When using Predicted Outputs, the number of tokens in the prediction that
        /// appeared in the completion.
        pub accepted_prediction_tokens: Option<usize>,
        /// Audio input tokens generated by the model.
        pub audio_tokens: Option<usize>,
        /// Tokens generated by the model for reasoning.
        pub reasoning_tokens: Option<usize>,
        /// When using Predicted Outputs, the number of tokens in the prediction that did
        /// not appear in the completion. However, like reasoning tokens, these tokens are
        /// still counted in the total completion tokens for purposes of billing, output,
        /// and context window limits.
        pub rejected_prediction_tokens: Option<usize>,
    }

    #[derive(Debug, Deserialize)]
    pub struct PromptTokensDetails {
        /// Audio input tokens present in the prompt.
        pub audio_tokens: Option<usize>,
        /// Cached tokens present in the prompt.
        pub cached_tokens: Option<usize>,
    }

    impl FromStr for ChatCompletionChunk {
        type Err = crate::errors::ResponseError;

        fn from_str(content: &str) -> Result<Self, Self::Err> {
            serde_json::from_str(content)
                .map_err(|e| ResponseError::DeserializationError(e.to_string()))
        }
    }

    #[cfg(test)]
    mod test {
        use super::*;

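        // A minimal usage sketch, not part of this crate's public API: servers that
        // follow the OpenAI streaming convention frame each chunk as an SSE line of
        // the form `data: <json>` and terminate the stream with `data: [DONE]`. The
        // framing below is an assumption about that convention; only the JSON payload
        // is handled by `ChatCompletionChunk::from_str`.
        #[test]
        fn sse_line_handling_sketch() {
            let lines = vec![
                r#"data: {"id": "x", "choices": [], "created": 0, "model": "any", "object": "chat.completion.chunk"}"#,
                "data: [DONE]",
            ];

            let mut parsed_chunks = 0;
            for line in lines {
                if let Some(payload) = line.strip_prefix("data: ") {
                    if payload == "[DONE]" {
                        continue;
                    }
                    match ChatCompletionChunk::from_str(payload) {
                        Ok(chunk) => {
                            assert_eq!(chunk.model, "any");
                            parsed_chunks += 1;
                        }
                        Err(e) => panic!("Failed to deserialize {}: {}", payload, e),
                    }
                }
            }
            assert_eq!(parsed_chunks, 1);
        }
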
        #[test]
        fn streaming_example_deepseek() {
            let streams = vec![
                r#"{"id": "1f633d8bfc032625086f14113c411638", "choices": [{"index": 0, "delta": {"content": "", "role": "assistant"}, "finish_reason": null, "logprobs": null}], "created": 1718345013, "model": "deepseek-chat", "system_fingerprint": "fp_a49d71b8a1", "object": "chat.completion.chunk", "usage": null}"#,
                r#"{"choices": [{"delta": {"content": "Hello", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": "!", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": " How", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": " can", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": " I", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": " assist", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": " you", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": " today", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": "?", "role": "assistant"}, "finish_reason": null, "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1"}"#,
                r#"{"choices": [{"delta": {"content": "", "role": null}, "finish_reason": "stop", "index": 0, "logprobs": null}], "created": 1718345013, "id": "1f633d8bfc032625086f14113c411638", "model": "deepseek-chat", "object": "chat.completion.chunk", "system_fingerprint": "fp_a49d71b8a1", "usage": {"completion_tokens": 9, "prompt_tokens": 17, "total_tokens": 26}}"#,
            ];

            for stream in streams {
                let parsed = ChatCompletionChunk::from_str(stream);
                match parsed {
                    Ok(completion) => {
                        println!("Deserialized: {:#?}", completion);
                    }
                    Err(e) => {
                        panic!("Failed to deserialize {}: {}", stream, e);
                    }
                }
            }
        }
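
        // A minimal usage sketch, not part of the upstream API: the streamed deltas
        // are typically stitched back together into the full assistant reply. The two
        // chunks below are hypothetical, trimmed to the fields that matter here.
        #[test]
        fn accumulate_streamed_content_sketch() {
            let streams = vec![
                r#"{"id": "x", "choices": [{"index": 0, "delta": {"content": "Hello", "role": "assistant"}, "finish_reason": null, "logprobs": null}], "created": 0, "model": "any", "object": "chat.completion.chunk"}"#,
                r#"{"id": "x", "choices": [{"index": 0, "delta": {"content": " world", "role": null}, "finish_reason": "stop", "logprobs": null}], "created": 0, "model": "any", "object": "chat.completion.chunk"}"#,
            ];

            let mut reply = String::new();
            for stream in streams {
                let chunk = match ChatCompletionChunk::from_str(stream) {
                    Ok(chunk) => chunk,
                    Err(e) => panic!("Failed to deserialize {}: {}", stream, e),
                };
                for choice in &chunk.choices {
                    if let Some(CompletionContent::Content(text)) = &choice.delta.content {
                        reply.push_str(text);
                    }
                }
            }
            assert_eq!(reply, "Hello world");
        }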

        #[test]
        fn streaming_example_qwen() {
            let streams = vec![
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"","function_call":null,"refusal":null,"role":"assistant","tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"我是","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"来自","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"阿里","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"云的超大规模","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"语言模型,我","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"叫通义千","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"问。","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":null,"index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[{"delta":{"content":"","function_call":null,"refusal":null,"role":null,"tool_calls":null},"finish_reason":"stop","index":0,"logprobs":null}],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":null}"#,
                r#"{"id":"chatcmpl-e30f5ae7-3063-93c4-90fe-beb5f900bd57","choices":[],"created":1735113344,"model":"qwen-plus","object":"chat.completion.chunk","service_tier":null,"system_fingerprint":null,"usage":{"completion_tokens":17,"prompt_tokens":22,"total_tokens":39,"completion_tokens_details":null,"prompt_tokens_details":{"audio_tokens":null,"cached_tokens":0}}}"#,
            ];

            for stream in streams {
                let parsed = ChatCompletionChunk::from_str(stream);
                match parsed {
                    Ok(completion) => {
                        println!("Deserialized: {:#?}", completion);
                    }
                    Err(e) => {
                        panic!("Failed to deserialize {}: {}", stream, e);
                    }
                }
            }
        }
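
        // A minimal sketch, not part of the upstream API, of how streamed tool-call
        // fragments are typically merged: `index` identifies the call while
        // `function.arguments` arrives incrementally across chunks. The values below
        // are hypothetical.
        #[test]
        fn merge_tool_call_argument_fragments_sketch() {
            let fragments = vec![
                ChoiceDeltaToolCall {
                    index: 0,
                    id: Some("call_0".to_string()),
                    function: Some(ChoiceDeltaToolCallFunction {
                        arguments: Some("{\"location\":".to_string()),
                        name: Some("get_weather".to_string()),
                    }),
                    type_: Some(ChoiceDeltaToolCallType::Function),
                },
                ChoiceDeltaToolCall {
                    index: 0,
                    id: None,
                    function: Some(ChoiceDeltaToolCallFunction {
                        arguments: Some(" \"Paris\"}".to_string()),
                        name: None,
                    }),
                    type_: None,
                },
            ];

            let mut arguments = String::new();
            for fragment in &fragments {
                if let Some(function) = &fragment.function {
                    if let Some(part) = &function.arguments {
                        arguments.push_str(part);
                    }
                }
            }
            assert_eq!(arguments, "{\"location\": \"Paris\"}");
        }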
    }
}

pub mod no_streaming {
    use std::str::FromStr;

    use serde::Deserialize;

    use crate::errors::ResponseError;

    #[derive(Debug, Deserialize)]
    pub struct ChatCompletion {
        /// A unique identifier for the chat completion.
        pub id: String,
        /// A list of chat completion choices. Can be more than one
        /// if `n` is greater than 1.
        pub choices: Vec<Choice>,
        /// The Unix timestamp (in seconds) of when the chat completion was created.
        pub created: u64,
        /// The model used for the chat completion.
        pub model: String,
        /// Specifies the processing type used for serving the request.
        ///
        /// - If set to 'auto', then the request will be processed with the service tier
        ///   configured in the Project settings. Unless otherwise configured, the Project
        ///   will use 'default'.
        /// - If set to 'default', then the request will be processed with the standard
        ///   pricing and performance for the selected model.
        /// - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        ///   '[priority](https://openai.com/api-priority-processing/)', then the request
        ///   will be processed with the corresponding service tier.
        /// - When not set, the default behavior is 'auto'.
        ///
        /// When the `service_tier` parameter is set, the response body will include the
        /// `service_tier` value based on the processing mode actually used to serve the
        /// request. This response value may be different from the value set in the
        /// parameter.
        pub service_tier: Option<ServiceTier>,
        /// The system fingerprint used for the chat completion.
        /// Can be used in conjunction with the `seed` request parameter to understand when
        /// backend changes have been made that might impact determinism.
        pub system_fingerprint: Option<String>,
        /// The object type, which is always `chat.completion`.
        pub object: ChatCompletionObject,
        /// Usage statistics for the completion request.
        pub usage: Option<CompletionUsage>,
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "lowercase")]
    pub enum ServiceTier {
        Auto,
        Default,
        Flex,
        Scale,
        Priority,
    }

    /// The object type, which is always `chat.completion`.
    #[derive(Debug, Deserialize)]
    pub enum ChatCompletionObject {
        /// The object type is always `chat.completion`.
        #[serde(rename = "chat.completion")]
        ChatCompletion,
    }

    #[derive(Debug, Deserialize)]
    pub struct Choice {
        /// The reason the model stopped generating tokens.
        ///
        /// This will be `stop` if the model hit a natural stop point or a provided stop
        /// sequence, `length` if the maximum number of tokens specified in the request was
        /// reached, `content_filter` if content was omitted due to a flag from our content
        /// filters, `tool_calls` if the model called a tool, or `function_call`
        /// (deprecated) if the model called a function.
        pub finish_reason: FinishReason,
        /// The index of the choice in the list of choices.
        pub index: usize,
        /// Log probability information for the choice.
        pub logprobs: Option<ChoiceLogprobs>,
        /// A chat completion message generated by the model.
        pub message: ChatCompletionMessage,
    }

    #[derive(Debug, Deserialize, PartialEq)]
    #[serde(rename_all = "snake_case")]
    pub enum FinishReason {
        Length,
        Stop,
        ToolCalls,
        FunctionCall,
        ContentFilter,
        /// DeepSeek-specific: generation was interrupted because of insufficient
        /// system resources. Documented only in DeepSeek's API manual.
        InsufficientSystemResource,
    }

    /// Fields that are not supported yet:
    /// - _audio_: If the audio output modality is requested, this object contains
    ///   data about the audio response from the model.
    ///   [Learn more from OpenAI](https://platform.openai.com/docs/guides/audio).
    #[derive(Debug, Deserialize)]
    pub struct ChatCompletionMessage {
        /// The role of the author of this message. This is always
        /// `ResponseRole::Assistant`.
        pub role: ResponseRole,
        /// The contents of the message.
        pub content: Option<String>,
        /// The reasoning contents of the message. For the deepseek-reasoner model only.
        pub reasoning_content: Option<String>,
        /// The tool calls generated by the model, such as function calls.
        pub tool_calls: Option<Vec<ChatCompletionMessageToolCall>>,
    }

    #[derive(Debug, Deserialize)]
    #[serde(tag = "type", rename_all = "snake_case")]
    pub enum ChatCompletionMessageToolCall {
        /// A function tool call. The `{"type": "function"}` tag is handled
        /// automatically by serde.
        Function {
            /// The ID of the tool call.
            id: String,
            /// The function that the model called.
            function: MessageToolCallFunction,
        },
        /// A custom tool call. The `{"type": "custom"}` tag is handled
        /// automatically by serde.
        Custom {
            /// The ID of the tool call.
            id: String,
            /// The custom tool that the model called.
            custom: MessageToolCallCustom,
        },
    }

    #[derive(Debug, Deserialize)]
    pub struct MessageToolCallCustom {
        /// The input for the custom tool call generated by the model.
        pub input: String,
        /// The name of the custom tool to call.
        pub name: String,
    }

    #[derive(Debug, Deserialize)]
    pub struct MessageToolCallFunction {
        /// The arguments to call the function with, as generated by the model in JSON
        /// format. Note that the model does not always generate valid JSON, and may
        /// hallucinate parameters not defined by your function schema. Validate the
        /// arguments in your code before calling your function.
        pub arguments: String,
        /// The name of the function to call.
        pub name: String,
    }

    #[derive(Debug, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum ResponseRole {
        /// The role of the response message is always assistant.
        Assistant,
    }

    #[derive(Debug, Deserialize)]
    pub struct ChoiceLogprobs {
        /// A list of message content tokens with log probability information.
        pub content: Option<Vec<TokenLogProb>>,
        /// Only found in DeepSeek's manual.
        pub reasoning_content: Option<Vec<TokenLogProb>>,
        /// A list of message refusal tokens with log probability information.
        pub refusal: Option<Vec<TokenLogProb>>,
    }

    #[derive(Debug, Deserialize)]
    pub struct TokenLogProb {
        /// The token.
        pub token: String,
        /// The log probability of this token, if it is within the top 20 most likely
        /// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
        /// unlikely.
        pub logprob: f32,
        /// A list of integers representing the UTF-8 bytes representation of the token.
        ///
        /// Useful in instances where characters are represented by multiple tokens and
        /// their byte representations must be combined to generate the correct text
        /// representation. Can be `null` if there is no bytes representation for the token.
        pub bytes: Option<Vec<u8>>,
        /// List of the most likely tokens and their log probability, at this token
        /// position. In rare cases, there may be fewer than the number of requested
        /// `top_logprobs` returned.
        pub top_logprobs: Vec<TopLogprob>,
    }

    #[derive(Debug, Deserialize)]
    pub struct TopLogprob {
        /// The token.
        pub token: String,
        /// The log probability of this token, if it is within the top 20 most likely
        /// tokens. Otherwise, the value `-9999.0` is used to signify that the token is
        /// very unlikely.
        pub logprob: f32,
        /// A list of integers representing the UTF-8 bytes representation of the token.
        ///
        /// Useful in instances where characters are represented by multiple tokens and
        /// their byte representations must be combined to generate the correct text
        /// representation. Can be `null` if there is no bytes representation for the token.
        pub bytes: Option<Vec<u8>>,
    }

    #[derive(Debug, Deserialize)]
    pub struct CompletionUsage {
        /// Number of tokens in the generated completion.
        pub completion_tokens: usize,
        /// Number of tokens in the prompt.
        pub prompt_tokens: usize,

        // These two fields appear to be DeepSeek-specific.
        /// Number of tokens in the prompt that hit the context cache.
        pub prompt_cache_hit_tokens: Option<usize>,
        /// Number of tokens in the prompt that miss the context cache.
        pub prompt_cache_miss_tokens: Option<usize>,

        /// Total number of tokens used in the request (prompt + completion).
        pub total_tokens: usize,
        /// Breakdown of tokens used in a completion.
        pub completion_tokens_details: Option<CompletionTokensDetails>,
        /// Breakdown of tokens used in the prompt.
        pub prompt_tokens_details: Option<PromptTokensDetails>,
    }

    #[derive(Debug, Deserialize)]
    pub struct CompletionTokensDetails {
        /// When using Predicted Outputs, the number of tokens in the prediction that
        /// appeared in the completion.
        pub accepted_prediction_tokens: Option<usize>,
        /// Audio input tokens generated by the model.
        pub audio_tokens: Option<usize>,
        /// Tokens generated by the model for reasoning.
        pub reasoning_tokens: Option<usize>,
        /// When using Predicted Outputs, the number of tokens in the prediction that did
        /// not appear in the completion. However, like reasoning tokens, these tokens are
        /// still counted in the total completion tokens for purposes of billing, output,
        /// and context window limits.
        pub rejected_prediction_tokens: Option<usize>,
    }

    #[derive(Debug, Deserialize)]
    pub struct PromptTokensDetails {
        /// Audio input tokens present in the prompt.
        pub audio_tokens: Option<usize>,
        /// Cached tokens present in the prompt.
        pub cached_tokens: Option<usize>,
    }

    impl FromStr for ChatCompletion {
        type Err = crate::errors::ResponseError;

        fn from_str(content: &str) -> Result<Self, Self::Err> {
            serde_json::from_str(content)
                .map_err(|e| ResponseError::DeserializationError(e.to_string()))
        }
    }

    #[cfg(test)]
    mod test {
        use super::*;

        #[test]
        fn no_streaming_example_deepseek() {
            let json = r#"{
              "id": "30f6413a-a827-4cf3-9898-f13a8634b798",
              "object": "chat.completion",
              "created": 1757944111,
              "model": "deepseek-chat",
              "choices": [
                {
                  "index": 0,
                  "message": {
                    "role": "assistant",
                    "content": "Hello! How can I help you today? 😊"
                  },
                  "logprobs": null,
                  "finish_reason": "stop"
                }
              ],
              "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 11,
                "total_tokens": 21,
                "prompt_tokens_details": {
                  "cached_tokens": 0
                },
                "prompt_cache_hit_tokens": 0,
                "prompt_cache_miss_tokens": 10
              },
              "system_fingerprint": "fp_08f168e49b_prod0820_fp8_kvcache"
            }"#;

            let parsed = ChatCompletion::from_str(json);
            match parsed {
                Ok(_) => {}
                Err(e) => {
                    panic!("Failed to deserialize: {}", e);
                }
            }
        }
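
        // A minimal sketch with a hypothetical payload, showing how a single tool
        // call object is expected to deserialize into
        // `ChatCompletionMessageToolCall::Function` via its internally tagged
        // `type` field.
        #[test]
        fn tool_call_function_variant_sketch() {
            let json = r#"{
              "id": "call_0",
              "type": "function",
              "function": {
                "name": "get_weather",
                "arguments": "{\"location\": \"Paris\"}"
              }
            }"#;

            let parsed: Result<ChatCompletionMessageToolCall, _> = serde_json::from_str(json);
            match parsed {
                Ok(ChatCompletionMessageToolCall::Function { id, function }) => {
                    assert_eq!(id, "call_0");
                    assert_eq!(function.name, "get_weather");
                }
                Ok(other) => panic!("Unexpected tool call variant: {:?}", other),
                Err(e) => panic!("Failed to deserialize: {}", e),
            }
        }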
    }
}