// open_ai/resources/chat/completions.rs

1use crate::core::streaming::APIFuture;
2use crate::core::RequestOptions;
3use crate::resource::APIResource;
4use crate::resources::completions as completion_api;
5use crate::shared;
6use crate::{OpenAI, OpenAIObject};
7use futures::executor::block_on;
8use serde::{Deserialize, Serialize};
9use serde_json::Value;
10use std::cell::RefCell;
11use std::collections::HashMap;
12use std::error::Error;
13use std::rc::Rc;
14
/// The `/chat/completions` API resource.
///
/// `client` is `None` until this resource is attached to an `OpenAI` client;
/// `create` will panic if called before that happens.
#[derive(Debug, Clone)]
pub struct Completions {
    /// Shared handle to the underlying API client used to issue requests.
    pub client: Option<APIResource>,
}
19
20impl Completions {
21    pub fn new() -> Self {
22        Completions { client: None }
23    }
24
25    /// Creates a model response for the given chat conversation.
26    pub fn create<'a>(
27        &'a self,
28        body: ChatCompletionCreateParams<'a>,
29    ) -> APIFuture<ChatCompletionCreateParams, ChatCompletion, ChatCompletionChunk> {
30        let stream = body.stream.unwrap_or(false);
31        self.client.clone().unwrap().clone().lock().unwrap().post(
32            "/chat/completions",
33            Some(RequestOptions {
34                body: Some(body),
35                stream: Some(stream),
36                ..Default::default()
37            }),
38        )
39    }
40}
41
/// Represents a chat completion response returned by model, based on the provided
/// input.
///
/// For streamed responses, see [`ChatCompletionChunk`].
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletion {
    /// A unique identifier for the chat completion.
    pub id: String,

    /// A list of chat completion choices. Can be more than one if `n` is greater
    /// than 1.
    pub choices: Vec<chat_completion::Choice>,

    /// The Unix timestamp (in seconds) of when the chat completion was created.
    pub created: u64,

    /// The model used for the chat completion.
    pub model: String,

    /// The object type, which is always `chat.completion`.
    pub object: OpenAIObject,

    /// The service tier used for processing the request. This field is only included if
    /// the `service_tier` parameter is specified in the request.
    pub service_tier: Option<ServiceTier>,

    /// This fingerprint represents the backend configuration that the model runs with.
    ///
    /// Can be used in conjunction with the `seed` request parameter to understand when
    /// backend changes have been made that might impact determinism.
    pub system_fingerprint: Option<String>,

    /// Usage statistics for the completion request.
    pub usage: Option<completion_api::CompletionUsage>,
}
75
/// The reason the model stopped generating tokens.
///
/// Serialized in `snake_case` to match the wire format (`stop`, `length`,
/// `tool_calls`, `content_filter`, `function_call`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum FinishReason {
    /// The model hit a natural stop point or a provided stop sequence.
    #[default]
    Stop,
    /// The maximum number of tokens specified in the request was reached.
    Length,
    /// The model called a tool.
    ToolCalls,
    /// Content was omitted due to a flag from the content filters.
    ContentFilter,
    /// The model called a function (deprecated in favor of `ToolCalls`).
    FunctionCall,
}
86
/// The role of the author of a message; serialized in `snake_case`
/// (`assistant`, `user`, `system`, `tool`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ChatCompletionRole {
    #[default]
    Assistant,
    User,
    System,
    Tool,
}
96
/// The service tier used for processing a request; serialized in
/// `snake_case` (`default`, `scale`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ServiceTier {
    #[default]
    Default,
    Scale,
}
104
/// Nested response types for [`ChatCompletion`].
pub mod chat_completion {
    use super::*;

    /// Represents a chat completion choice.
    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
    pub struct Choice {
        /// The reason the model stopped generating tokens. This will be `stop` if the model
        /// hit a natural stop point or a provided stop sequence, `length` if the maximum
        /// number of tokens specified in the request was reached, `content_filter` if
        /// content was omitted due to a flag from our content filters, `tool_calls` if the
        /// model called a tool, or `function_call` (deprecated) if the model called a
        /// function.
        pub finish_reason: FinishReason,

        /// The index of the choice in the list of choices.
        pub index: u32,

        /// Log probability information for the choice.
        pub logprobs: Option<choice::Logprobs>,

        /// A chat completion message generated by the model.
        pub message: ChatCompletionMessage,
    }
}
129
/// Nested types for [`chat_completion::Choice`].
pub mod choice {
    use super::*;

    /// Log probability information for the choice.
    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
    pub struct Logprobs {
        /// A list of message content tokens with log probability information.
        /// `None` when the API returns `null`.
        pub content: Option<Vec<ChatCompletionTokenLogprob>>,
    }
}
141
142#[derive(Default, Debug, Clone, Deserialize, Serialize)]
143pub struct ChatCompletionAssistantMessageParam {
144    /// The role of the messages author, in this case `assistant`.
145    pub role: String, // 'assistant'
146
147    /// The contents of the assistant message. Required unless `tool_calls` or
148    /// `function_call` is specified.
149    pub content: Option<String>,
150
151    /// An optional name for the participant. Provides the model information to
152    /// differentiate between participants of the same role.
153    #[serde(skip_serializing_if = "Option::is_none")]
154    pub name: Option<String>,
155
156    /// The tool calls generated by the model, such as function calls.
157    #[serde(skip_serializing_if = "Option::is_none")]
158    pub tool_calls: Option<Vec<ChatCompletionMessageToolCall>>,
159
160    #[deprecated(
161        note = "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
162    )]
163    pub function_call: Option<chat_completion_assistant_message_param::FunctionCall>,
164}
165
/// Nested types for [`ChatCompletionAssistantMessageParam`].
pub mod chat_completion_assistant_message_param {
    use super::*;

    #[deprecated(
        note = "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
    )]
    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
    pub struct FunctionCall {
        /// The arguments to call the function with, as generated by the model in JSON
        /// format. Note that the model does not always generate valid JSON, and may
        /// hallucinate parameters not defined by your function schema. Validate the
        /// arguments in your code before calling your function.
        pub arguments: String,

        /// The name of the function to call.
        pub name: String,
    }
}
184
/// Represents a streamed chunk of a chat completion response returned by model,
/// based on the provided input.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionChunk {
    /// A unique identifier for the chat completion. Each chunk has the same ID.
    pub id: String,

    /// A list of chat completion choices. Can contain more than one elements if `n` is
    /// greater than 1. Can also be empty for the last chunk if you set
    /// `stream_options: {"include_usage": true}`.
    pub choices: Vec<chat_completion_chunk::Choice>,

    /// The Unix timestamp (in seconds) of when the chat completion was created. Each
    /// chunk has the same timestamp.
    pub created: u64,

    /// The model to generate the completion.
    pub model: String,

    /// The object type, which is always `chat.completion.chunk`.
    // NOTE(review): `ChatCompletion.object` is an `OpenAIObject` while this is a
    // plain `String` — consider unifying once `OpenAIObject` covers
    // "chat.completion.chunk"; confirm before changing.
    pub object: String, // "chat.completion.chunk"

    /// The service tier used for processing the request. This field is only included if
    /// the `service_tier` parameter is specified in the request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// This fingerprint represents the backend configuration that the model runs with.
    /// Can be used in conjunction with the `seed` request parameter to understand when
    /// backend changes have been made that might impact determinism.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub system_fingerprint: Option<String>,

    /// An optional field that will only be present when you set
    /// `stream_options: {"include_usage": true}` in your request. When present, it
    /// contains a null value except for the last chunk which contains the token usage
    /// statistics for the entire request.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<completion_api::CompletionUsage>,
}
225
226// struct Chunk {
227//     id: "chatcmpl-9quHjsUbwM6nAuNFEkx3WqxstvTxK",
228//     object: "chat.completion.chunk",
229//     created: 1722396443,
230//     model: "gpt-4o-mini-2024-07-18",
231//     system_fingerprint: "fp_0f03d4f0ee",
232//     choices: [
233//         {
234//             index: 0,
235//             delta: {
236//                 role: "assistant",
237//                 content: ""
238//             },
239//             logprobs: null,
240//             finish_reason: null
241//         }
242//     ]
243// }
244
/// Nested types for [`ChatCompletionChunk`] (streaming responses).
pub mod chat_completion_chunk {
    use super::*;

    /// A single streamed choice within a [`ChatCompletionChunk`].
    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
    pub struct Choice {
        /// A chat completion delta generated by streamed model responses.
        pub delta: choice::Delta,

        /// The reason the model stopped generating tokens. This will be `stop` if the model
        /// hit a natural stop point or a provided stop sequence, `length` if the maximum
        /// number of tokens specified in the request was reached, `content_filter` if
        /// content was omitted due to a flag from our content filters, `tool_calls` if the
        /// model called a tool, or `function_call` (deprecated) if the model called a
        /// function. `None` for all chunks except the final one of a choice.
        pub finish_reason: Option<FinishReason>,

        /// The index of the choice in the list of choices.
        pub index: u32,

        /// Log probability information for the choice.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub logprobs: Option<choice::Logprobs>,
    }

    /// Nested types for [`Choice`].
    pub mod choice {
        use super::*;

        /// A chat completion delta generated by streamed model responses.
        /// Every field is optional: each chunk carries only the parts that changed.
        #[derive(Default, Debug, Clone, Deserialize, Serialize)]
        pub struct Delta {
            /// The contents of the chunk message.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub content: Option<String>,

            /// @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
            /// a function that should be called, as generated by the model.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub function_call: Option<delta::FunctionCall>,

            /// The role of the author of this message.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub role: Option<ChatCompletionRole>,

            /// Incremental tool-call fragments; match them up by `ToolCall::index`.
            #[serde(skip_serializing_if = "Option::is_none")]
            pub tool_calls: Option<Vec<delta::ToolCall>>,
        }

        /// Nested types for [`Delta`].
        pub mod delta {
            use super::*;

            /// @deprecated: Deprecated and replaced by `tool_calls`. The name and arguments of
            /// a function that should be called, as generated by the model.
            #[derive(Default, Debug, Clone, Deserialize, Serialize)]
            pub struct FunctionCall {
                /// The arguments to call the function with, as generated by the model in JSON
                /// format. Note that the model does not always generate valid JSON, and may
                /// hallucinate parameters not defined by your function schema. Validate the
                /// arguments in your code before calling your function.
                #[serde(skip_serializing_if = "Option::is_none")]
                pub arguments: Option<String>,

                /// The name of the function to call.
                #[serde(skip_serializing_if = "Option::is_none")]
                pub name: Option<String>,
            }

            /// A streamed tool-call fragment; fields other than `index` may be
            /// absent on any given chunk.
            #[derive(Default, Debug, Clone, Deserialize, Serialize)]
            pub struct ToolCall {
                /// Position of this tool call in the message's tool-call list.
                pub index: u32,

                /// The ID of the tool call.
                #[serde(skip_serializing_if = "Option::is_none")]
                pub id: Option<String>,

                /// Incremental function name/arguments for this tool call.
                #[serde(skip_serializing_if = "Option::is_none")]
                pub function: Option<tool_call::Function>,

                /// The type of the tool. Currently, only `function` is supported.
                #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
                pub tool_call_type: Option<String>, // "function"
            }

            /// Nested types for [`ToolCall`].
            pub mod tool_call {
                use super::*;

                /// Incremental function-call payload of a streamed tool call.
                #[derive(Default, Debug, Clone, Deserialize, Serialize)]
                pub struct Function {
                    /// The arguments to call the function with, as generated by the model in JSON
                    /// format. Note that the model does not always generate valid JSON, and may
                    /// hallucinate parameters not defined by your function schema. Validate the
                    /// arguments in your code before calling your function.
                    #[serde(skip_serializing_if = "Option::is_none")]
                    pub arguments: Option<String>,

                    /// The name of the function to call.
                    #[serde(skip_serializing_if = "Option::is_none")]
                    pub name: Option<String>,
                }
            }
        }

        /// Log probability information for the choice.
        #[derive(Default, Debug, Clone, Deserialize, Serialize)]
        pub struct Logprobs {
            /// A list of message content tokens with log probability information.
            /// `None` when the API returns `null`.
            pub content: Option<Vec<ChatCompletionTokenLogprob>>,
        }
    }
}
354
355// export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage;
356// export type ChatCompletionContentPart = ChatCompletionContentPartText | ChatCompletionContentPartImage;
357
358// #[derive(Debug, Clone, Serialize, Deserialize)]
359// #[serde(untagged)]
360// pub enum ChatCompletionContentPart {
361//     Text(ChatCompletionContentPartText),
362//     Image(ChatCompletionContentPartImage),
363// }
364//
365// impl Default for ChatCompletionContentPart {
366//     fn default() -> Self {
367//         ChatCompletionContentPart::Text(Default::default())
368//     }
369// }
370//
/// The content of a user message: either a single text string or a list of
/// content parts (text and/or images). Untagged, so it serializes as a bare
/// string or a JSON array respectively.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum ChatCompletionContent<'a> {
    Text(&'a str),
    Multiple(Vec<ChatCompletionContentPart<'a>>),
}
377
378impl<'a> Default for ChatCompletionContent<'a> {
379    fn default() -> Self {
380        ChatCompletionContent::Text(Default::default())
381    }
382}
383
/// A single part of a multi-part user message. Internally tagged on `type`,
/// so it serializes as `{"type": "text", ...}` or `{"type": "image_url", ...}`.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ChatCompletionContentPart<'a> {
    Text {
        text: &'a str,
    },
    #[serde(rename = "image_url")]
    Image {
        image_url: chat_completion_content_part_image::ImageURL<'a>,
    },
}
395
396impl<'a> Default for ChatCompletionContentPart<'a> {
397    fn default() -> Self {
398        ChatCompletionContentPart::Text { text: "" }
399    }
400}
401
/// An image content part of a user message.
// The explicit `'de: 'a` bound lets the borrowed `ImageURL<'a>` deserialize
// directly from the input buffer without allocating.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(bound(deserialize = "'de: 'a"))]
pub struct ChatCompletionContentPartImage<'a> {
    /// The image reference (URL or base64 data) plus optional detail level.
    pub image_url: chat_completion_content_part_image::ImageURL<'a>,

    /// The type of the content part.
    #[serde(rename = "type")]
    pub content_type: String, // 'image_url',
}
411
/// Nested types for image content parts.
pub mod chat_completion_content_part_image {
    use super::*;

    /// An image reference carried inside an image content part.
    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
    pub struct ImageURL<'a> {
        /// Either a URL of the image or the base64 encoded image data.
        pub url: &'a str,

        /// Specifies the detail level of the image. Learn more in the
        /// [Vision guide](https://platform.openai.com/docs/guides/vision/low-or-high-fidelity-image-understanding).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub detail: Option<Detail>,
    }

    /// Image fidelity level; serialized in `snake_case` (`auto`, `low`, `high`).
    #[derive(Default, Debug, Clone, Serialize, Deserialize)]
    #[serde(rename_all = "snake_case")]
    pub enum Detail {
        #[default]
        Auto,
        Low,
        High,
    }
}
435
436#[derive(Default, Debug, Clone, Serialize, Deserialize)]
437pub struct ChatCompletionContentPartText {
438    /// The text content.
439    text: String,
440
441    /// The type of the content part.
442    content_type: String, // 'text',
443}
444
/// Specifying a particular function via `{"name": "my_function"}` forces the model
/// to call that function.
// Derives added for consistency with the rest of the file, where every public
// type is at least `Debug + Clone`.
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct ChatCompletionFunctionCallOption {
    /// The name of the function to call.
    pub name: String,
}
451
/// A `function` role message (legacy function-calling API).
#[deprecated(note = "Use `tool` role messages (`ChatCompletionToolMessageParam`) instead.")]
// Derives added for consistency with the rest of the file.
#[derive(Default, Debug, Clone)]
pub struct ChatCompletionFunctionMessageParam {
    /// The contents of the function message.
    content: Option<String>,

    /// The name of the function to call.
    name: String,

    /// The role of the messages author, in this case `function`.
    role: String, // "function"
}
463
/// A chat completion message generated by the model.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionMessage {
    /// The contents of the message.
    pub content: Option<String>,

    /// The role of the author of this message.
    pub role: String, // 'assistant',

    /// The name and arguments of a function that should be called, as generated by the model.
    #[deprecated(
        note = "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
    )]
    pub function_call: Option<chat_completion_message::FunctionCall>,

    /// The tool calls generated by the model, such as function calls.
    pub tool_calls: Option<Vec<ChatCompletionMessageToolCall>>,
}
482
/// Nested types for [`ChatCompletionMessage`].
pub mod chat_completion_message {
    use super::*;

    #[deprecated(
        note = "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
    )]
    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
    pub struct FunctionCall {
        /// The arguments to call the function with, as generated by the model in JSON
        /// format. Note that the model does not always generate valid JSON, and may
        /// hallucinate parameters not defined by your function schema. Validate the
        /// arguments in your code before calling your function.
        pub arguments: String,

        /// The name of the function to call.
        pub name: String,
    }
}
501
/// A request message, discriminated by author role. Internally tagged on
/// `role`, so each variant serializes as `{"role": "assistant", ...}` etc.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(tag = "role", rename_all = "snake_case")]
pub enum ChatCompletionMessageParam<'a> {
    Assistant {
        /// The contents of the assistant message. Required unless `tool_calls` or
        /// `function_call` is specified.
        #[serde(skip_serializing_if = "Option::is_none")]
        content: Option<&'a str>,

        /// An optional name for the participant. Provides the model information to
        /// differentiate between participants of the same role.
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<&'a str>,

        /// The tool calls generated by the model, such as function calls.
        #[serde(skip_serializing_if = "Option::is_none")]
        tool_calls: Option<Vec<ChatCompletionMessageToolCall>>,
    },
    User {
        /// The contents of the user message.
        content: ChatCompletionContent<'a>,

        /// An optional name for the participant. Provides the model information to
        /// differentiate between participants of the same role.
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<&'a str>,
    },
    System {
        /// The contents of the system message.
        content: &'a str,

        /// An optional name for the participant. Provides the model information to
        /// differentiate between participants of the same role.
        #[serde(skip_serializing_if = "Option::is_none")]
        name: Option<&'a str>,
    },
    Tool {
        /// The contents of the tool message.
        content: &'a str,

        /// Tool call that this message is responding to.
        tool_call_id: &'a str,
    },
}
546
547impl<'a> Default for ChatCompletionMessageParam<'a> {
548    fn default() -> Self {
549        ChatCompletionMessageParam::Assistant {
550            content: None,
551            name: None,
552            tool_calls: None,
553        }
554    }
555}
556
557// #[derive(Default, Debug, Clone, Deserialize, Serialize)]
558// pub struct ChatCompletionSystemParam {
559//     /// The contents of the system message.
560//     pub content: String,
561//
562//     /// An optional name for the participant. Provides the model information to
563//     /// differentiate between participants of the same role.
564//     #[serde(skip_serializing_if = "Option::is_none")]
565//     pub name: Option<String>,
566// }
567
568// #[derive(Default, Debug, Clone, Serialize, Deserialize)]
569// pub struct ChatCompletionUserParam {
570//     /// The contents of the user message.
571//     pub content: ChatCompletionContent,
572//
573//     /// An optional name for the participant. Provides the model information to
574//     /// differentiate between participants of the same role.
575//     #[serde(skip_serializing_if = "Option::is_none")]
576//     pub name: Option<String>,
577// }
578
579// #[derive(Default, Debug, Clone, Deserialize, Serialize)]
580// pub struct ChatCompletionAssistantParam {
581//     /// The contents of the assistant message. Required unless `tool_calls` or
582//     /// `function_call` is specified.
583//     #[serde(skip_serializing_if = "Option::is_none")]
584//     pub content: Option<String>,
585//
586//     /// An optional name for the participant. Provides the model information to
587//     /// differentiate between participants of the same role.
588//     #[serde(skip_serializing_if = "Option::is_none")]
589//     pub name: Option<String>,
590//
591//     /// The tool calls generated by the model, such as function calls.
592//     #[serde(skip_serializing_if = "Option::is_none")]
593//     pub tool_calls: Option<Vec<ChatCompletionMessageToolCall>>,
594// }
595
596// #[derive(Default, Debug, Clone, Deserialize, Serialize)]
597// pub struct ChatCompletionToolParam {
598//     /// The contents of the tool message.
599//     pub content: String,
600//
601//     /// Tool call that this message is responding to.
602//     pub tool_call_id: String,
603// }
604
/// A tool call emitted by the model inside a [`ChatCompletionMessage`].
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionMessageToolCall {
    /// The ID of the tool call.
    pub id: String,

    /// The function that the model called.
    pub function: chat_completion_message_tool_call::Function,

    /// The type of the tool. Currently, only `function` is supported.
    /// Serialized as the wire field `type`.
    #[serde(rename = "type")]
    pub tool_call_type: String,
}
617
/// Tool type for message tool calls; serialized in `snake_case` (`function`).
// NOTE(review): `ChatCompletionMessageToolCall::tool_call_type` is currently a
// plain `String` rather than this enum — consider switching the field over.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ChatCompletionMessageToolCallType {
    #[default]
    Function,
}
624
625mod chat_completion_message_tool_call {
626    use super::*;
627    /// The function that the model called.
628    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
629    pub struct Function {
630        /// The arguments to call the function with, as generated by the model in JSON
631        /// format. Note that the model does not always generate valid JSON, and may
632        /// hallucinate parameters not defined by your function schema. Validate the
633        /// arguments in your code before calling your function.
634        pub arguments: String,
635
636        /// The name of the function to call.
637        pub name: String,
638    }
639}
640
641/// Specifies a tool the model should use. Use to force the model to call a specific
642/// function.
643#[derive(Default, Debug, Clone, Serialize, Deserialize)]
644pub struct ChatCompletionNamedToolChoice {
645    pub function: chat_completion_named_tool_choice::Function,
646
647    /// The type of the tool. Currently, only `function` is supported.
648    #[serde(rename = "type")]
649    tool_choice_type: ChatCompletionNamedToolChoiceType,
650}
651
/// Tool type for a named tool choice; serialized in `snake_case` (`function`).
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ChatCompletionNamedToolChoiceType {
    #[default]
    Function,
}
658
659pub mod chat_completion_named_tool_choice {
660    use super::*;
661
662    #[derive(Default, Debug, Clone, Serialize, Deserialize)]
663    pub struct Function {
664        /// The name of the function to call.
665        name: String,
666    }
667}
668
669/// The role of the author of a message
670// export type ChatCompletionRole = 'system' | 'user' | 'assistant' | 'tool' | 'function';
671
/// Options for streaming response. Only set this when you set `stream: true`.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct ChatCompletionStreamOptions {
    /// If set, an additional chunk will be streamed before the `data: [DONE]` message.
    /// The `usage` field on this chunk shows the token usage statistics for the entire
    /// request, and the `choices` field will always be an empty array. All other chunks
    /// will also include a `usage` field, but with a null value.
    pub include_usage: Option<bool>,
}
681
/// A `system` role message in a request payload.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionSystemMessageParam {
    /// The contents of the system message.
    pub content: String,

    /// The role of the messages author, in this case `system`.
    pub role: String, // 'system',

    /// An optional name for the participant. Provides the model information to
    /// differentiate between participants of the same role.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
695
/// Log-probability information for a single generated token.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionTokenLogprob {
    /// The token.
    pub token: String,

    /// A list of integers representing the UTF-8 bytes representation of the token.
    /// Useful in instances where characters are represented by multiple tokens and
    /// their byte representations must be combined to generate the correct text
    /// representation. Can be `null` if there is no bytes representation for the token.
    pub bytes: Option<Vec<u8>>,

    /// The log probability of this token, if it is within the top 20 most likely
    /// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
    /// unlikely.
    pub logprob: f32,

    /// List of the most likely tokens and their log probability, at this token
    /// position. In rare cases, there may be fewer than the number of requested
    /// `top_logprobs` returned.
    pub top_logprobs: Vec<chat_completion_token_logprob::TopLogprob>,
}
717
718pub mod chat_completion_token_logprob {
719    use super::*;
720
721    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
722    pub struct TopLogprob {
723        /// The token.
724        token: String,
725
726        /// A list of integers representing the UTF-8 bytes representation of the token.
727        /// Useful in instances where characters are represented by multiple tokens and
728        /// their byte representations must be combined to generate the correct text
729        /// representation. Can be `null` if there is no bytes representation for the token.
730        pub bytes: Option<Vec<u8>>,
731
732        /// The log probability of this token, if it is within the top 20 most likely
733        /// tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
734        /// unlikely.
735        pub logprob: f32,
736    }
737}
738
739#[derive(Default, Debug, Clone, Deserialize, Serialize)]
740pub struct ChatCompletionTool {
741    pub function: shared::FunctionDefinition,
742
743    /// The type of the tool. Currently, only `function` is supported.
744    pub tool_type: String, // "function"
745}
746
/// Controls which (if any) tool is called by the model. `none` means the model will
/// not call any tool and instead generates a message. `auto` means the model can
/// pick between generating a message or calling one or more tools. `required` means
/// the model must call one or more tools. Specifying a particular tool via
/// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
/// call that tool.
///
/// `none` is the default when no tools are present. `auto` is the default if tools
/// are present.
// NOTE(review): a plain `String` cannot express the named-tool-choice object
// form; an enum of `None`/`Auto`/`Required`/`ChatCompletionNamedToolChoice`
// would model the API more faithfully (see the TS union below).
pub type ChatCompletionToolChoiceOption = String;
// pub type ChatCompletionToolChoiceOption = "none" | "auto" | "required" | ChatCompletionNamedToolChoice;
759
/// A `tool` role message supplying the result of a tool call.
// Std derives added for consistency with the rest of the file.
// NOTE(review): consider `Serialize`/`Deserialize` derives as well, for parity
// with the other `*MessageParam` structs — confirm intended usage first.
#[derive(Default, Debug, Clone)]
pub struct ChatCompletionToolMessageParam {
    /// The contents of the tool message.
    pub content: String,

    /// The role of the messages author, in this case `tool`.
    pub role: String, // "tool"

    /// Tool call that this message is responding to.
    pub tool_call_id: String,
}
770
/// A `user` role message in a request payload.
#[derive(Default, Debug, Clone, Serialize, Deserialize)]
pub struct ChatCompletionUserMessageParam {
    /// The contents of the user message.
    // NOTE(review): the TS API also allows an array of content parts here
    // (see the trailing comment); `ChatCompletionMessageParam::User` already
    // uses `ChatCompletionContent` for that — consider unifying.
    pub content: String, // | Vec<ChatCompletionContentPart>,

    /// The role of the messages author, in this case `user`.
    pub role: String, // "user"

    /// An optional name for the participant. Provides the model information to
    /// differentiate between participants of the same role.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}
784
785// /// @deprecated ChatCompletionMessageParam should be used instead
786// export type CreateChatCompletionRequestMessage = ChatCompletionMessageParam;
787//
788// export type ChatCompletionCreateParams =
789//   | ChatCompletionCreateParamsNonStreaming
790//   | ChatCompletionCreateParamsStreaming;
791
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionCreateParams<'a> {
    /// A list of messages comprising the conversation so far.
    /// [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).
    pub messages: Vec<ChatCompletionMessageParam<'a>>,

    /// ID of the model to use. See the
    /// [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility)
    /// table for details on which models work with the Chat API.
    pub model: &'a str, // (string & {}) | ChatAPI.ChatModel,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their
    /// existing frequency in the text so far, decreasing the model's likelihood to
    /// repeat the same line verbatim.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>,

    /// Deprecated in favor of `tool_choice`.
    ///
    /// Controls which (if any) function is called by the model. `none` means the model
    /// will not call a function and instead generates a message. `auto` means the model
    /// can pick between generating a message or calling a function. Specifying a
    /// particular function via `{"name": "my_function"}` forces the model to call that
    /// function.
    ///
    /// `none` is the default when no functions are present. `auto` is the default if
    /// functions are present.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub function_call: Option<String>,
    // !TODO create an enum
    // pub function_call: Option<"none" | "auto" | ChatCompletionFunctionCallOption>,
    /// Deprecated in favor of `tools`.
    ///
    /// A list of functions the model may generate JSON inputs for.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub functions: Option<Vec<chat_completion_create_params::Function>>,

    /// Modify the likelihood of specified tokens appearing in the completion.
    ///
    /// Accepts a JSON object that maps tokens (specified by their token ID in the
    /// tokenizer) to an associated bias value from -100 to 100. Mathematically, the
    /// bias is added to the logits generated by the model prior to sampling. The exact
    /// effect will vary per model, but values between -1 and 1 should decrease or
    /// increase likelihood of selection; values like -100 or 100 should result in a ban
    /// or exclusive selection of the relevant token.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logit_bias: Option<HashMap<String, f32>>,

    /// Whether to return log probabilities of the output tokens or not. If true,
    /// returns the log probabilities of each output token returned in the `content` of
    /// `message`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<bool>,

    /// The maximum number of [tokens](/tokenizer) that can be generated in the chat
    /// completion.
    ///
    /// The total length of input tokens and generated tokens is limited by the model's
    /// context length.
    /// [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
    /// for counting tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,

    /// How many chat completion choices to generate for each input message. Note that
    /// you will be charged based on the number of generated tokens across all of the
    /// choices. Keep `n` as `1` to minimize costs.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub n: Option<u32>,

    /// Whether to enable
    /// [parallel function calling](https://platform.openai.com/docs/guides/function-calling/parallel-function-calling)
    /// during tool use.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on
    /// whether they appear in the text so far, increasing the model's likelihood to
    /// talk about new topics.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>,

    /// An object specifying the format that the model must output. Compatible with
    /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
    /// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
    ///
    /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
    /// message the model generates is valid JSON.
    ///
    /// **Important:** when using JSON mode, you **must** also instruct the model to
    /// produce JSON yourself via a system or user message. Without this, the model may
    /// generate an unending stream of whitespace until the generation reaches the token
    /// limit, resulting in a long-running and seemingly "stuck" request. Also note that
    /// the message content may be partially cut off if `finish_reason="length"`, which
    /// indicates the generation exceeded `max_tokens` or the conversation exceeded the
    /// max context length.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<chat_completion_create_params::ResponseFormat>,
    // !TODO
    // pub response_format: Option<ChatCompletionCreateParams.ResponseFormat>,
    /// This feature is in Beta. If specified, our system will make a best effort to
    /// sample deterministically, such that repeated requests with the same `seed` and
    /// parameters should return the same result. Determinism is not guaranteed, and you
    /// should refer to the `system_fingerprint` response parameter to monitor changes
    /// in the backend.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<u32>,

    /// Specifies the latency tier to use for processing the request. This parameter is
    /// relevant for customers subscribed to the scale tier service:
    ///
    /// - If set to 'auto', the system will utilize scale tier credits until they are
    ///   exhausted.
    /// - If set to 'default', the request will be processed using the default service
    ///   tier with a lower uptime SLA and no latency guarantee.
    ///
    /// When this parameter is set, the response body will include the `service_tier`
    /// utilized.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub service_tier: Option<ServiceTier>,

    /// Up to 4 sequences where the API will stop generating further tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Value>,
    // pub stop: Option<String | Vec<String>>,
    /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
    /// sent as data-only
    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    /// as they become available, with the stream terminated by a `data: [DONE]`
    /// message.
    /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// Options for streaming response. Only set this when you set `stream: true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ChatCompletionStreamOptions>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
    /// make the output more random, while lower values like 0.2 will make it more
    /// focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Controls which (if any) tool is called by the model. `none` means the model will
    /// not call any tool and instead generates a message. `auto` means the model can
    /// pick between generating a message or calling one or more tools. `required` means
    /// the model must call one or more tools. Specifying a particular tool via
    /// `{"type": "function", "function": {"name": "my_function"}}` forces the model to
    /// call that tool.
    ///
    /// `none` is the default when no tools are present. `auto` is the default if tools
    /// are present.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ChatCompletionToolChoiceOption>,

    /// A list of tools the model may call. Currently, only functions are supported as a
    /// tool. Use this to provide a list of functions the model may generate JSON inputs
    /// for. A max of 128 functions are supported.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<ChatCompletionTool>>,

    /// An integer between 0 and 20 specifying the number of most likely tokens to
    /// return at each token position, each with an associated log probability.
    /// `logprobs` must be set to `true` if this parameter is used.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u8>,

    /// An alternative to sampling with temperature, called nucleus sampling, where the
    /// model considers the results of the tokens with top_p probability mass. So 0.1
    /// means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// A unique identifier representing your end-user, which can help OpenAI to monitor
    /// and detect abuse.
    /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
}
983
984pub mod chat_completion_create_params {
985    use super::*;
986
987    #[derive(Default, Debug, Clone, Deserialize, Serialize)]
988    #[deprecated]
989    pub struct Function {
990        /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
991        /// underscores and dashes, with a maximum length of 64.
992        pub name: String,
993
994        /// A description of what the function does, used by the model to choose when and
995        /// how to call the function.
996        #[serde(skip_serializing_if = "Option::is_none")]
997        pub description: Option<String>,
998
999        /// The parameters the functions accepts, described as a JSON Schema object. See the
1000        /// [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
1001        /// and the
1002        /// [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
1003        /// documentation about the format.
1004        ///
1005        /// Omitting `parameters` defines a function with an empty parameter list.
1006        #[serde(skip_serializing_if = "Option::is_none")]
1007        pub parameters: Option<shared::FunctionParameters>,
1008    }
1009
1010    /// An object specifying the format that the model must output. Compatible with
1011    /// [GPT-4 Turbo](https://platform.openai.com/docs/models/gpt-4-and-gpt-4-turbo) and
1012    /// all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.
1013    ///
1014    /// Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the
1015    /// message the model generates is valid JSON.
1016    ///
1017    /// **Important:** when using JSON mode, you **must** also instruct the model to
1018    /// produce JSON yourself via a system or user message. Without this, the model may
1019    /// generate an unending stream of whitespace until the generation reaches the token
1020    /// limit, resulting in a long-running and seemingly "stuck" request. Also note that
1021    /// the message content may be partially cut off if `finish_reason="length"`, which
1022    /// indicates the generation exceeded `max_tokens` or the conversation exceeded the
1023    /// max context length.
1024    #[derive(Default, Debug, Clone, Serialize, Deserialize)]
1025    pub struct ResponseFormat {
1026        /// Must be one of `text` or `json_object`.
1027        #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
1028        response_format_type: Option<String>, // 'text' | 'json_object';
1029    }
1030
1031    //   export type ChatCompletionCreateParamsNonStreaming =
1032    //     ChatCompletionsAPI.ChatCompletionCreateParamsNonStreaming;
1033    //   export type ChatCompletionCreateParamsStreaming = ChatCompletionsAPI.ChatCompletionCreateParamsStreaming;
1034}
1035
1036// /// @deprecated Use ChatCompletionCreateParams instead
1037// export type CompletionCreateParams = ChatCompletionCreateParams;
1038
/// Parameters for a non-streaming chat completion request. Mirrors the upstream
/// TS `ChatCompletionCreateParamsNonStreaming` variant, where `stream` is
/// narrowed to `false | null`.
#[derive(Default, Debug, Clone, Deserialize, Serialize)]
pub struct ChatCompletionCreateParamsNonStreaming {
    // extends ChatCompletionCreateParamsBase
    /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
    /// sent as data-only
    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    /// as they become available, with the stream terminated by a `data: [DONE]`
    /// message.
    /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
    // NOTE(review): nothing here enforces the `false | null` narrowing — a
    // caller could still set `Some(true)`; confirm whether that should be
    // rejected at construction time.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>, // false | null
}
1051
1052// /// @deprecated Use ChatCompletionCreateParamsNonStreaming instead
1053// export type CompletionCreateParamsNonStreaming = ChatCompletionCreateParamsNonStreaming;
1054//
1055pub struct ChatCompletionCreateParamsStreaming {
1056    // extends ChatCompletionCreateParamsBase
1057    /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be
1058    /// sent as data-only
1059    /// [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
1060    /// as they become available, with the stream terminated by a `data: [DONE]`
1061    /// message.
1062    /// [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
1063    pub stream: bool, // true
1064}
1065
1066// /// @deprecated Use ChatCompletionCreateParamsStreaming instead
1067// export type CompletionCreateParamsStreaming = ChatCompletionCreateParamsStreaming;