async_openai/types/completion.rs
use std::{collections::HashMap, pin::Pin};

use derive_builder::Builder;
use futures::Stream;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

use crate::types::chat::{
    ChatCompletionStreamOptions, Choice, CompletionUsage, Prompt, StopConfiguration,
};

#[derive(Clone, Serialize, Deserialize, Default, Debug, Builder, PartialEq)]
#[builder(name = "CreateCompletionRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateCompletionRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    pub model: String,

    /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
    ///
    /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
    pub prompt: Prompt,

    /// The suffix that comes after a completion of inserted text.
    ///
    /// This parameter is only supported for `gpt-3.5-turbo-instruct`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub suffix: Option<String>, // default: null

    /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion.
    ///
    /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>, // min: 0, max: 2, default: 1

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>, // min: 0, max: 1, default: 1

    /// How many completions to generate for each prompt.
    ///
    /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub n: Option<u8>, // min: 1, max: 128, default: 1

    /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    /// as they become available, with the stream terminated by a `data: [DONE]` message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>, // nullable: true

    /// Options for streaming response. Only set this when you set `stream: true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ChatCompletionStreamOptions>,

    /// Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
    ///
    /// The maximum value for `logprobs` is 5.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<u8>, // min: 0, max: 5, default: null, nullable: true

    /// Echo back the prompt in addition to the completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub echo: Option<bool>,

    /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<StopConfiguration>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>, // min: -2.0, max: 2.0, default: 0

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>, // min: -2.0, max: 2.0, default: 0

    /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
    ///
    /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.
    ///
    /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub best_of: Option<u8>, // min: 0, max: 20, default: 1

    /// Modify the likelihood of specified tokens appearing in the completion.
    ///
    /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
    ///
    /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logit_bias: Option<HashMap<String, serde_json::Value>>, // default: null

    /// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,

    /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
    ///
    /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<i64>,
}

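#[cfg(test)]
mod request_builder_tests {
    use super::*;

    // Example sketch: builds a request through the derive_builder-generated
    // `CreateCompletionRequestArgs`. Values are illustrative only. Numeric
    // literals carry explicit suffixes because the `setter(into)` setters are
    // generic, and passing a string literal as the prompt assumes `Prompt`
    // implements `From<&str>`.
    #[test]
    fn builds_a_minimal_request() {
        let request = CreateCompletionRequestArgs::default()
            .model("gpt-3.5-turbo-instruct")
            .prompt("Write a tagline for an ice cream shop.")
            .max_tokens(64_u32)
            .temperature(0.7_f32)
            .build()
            .expect("builder should produce a valid request");

        assert_eq!(request.model, "gpt-3.5-turbo-instruct");
        assert_eq!(request.max_tokens, Some(64));
        // Unset optional fields stay `None` and are skipped during serialization.
        assert_eq!(request.stream, None);
    }
}
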
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct CreateCompletionResponse {
    /// A unique identifier for the completion.
    pub id: String,
    /// The list of completion choices the model generated for the input prompt.
    pub choices: Vec<Choice>,
    /// The Unix timestamp (in seconds) of when the completion was created.
    pub created: u32,

    /// The model used for completion.
    pub model: String,
    /// This fingerprint represents the backend configuration that the model runs with.
    ///
    /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been
    /// made that might impact determinism.
    pub system_fingerprint: Option<String>,

    /// The object type, which is always "text_completion".
    pub object: String,
    /// Usage statistics for the completion request.
    pub usage: Option<CompletionUsage>,
}

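#[cfg(test)]
mod response_deserialization_tests {
    use super::*;

    // Example sketch: the response type deserializes directly from the JSON
    // shape returned by the completions endpoint. The payload below is a
    // hand-written minimal example, not a recorded API response.
    #[test]
    fn deserializes_a_minimal_response() {
        let json = r#"{
            "id": "cmpl-abc123",
            "object": "text_completion",
            "created": 1700000000,
            "model": "gpt-3.5-turbo-instruct",
            "choices": []
        }"#;

        let response: CreateCompletionResponse =
            serde_json::from_str(json).expect("payload should deserialize");

        assert_eq!(response.object, "text_completion");
        assert!(response.choices.is_empty());
        // Optional fields absent from the payload deserialize to `None`.
        assert!(response.usage.is_none());
    }
}
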
/// Parsed stream of server-sent events, terminated once a \[DONE\] message is received from the server.
pub type CompletionResponseStream =
    Pin<Box<dyn Stream<Item = Result<CreateCompletionResponse, OpenAIError>> + Send>>;
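
// Example sketch of draining a `CompletionResponseStream`: each yielded item
// is a complete `CreateCompletionResponse` chunk, and the stream ends after
// the server sends its `data: [DONE]` message. Assumes the completion `Choice`
// re-exported above exposes a public `text` field, as the API's response
// objects do.
#[allow(dead_code)]
async fn collect_streamed_text(
    mut stream: CompletionResponseStream,
) -> Result<String, OpenAIError> {
    use futures::StreamExt;

    let mut text = String::new();
    while let Some(chunk) = stream.next().await {
        // Each item is itself a Result; propagate transport or parse errors.
        for choice in chunk?.choices {
            text.push_str(&choice.text);
        }
    }
    Ok(text)
}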