async_openai_wasm/types/completion.rs

use std::collections::HashMap;

use derive_builder::Builder;
use serde::{Deserialize, Serialize};

use crate::client::OpenAIEventStream;
use crate::error::OpenAIError;

use super::{ChatCompletionStreamOptions, Choice, CompletionUsage, Prompt, Stop};

#[derive(Clone, Serialize, Deserialize, Default, Debug, Builder, PartialEq)]
#[builder(name = "CreateCompletionRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateCompletionRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    pub model: String,

    /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
    ///
    /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
    pub prompt: Prompt,

    /// The suffix that comes after a completion of inserted text.
    ///
    /// This parameter is only supported for `gpt-3.5-turbo-instruct`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub suffix: Option<String>, // default: null

    /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion.
    ///
    /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>, // min: 0, max: 2, default: 1

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>, // min: 0, max: 1, default: 1

    /// How many completions to generate for each prompt.
    ///
    /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub n: Option<u8>, // min: 1, max: 128, default: 1

    /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    /// as they become available, with the stream terminated by a `data: [DONE]` message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>, // nullable: true

    /// Options for streaming responses. Only set this when `stream` is `true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ChatCompletionStreamOptions>,

    /// Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
    ///
    /// The maximum value for `logprobs` is 5.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<u8>, // min: 0, max: 5, default: null, nullable: true

    /// Echo back the prompt in addition to the completion
    #[serde(skip_serializing_if = "Option::is_none")]
    pub echo: Option<bool>,

    /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Stop>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>, // min: -2.0, max: 2.0, default: 0

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>, // min: -2.0, max: 2.0, default: 0

    /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
    ///
    /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.
    ///
    /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub best_of: Option<u8>, // min: 0, max: 20, default: 1

    /// Modify the likelihood of specified tokens appearing in the completion.
    ///
    /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
    ///
    /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logit_bias: Option<HashMap<String, serde_json::Value>>, // default: null

    /// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,

    /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
    ///
    /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<i64>,
}
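
// A usage sketch added for illustration (not part of the upstream file): it
// exercises the derive_builder-generated `CreateCompletionRequestArgs` with the
// mutable-setter pattern declared above. The model name and prompt are
// placeholder values, and passing a `&str` to `.prompt(..)` assumes `Prompt`
// implements `From<&str>`, as the other string-like request inputs in this
// crate do.
#[cfg(test)]
mod create_completion_request_sketch {
    use super::*;

    #[test]
    fn builds_a_basic_request() {
        let request = CreateCompletionRequestArgs::default()
            .model("gpt-3.5-turbo-instruct")
            .prompt("Say this is a test")
            .max_tokens(16u32)
            .build()
            .expect("unset fields fall back to their defaults");

        assert_eq!(request.model, "gpt-3.5-turbo-instruct");
        assert_eq!(request.max_tokens, Some(16));
    }

    #[test]
    fn unset_options_are_omitted_from_the_serialized_request() {
        // `skip_serializing_if = "Option::is_none"` keeps unset options out of
        // the request body sent over the wire.
        let request = CreateCompletionRequestArgs::default()
            .model("gpt-3.5-turbo-instruct")
            .prompt("Hello")
            .build()
            .expect("unset fields fall back to their defaults");

        let json = serde_json::to_value(&request).expect("request serializes to JSON");
        assert!(json.get("temperature").is_none());
        assert!(json.get("max_tokens").is_none());
    }
}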

#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct CreateCompletionResponse {
    /// A unique identifier for the completion.
    pub id: String,
    /// The list of completion choices the model generated for the input prompt.
    pub choices: Vec<Choice>,
    /// The Unix timestamp (in seconds) of when the completion was created.
    pub created: u32,

    /// The model used for completion.
    pub model: String,
    /// This fingerprint represents the backend configuration that the model runs with.
    ///
    /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been
    /// made that might impact determinism.
    pub system_fingerprint: Option<String>,

    /// The object type, which is always "text_completion"
    pub object: String,
    /// Usage statistics for the completion request.
    pub usage: Option<CompletionUsage>,
}

/// A stream of parsed server-sent events, terminated when a `data: [DONE]` message is received from the server.
pub type CompletionResponseStream = OpenAIEventStream<CreateCompletionResponse>;
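
// A consumption sketch added for illustration (not part of the upstream file):
// it drains a `CompletionResponseStream`, printing each partial choice as it
// arrives. It assumes `OpenAIEventStream` implements
// `futures::Stream<Item = Result<CreateCompletionResponse, OpenAIError>>`, that
// the `futures` crate is available, and that each `Choice` carries its
// generated `text`. Obtaining the stream in the first place (e.g. by sending a
// request with `stream: true` through the client) is outside this module.
#[allow(dead_code)]
async fn print_completion_stream(stream: CompletionResponseStream) -> Result<(), OpenAIError> {
    use futures::StreamExt;

    // Pin the stream on the stack so `.next()` can be called without requiring
    // the concrete stream type to be `Unpin`.
    futures::pin_mut!(stream);

    while let Some(event) = stream.next().await {
        // Each event is either a partial `CreateCompletionResponse` or an
        // `OpenAIError`; propagate errors to the caller.
        let response = event?;
        for choice in &response.choices {
            print!("{}", choice.text);
        }
    }

    Ok(())
}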