dynamo_async_openai/types/completion.rs

// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Based on https://github.com/64bit/async-openai/ by Himanshu Neema
// Original Copyright (c) 2022 Himanshu Neema
// Licensed under MIT License (see ATTRIBUTIONS-Rust.md)
//
// Modifications Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES.
// Licensed under Apache 2.0

use std::{collections::HashMap, pin::Pin};

use derive_builder::Builder;
use futures::Stream;
use serde::{Deserialize, Serialize};

use crate::error::OpenAIError;

use super::{ChatCompletionStreamOptions, Choice, CompletionUsage, Prompt, Stop};

#[derive(Clone, Serialize, Deserialize, Default, Debug, Builder, PartialEq)]
#[builder(name = "CreateCompletionRequestArgs")]
#[builder(pattern = "mutable")]
#[builder(setter(into, strip_option), default)]
#[builder(derive(Debug))]
#[builder(build_fn(error = "OpenAIError"))]
pub struct CreateCompletionRequest {
    /// ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
    pub model: String,

    /// The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
    ///
    /// Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
    pub prompt: Prompt,

    /// The suffix that comes after a completion of inserted text.
    ///
    /// This parameter is only supported for `gpt-3.5-turbo-instruct`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub suffix: Option<String>, // default: null

    /// The maximum number of [tokens](https://platform.openai.com/tokenizer) that can be generated in the completion.
    ///
    /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    ///
    /// We generally recommend altering this or `top_p` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>, // min: 0, max: 2, default: 1

    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
    ///
    /// We generally recommend altering this or `temperature` but not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>, // min: 0, max: 1, default: 1

    /// How many completions to generate for each prompt.
    ///
    /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub n: Option<u8>, // min: 1, max: 128, default: 1

    /// Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
    /// as they become available, with the stream terminated by a `data: [DONE]` message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>, // nullable: true

    /// Options for streaming response. Only set this when you set `stream: true`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<ChatCompletionStreamOptions>,

    /// Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
    ///
    /// The maximum value for `logprobs` is 5.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<u8>, // min: 0, max: 5, default: null, nullable: true

    /// Echo back the prompt in addition to the completion.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub echo: Option<bool>,

    /// Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Stop>,

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>, // min: -2.0, max: 2.0, default: 0

    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
    ///
    /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation/parameter-details)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>, // min: -2.0, max: 2.0, default: 0

    /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
    ///
    /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.
    ///
    /// **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub best_of: Option<u8>, // min: 0, max: 20, default: 1

    /// Modify the likelihood of specified tokens appearing in the completion.
    ///
    /// Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase the likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
    ///
    /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logit_bias: Option<HashMap<String, serde_json::Value>>, // default: null

    /// A unique identifier representing your end-user, which will help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/usage-policies/end-user-ids).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,

    /// If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
    ///
    /// Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub seed: Option<i64>,
}
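// Illustrative usage sketch (not part of the original upstream file): building a
// streaming request with the derive_builder-generated `CreateCompletionRequestArgs`.
// The model name, prompt, and sampling values are arbitrary, and the string
// arguments assume the `Into` conversions implied by `#[builder(setter(into))]`
// (e.g. `Prompt: From<&str>`) are available in this crate.
#[allow(dead_code)]
fn example_build_request() -> Result<CreateCompletionRequest, OpenAIError> {
    CreateCompletionRequestArgs::default()
        .model("gpt-3.5-turbo-instruct")
        .prompt("Say this is a test")
        .max_tokens(16_u32)
        .temperature(0.2_f32)
        // Ban <|endoftext|> (token id 50256), as described in the `logit_bias` docs above.
        .logit_bias(HashMap::from([(
            "50256".to_string(),
            serde_json::json!(-100),
        )]))
        .stream(true)
        .build()
}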
#[derive(Debug, Deserialize, Clone, PartialEq, Serialize)]
pub struct CreateCompletionResponse {
    /// A unique identifier for the completion.
    pub id: String,
    /// The list of completion choices the model generated for the input prompt.
    pub choices: Vec<Choice>,
    /// The Unix timestamp (in seconds) of when the completion was created.
    pub created: u32,

    /// The model used for completion.
    pub model: String,
    /// This fingerprint represents the backend configuration that the model runs with.
    ///
    /// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been
    /// made that might impact determinism.
    pub system_fingerprint: Option<String>,

    /// The object type, which is always "text_completion".
    pub object: String,
    /// Usage statistics for the completion request.
    pub usage: Option<CompletionUsage>,
}
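// Illustrative sketch: the JSON shape `CreateCompletionResponse` deserializes from.
// The payload below is hand-written for illustration (empty `choices`, no `usage`);
// absent optional fields simply deserialize to `None`.
#[allow(dead_code)]
fn example_parse_response() -> Result<CreateCompletionResponse, serde_json::Error> {
    serde_json::from_str(
        r#"{
            "id": "cmpl-abc123",
            "object": "text_completion",
            "created": 1700000000,
            "model": "gpt-3.5-turbo-instruct",
            "choices": []
        }"#,
    )
}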
/// Stream of parsed server-sent events, yielded until a `[DONE]` message is received from the server.
pub type CompletionResponseStream =
    Pin<Box<dyn Stream<Item = Result<CreateCompletionResponse, OpenAIError>> + Send>>;
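// Illustrative sketch: draining a `CompletionResponseStream`. How the stream is
// obtained (typically from a client call made with `stream: true`) is outside this
// module, and `choice.text` assumes the re-exported `Choice` type exposes the
// generated text under that field name.
#[allow(dead_code)]
async fn collect_stream_text(
    mut stream: CompletionResponseStream,
) -> Result<String, OpenAIError> {
    use futures::StreamExt;

    let mut text = String::new();
    // Each item is a partial `CreateCompletionResponse`; the stream ends once the
    // server has sent its `data: [DONE]` message.
    while let Some(response) = stream.next().await {
        for choice in response?.choices {
            text.push_str(&choice.text);
        }
    }
    Ok(text)
}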