openai_rust/
edits.rs

1//! See <https://platform.openai.com/docs/api-reference/edits>.
2//! Use with [Client::create_edit](crate::Client::create_edit).
3#![deprecated = "Use the chat api instead"]
4
5use serde::{Deserialize, Serialize};
6
7/// Request arguments for edits.
8///
9/// See <https://platform.openai.com/docs/api-reference/edits/create>.
10///
11/// ```
12/// openai_rust::edits::EditArguments::new(
13///     "text-davinci-edit-001",
14///     "The quick brown fox".to_owned(),
15///     "Complete this sentence.".to_owned()
16/// );
17/// ```
18#[derive(Serialize, Debug, Clone)]
19pub struct EditArguments {
20    /// ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
21    pub model: String,
22
23    /// The input text to use as a starting point for the edit.
24    pub input: Option<String>,
25
26    /// The instruction that tells the model how to edit the prompt.
27    pub instruction: String,
28
29    //How many edits to generate for the input and instruction.
30    #[serde(skip_serializing_if = "Option::is_none")]
31    pub n: Option<u32>,
32
33    /// What sampling temperature to use, between 0 and 2.
34    /// Higher values like 0.8 will make the output more random,
35    /// while lower values like 0.2 will make it more focused and deterministic.
36    ///
37    /// We generally recommend altering this or `top_p` but not both.
38    #[serde(skip_serializing_if = "Option::is_none")]
39    pub temperature: Option<f32>,
40
41    /// An alternative to sampling with temperature, called nucleus sampling,
42    /// where the model considers the results of the tokens with top_p probability mass.
43    /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
44    ///
45    /// We generally recommend altering this or `temperature` but not both.
46    #[serde(skip_serializing_if = "Option::is_none")]
47    pub top_p: Option<f32>,
48}
49
50impl EditArguments {
51    pub fn new(model: impl AsRef<str>, input: String, instruction: String) -> EditArguments {
52        EditArguments {
53            model: model.as_ref().to_owned(),
54            input: Some(input),
55            instruction: instruction,
56            n: None,
57            temperature: None,
58            top_p: None,
59        }
60    }
61}
62
/// The response of an edit request.
/// ```
/// # use serde_json;
/// # let json = "{
/// #  \"object\": \"edit\",
/// #  \"created\": 1589478378,
/// #  \"choices\": [
/// #    {
/// #      \"text\": \"What day of the week is it?\",
/// #      \"index\": 0
/// #    }
/// #  ],
/// #  \"usage\": {
/// #    \"prompt_tokens\": 25,
/// #    \"completion_tokens\": 32,
/// #    \"total_tokens\": 57
/// #  }
/// # }";
/// # let res = serde_json::from_str::<openai_rust::edits::EditResponse>(json).unwrap();
/// let text = &res.choices[0].text;
/// // or
/// let text = res.to_string();
/// ```
/// It implements [Display](std::fmt::Display) as a shortcut to easily extract the content.

#[derive(Deserialize, Debug, Clone)]
pub struct EditResponse {
    /// Creation time of the edit — presumably a unix timestamp in seconds
    /// (see the example above); verify against the API reference.
    pub created: u32,
    /// The generated edits.
    pub choices: Vec<Choice>,
    /// Token accounting for this request.
    pub usage: Usage,
}
94
95impl std::fmt::Display for EditResponse {
96    /// Automatically grab the first choice
97    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
98        write!(f, "{}", self.choices[0].text)?;
99        Ok(())
100    }
101}
102
/// The completion choices of an edit response.
#[derive(Deserialize, Debug, Clone)]
pub struct Choice {
    /// The edited text produced by the model.
    pub text: String,
    /// Position of this choice in the returned list.
    pub index: u32,
}
109
/// Information about the tokens used by [EditResponse].
#[derive(Deserialize, Debug, Clone)]
pub struct Usage {
    /// Tokens consumed by the prompt (input + instruction).
    pub prompt_tokens: u32,
    /// Tokens generated for the edit.
    pub completion_tokens: u32,
    /// Sum of prompt and completion tokens.
    pub total_tokens: u32,
}
116}