// chat_gpt_rs/request.rs
//! Data structures for the request to the OpenAI API and response from the OpenAI API.
2
3#[derive(serde::Serialize, Default)]
4/// gpt-3.5 and gpt-4 models that are supported by the OpenAI API.
5pub enum Model {
6 /// gpt-3.5-turbo model.
7 #[default]
8 #[serde(rename = "gpt-3.5-turbo")]
9 Gpt35Turbo,
10
11 /// gpt-3.5-turbo-0301 model.
12 #[serde(rename = "gpt-3.5-turbo-0301")]
13 Gpt35Turbo0301,
14
15 #[serde(rename = "gpt-4")]
16 Gpt4,
17
18 #[serde(rename = "gpt-4-0314")]
19 Gpt4_0314,
20
21 #[serde(rename = "gpt-4-32k")]
22 Gpt4_32k,
23
24 #[serde(rename = "gpt-4-32k-0314")]
25 Gpt4_32k0314,
26
27}
28
#[derive(serde::Deserialize)]
/// A chat completion choice.
///
/// The API returns one `Choice` per requested completion (see `Request::n`).
pub struct Choice {
    /// index of the message in the request.
    pub index: i32,

    /// text of the chat completion.
    pub message: Message,

    /// finish reason for the chat completion.
    // NOTE(review): presumably values like "stop" / "length" per the OpenAI
    // API; not validated anywhere in this file — confirm against the API docs.
    pub finish_reason: String,
}
41
42#[derive(serde::Deserialize, serde::Serialize)]
43/// A message in the chat format.
44pub struct Message {
45 /// role for the message, either "user" or "assistant".
46 pub role: String,
47
48 /// message content.
49 pub content: String,
50}
51
52#[derive(serde::Deserialize)]
53/// usage information for the OpenAI API.
54pub struct Usage {
55 /// how many tokens were used for the prompt.
56 pub prompt_tokens: i32,
57
58 /// how many tokens were used for the chat completion.
59 pub completion_tokens: i32,
60
61 /// how many tokens were used for the entire request.
62 pub total_tokens: i32,
63}
64
#[derive(serde::Serialize, Default)]
/// request to the OpenAI API.
///
/// Optional fields are skipped during serialization when `None`, so the
/// API's own defaults apply.
pub struct Request {
    /// ID of the model to use; any variant of `Model` (the gpt-3.5 and
    /// gpt-4 families defined above).
    pub model: Model,

    /// Messages to generate chat completions for, in the chat format.
    pub messages: Vec<Message>,

    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f64>,

    /// Alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f64>,

    /// How many chat completion choices to generate for each input message.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub n: Option<i32>,

    /// Up to 4 sequences where the API will stop generating further tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Vec<String>>,

    /// Maximum number of tokens to generate for each chat completion choice.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<i32>,

    /// A number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f64>,

    /// A number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f64>,

    /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub user: Option<String>,
}
106
#[derive(serde::Deserialize)]
/// response from the OpenAI API.
pub struct Response {
    /// ID of the request.
    pub id: String,

    /// object type string returned by the API.
    // NOTE(review): presumably "chat.completion" for this endpoint — not
    // checked anywhere in this file; confirm against the API reference.
    pub object: String,

    /// when the request was created (Unix timestamp, per the API).
    pub created: i64,

    /// list of chat completion choices.
    pub choices: Vec<Choice>,

    /// usage information for the request.
    pub usage: Usage,
}