openai_rs_api/core/models.rs
#![allow(dead_code)]

#[cfg(feature = "list_models")]
pub mod list_models {
    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    pub struct ModelPermission {
        /// The ID of the permission.
        pub id: String,
        /// The type of object returned by the API. In this case, it will always be "model_permission".
        pub object: String,
        /// The Unix timestamp (in seconds) when the permission was created.
        pub created: i64,
        /// Whether the permission allows creating engines.
        pub allow_create_engine: bool,
        /// Whether the permission allows sampling.
        pub allow_sampling: bool,
        /// Whether the permission allows log probabilities.
        pub allow_logprobs: bool,
        /// Whether the permission allows search indices.
        pub allow_search_indices: bool,
        /// Whether the permission allows viewing.
        pub allow_view: bool,
        /// Whether the permission allows fine-tuning.
        pub allow_fine_tuning: bool,
        /// The ID of the organization that the permission belongs to.
        pub organization: String,
        /// The ID of the group that the permission belongs to.
        pub group: Option<String>,
        /// Whether the permission is blocking.
        pub is_blocking: bool,
    }

    #[derive(Debug, Deserialize)]
    pub struct Model {
        /// The ID of the model.
        pub id: String,
        /// The type of object returned by the API. In this case, it will always be "model".
        pub object: String,
        /// The Unix timestamp (in seconds) when the model was created.
        pub created: i64,
        /// The ID of the organization that owns the model.
        pub owned_by: String,
        /// A list of `ModelPermission` objects representing the permissions for the model.
        pub permission: Vec<ModelPermission>,
        /// The ID of the root model that this model was created from.
        pub root: String,
        /// The ID of the parent model that this model was created from.
        pub parent: Option<String>,
    }

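    /// A minimal deserialization sketch. The JSON below is hand-written to
    /// mirror this struct's fields, not a captured API response:
    /// ```rust
    /// use openai_rs_api::core::models::list_models::ModelList;
    ///
    /// let json = r#"{ "object": "list", "data": [] }"#;
    /// let models: ModelList = serde_json::from_str(json).unwrap();
    /// assert_eq!(models.object, "list");
    /// assert!(models.data.is_empty());
    /// ```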
    #[derive(Debug, Deserialize)]
    pub struct ModelList {
        /// The type of object returned by the API. In this case, it will always be "list".
        pub object: String,
        /// A vector of `Model` objects representing the models returned by the API.
        pub data: Vec<Model>,
    }
}

#[cfg(feature = "edits")]
pub mod edits {
    use serde::{Deserialize, Serialize};

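    /// A minimal builder sketch; the model ID, input, and instruction are
    /// illustrative values, not prescribed ones:
    /// ```rust
    /// use openai_rs_api::core::models::edits::EditParameters;
    ///
    /// let params = EditParameters::new(
    ///     "text-davinci-edit-001".to_string(),
    ///     "What day of the wek is it?".to_string(),
    ///     "Fix the spelling mistakes".to_string(),
    /// )
    /// .temperature(0.2)
    /// .build();
    ///
    /// // Unset optional fields are omitted from the serialized body.
    /// let body = serde_json::to_string(&params).unwrap();
    /// assert!(!body.contains("top_p"));
    /// ```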
    #[derive(Debug, Deserialize, Serialize)]
    pub struct EditParameters {
        /// ID of the model to use. You can use the `text-davinci-edit-001` or `code-davinci-edit-001` model with this endpoint.
        model: String,
        /// The input text to use as a starting point for the edit.
        input: String,
        /// The instruction that tells the model how to edit the prompt.
        /// Serialized as `instruction`, the parameter name the API expects.
        #[serde(rename = "instruction")]
        instructions: String,
        /// How many edits to generate for the input and instruction.
        /// Serialized as `n`, the parameter name the API expects.
        #[serde(rename = "n")]
        #[serde(skip_serializing_if = "Option::is_none")]
        n_of_edits: Option<i32>,
        /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.
        ///
        /// We generally recommend altering this or `top_p` but not both.
        #[serde(skip_serializing_if = "Option::is_none")]
        temperature: Option<f32>,
        /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        ///
        /// We generally recommend altering this or `temperature` but not both.
        #[serde(skip_serializing_if = "Option::is_none")]
        top_p: Option<f32>,
    }

    impl EditParameters {
        // builder pattern
        pub fn new(model: String, input: String, instructions: String) -> Self {
            Self {
                model,
                input,
                instructions,
                n_of_edits: None,
                temperature: None,
                top_p: None,
            }
        }

        pub fn n_of_edits(mut self, n_of_edits: i32) -> Self {
            self.n_of_edits = Some(n_of_edits);
            self
        }

        pub fn temperature(mut self, temperature: f32) -> Self {
            self.temperature = Some(temperature);
            self
        }

        pub fn top_p(mut self, top_p: f32) -> Self {
            self.top_p = Some(top_p);
            self
        }

        pub fn build(self) -> EditParameters {
            self
        }
    }

    #[derive(Debug, Deserialize)]
    pub struct EditResponse {
        /// The type of object returned by the API. In this case, it will always be "edit".
        object: String,
        /// The Unix timestamp (in seconds) when the edit was generated.
        created: i64,
        /// A list of `Choice` objects representing the generated edits.
        choices: Vec<Choice>,
        /// An object containing information about the number of tokens used in the prompt and generated edit.
        usage: Usage,
    }

    #[derive(Debug, Deserialize)]
    pub struct Choice {
        /// The generated text for this choice.
        text: String,
        /// The index of this choice in the list of choices returned by the API.
        index: i32,
    }

    #[derive(Debug, Deserialize)]
    pub struct Usage {
        /// The number of tokens in the prompt.
        prompt_tokens: i32,
        /// The number of tokens in the generated completion.
        completion_tokens: i32,
        /// The total number of tokens used (prompt + completion).
        total_tokens: i32,
    }
}

#[cfg(feature = "completions")]
pub mod completions {
    use serde::{Deserialize, Serialize};

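    /// A minimal construction sketch; all fields are public, so the struct is
    /// built directly (the model, prompt, and settings are illustrative):
    /// ```rust
    /// use openai_rs_api::core::models::completions::CompletionParameters;
    ///
    /// let params = CompletionParameters {
    ///     model: "text-davinci-003".to_string(),
    ///     prompt: "Say this is a test".to_string(),
    ///     max_tokens: Some(7),
    ///     temperature: Some(0.0),
    ///     suffix: None,
    ///     top_p: None,
    ///     n: None,
    ///     stream: None,
    ///     logprobs: None,
    ///     stop: None,
    ///     presence_penalty: None,
    ///     frequency_penalty: None,
    ///     best_of: None,
    ///     logit_bias: None,
    ///     user: None,
    ///     echo: None,
    /// };
    ///
    /// // Unset optional fields are omitted from the serialized body.
    /// let body = serde_json::to_string(&params).unwrap();
    /// assert!(body.contains("max_tokens"));
    /// assert!(!body.contains("best_of"));
    /// ```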
    #[derive(Debug, Deserialize, Serialize)]
    pub struct CompletionParameters {
        /// ID of the model to use. You can use the List models API to see all of your available models,
        /// or see our [Model overview](https://platform.openai.com/docs/models/overview) for descriptions of them.
        ///
        /// List models example:
        /// ```rust,no_run
        /// use openai_rs_api::core::{OpenAI, models::list_models::ModelList};
        /// use tokio;
        ///
        /// #[tokio::main]
        /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
        ///     let openai = OpenAI::new("your_api_key", "your_organization_id");
        ///     let models: ModelList = openai.list_models().await?;
        ///     println!("{:#?}", models);
        ///     Ok(())
        /// }
        /// ```
        pub model: String,
        /// The prompt(s) to generate completions for, encoded as a string, array of strings,
        /// array of tokens, or array of token arrays.
        ///
        /// Note that <|endoftext|> is the document separator that the model sees during training,
        /// so if a prompt is not specified the model will generate as if from the beginning of a new document.
        pub prompt: String,
        /// The maximum number of [tokens](https://platform.openai.com/tokenizer) to generate in the completion.
        ///
        /// The token count of your prompt plus `max_tokens` cannot exceed the model's context length.
        /// [Example Python code](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb)
        /// for counting tokens.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub max_tokens: Option<i32>,
        /// What sampling temperature to use, between 0 and 2.
        /// Higher values like 0.8 will make the output more random, while lower values
        /// like 0.2 will make it more focused and deterministic.
        ///
        /// We generally recommend altering this or `top_p` but not both.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub temperature: Option<f32>,
        /// The suffix that comes after a completion of inserted text.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub suffix: Option<String>,
        /// An alternative to sampling with temperature, called nucleus sampling,
        /// where the model considers the results of the tokens with top_p probability mass.
        /// So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        ///
        /// We generally recommend altering this or `temperature` but not both.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub top_p: Option<f32>,
        /// How many completions to generate for each prompt.
        ///
        /// Note: Because this parameter generates many completions, it can quickly consume your token quota.
        /// Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub n: Option<i32>,
        /// Whether to stream back partial progress. If set, tokens
        /// will be sent as data-only server-sent events as they become available,
        /// with the stream terminated by a `data: [DONE]` message.
        /// [Example Python code.](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb)
        #[serde(skip_serializing_if = "Option::is_none")]
        pub stream: Option<bool>,
        /// Include the log probabilities on the `logprobs` most likely tokens, as well as the chosen tokens.
        /// For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens.
        /// The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
        ///
        /// The maximum value for `logprobs` is 5.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub logprobs: Option<i32>,
        /// Up to 4 sequences where the API will stop generating further tokens.
        /// The returned text will not contain the stop sequence.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub stop: Option<String>,
        /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether
        /// they appear in the text so far, increasing the model's likelihood to talk about new topics.
        ///
        /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)
        #[serde(skip_serializing_if = "Option::is_none")]
        pub presence_penalty: Option<f32>,
        /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency
        /// in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
        ///
        /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)
        #[serde(skip_serializing_if = "Option::is_none")]
        pub frequency_penalty: Option<f32>,
        /// Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
        ///
        /// When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return; `best_of` must be greater than `n`.
        ///
        /// Note: Because this parameter generates many completions, it can quickly consume your token quota.
        /// Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub best_of: Option<i32>,
        /// Modify the likelihood of specified tokens appearing in the completion.
        ///
        /// Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer)
        /// to an associated bias value from -100 to 100. You can use this [tokenizer tool](https://platform.openai.com/tokenizer?view=bpe)
        /// (which works for both GPT-2 and GPT-3) to convert text to token IDs.
        /// Mathematically, the bias is added to the logits generated by the model prior to sampling.
        /// The exact effect will vary per model, but values between -1 and 1 should decrease or
        /// increase likelihood of selection; values like -100 or 100 should result in a ban or
        /// exclusive selection of the relevant token.
        ///
        /// As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub logit_bias: Option<serde_json::Value>,
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub user: Option<String>,
        /// Echo back the prompt in addition to the completion.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub echo: Option<bool>,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct CompletionResponse {
        /// The unique identifier for the completion request.
        pub id: String,
        /// The type of object, which is always "text_completion".
        pub object: String,
        /// The Unix timestamp (in seconds) when the completion request was created.
        pub created: i64,
        /// The ID of the model used to generate the completion.
        pub model: String,
        /// A vector of `CompletionChoice` objects, each representing a possible completion.
        pub choices: Vec<CompletionChoice>,
        /// An object containing usage statistics for the completion request.
        pub usage: Usage,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct CompletionChoice {
        /// The generated text for this completion choice.
        pub text: String,
        /// The index of this completion choice in the list of all possible choices.
        pub index: i32,
        /// The log probabilities of the tokens in the generated text.
        /// If the `logprobs` parameter was not set in the request, this field will be `None`.
        pub logprobs: Option<i32>,
        /// The reason why the completion finished.
        /// Possible values include "stop" (a stop sequence was hit), "length" (`max_tokens` was reached),
        /// and "content_filter" (content was omitted due to a content filter flag).
        pub finish_reason: String,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct Usage {
        /// The number of tokens in the prompt used for the completion request.
        pub prompt_tokens: i32,
        /// The number of tokens in the generated completion text.
        pub completion_tokens: i32,
        /// The total number of tokens used in the completion request, including both the prompt and the generated completion text.
        pub total_tokens: i32,
    }
}

#[cfg(feature = "chat")]
pub mod chat {
    use serde::{Deserialize, Serialize};

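    /// A minimal builder sketch (the model ID and message content are
    /// illustrative values):
    /// ```rust
    /// use openai_rs_api::core::models::chat::{ChatParameters, Message};
    ///
    /// let messages = vec![Message {
    ///     role: "user".to_string(),
    ///     content: Some("Hello!".to_string()),
    ///     name: None,
    ///     function_call: None,
    /// }];
    ///
    /// let params = ChatParameters::new("gpt-3.5-turbo".to_string(), messages)
    ///     .temperature(0.7)
    ///     .max_tokens(256)
    ///     .build();
    /// ```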
    #[derive(Debug, Serialize, Deserialize)]
    pub struct ChatParameters {
        /// ID of the model to use. See the
        /// [model endpoint compatibility](https://platform.openai.com/docs/models/model-endpoint-compatibility) table
        /// for details on which models work with the Chat API.
        pub model: String,
        /// A list of messages comprising the conversation so far.
        pub messages: Vec<Message>,
        /// A list of functions the model may generate JSON inputs for.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub functions: Option<Vec<Function>>,
        /// Controls how the model responds to function calls. "none" means the model does not call a function,
        /// and responds to the end-user. "auto" means the model can pick between responding to the end-user or calling a
        /// function. Specifying a particular function via `{"name": "my_function"}` forces the model to call
        /// that function. "none" is the default when no functions are present; "auto" is the default if functions
        /// are present.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub function_call: Option<serde_json::Value>,
        /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output
        /// more random, while lower values like 0.2 will make it more focused and deterministic.
        ///
        /// We generally recommend altering this or `top_p` but not both.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub temperature: Option<f32>,
        /// An alternative to sampling with temperature, called nucleus sampling, where the
        /// model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.
        ///
        /// We generally recommend altering this or `temperature` but not both.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub top_p: Option<f32>,
        /// How many chat completion choices to generate for each input message.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub n: Option<i32>,
        /// If set, partial message deltas will be sent, like in ChatGPT.
        /// Tokens will be sent as data-only server-sent events as they become available,
        /// with the stream terminated by a `data: [DONE]` message.
        /// [Example Python code.](https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb)
        #[serde(skip_serializing_if = "Option::is_none")]
        pub stream: Option<bool>,
        /// Up to 4 sequences where the API will stop generating further tokens.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub stop: Option<Vec<String>>,
        /// The maximum number of tokens to generate in the chat completion.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub max_tokens: Option<i32>,
        /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the
        /// text so far, increasing the model's likelihood to talk about new topics.
        ///
        /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)
        #[serde(skip_serializing_if = "Option::is_none")]
        pub presence_penalty: Option<f32>,
        /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in
        /// the text so far, decreasing the model's likelihood to repeat the same line verbatim.
        ///
        /// [See more information about frequency and presence penalties.](https://platform.openai.com/docs/api-reference/parameter-details)
        #[serde(skip_serializing_if = "Option::is_none")]
        pub frequency_penalty: Option<f32>,
        /// Modify the likelihood of specified tokens appearing in the completion.
        ///
        /// Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100.
        /// Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,
        /// but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or
        /// exclusive selection of the relevant token.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub logit_bias: Option<serde_json::Value>,
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub user: Option<String>,
    }

    impl ChatParameters {
        // builder pattern
        pub fn new(model: String, messages: Vec<Message>) -> Self {
            Self {
                model,
                messages,
                functions: None,
                function_call: None,
                temperature: None,
                top_p: None,
                n: None,
                stream: None,
                stop: None,
                max_tokens: None,
                presence_penalty: None,
                frequency_penalty: None,
                logit_bias: None,
                user: None,
            }
        }

        pub fn functions(mut self, functions: Vec<Function>) -> Self {
            self.functions = Some(functions);
            self
        }

        pub fn function_call(mut self, function_call: serde_json::Value) -> Self {
            self.function_call = Some(function_call);
            self
        }

        pub fn temperature(mut self, temperature: f32) -> Self {
            self.temperature = Some(temperature);
            self
        }

        pub fn top_p(mut self, top_p: f32) -> Self {
            self.top_p = Some(top_p);
            self
        }

        pub fn n(mut self, n: i32) -> Self {
            self.n = Some(n);
            self
        }

        pub fn stream(mut self, stream: bool) -> Self {
            self.stream = Some(stream);
            self
        }

        pub fn stop(mut self, stop: Vec<String>) -> Self {
            self.stop = Some(stop);
            self
        }

        pub fn max_tokens(mut self, max_tokens: i32) -> Self {
            self.max_tokens = Some(max_tokens);
            self
        }

        pub fn presence_penalty(mut self, presence_penalty: f32) -> Self {
            self.presence_penalty = Some(presence_penalty);
            self
        }

        pub fn frequency_penalty(mut self, frequency_penalty: f32) -> Self {
            self.frequency_penalty = Some(frequency_penalty);
            self
        }

        pub fn logit_bias(mut self, logit_bias: serde_json::Value) -> Self {
            self.logit_bias = Some(logit_bias);
            self
        }

        pub fn user(mut self, user: String) -> Self {
            self.user = Some(user);
            self
        }

        pub fn build(self) -> ChatParameters {
            self
        }
    }

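    /// A minimal sketch of describing a callable function with a JSON Schema
    /// (the function name and schema are illustrative):
    /// ```rust
    /// use openai_rs_api::core::models::chat::Function;
    ///
    /// let get_weather = Function {
    ///     name: "get_current_weather".to_string(),
    ///     description: Some("Get the current weather in a given location".to_string()),
    ///     params: Some(serde_json::json!({
    ///         "type": "object",
    ///         "properties": {
    ///             "location": { "type": "string" }
    ///         },
    ///         "required": ["location"]
    ///     })),
    /// };
    /// ```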
    #[derive(Debug, Serialize, Deserialize)]
    pub struct Function {
        /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes,
        /// with a maximum length of 64.
        pub name: String,
        /// The description of what the function does.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub description: Option<String>,
        /// The parameters the function accepts, described as a JSON Schema object.
        /// See the [guide](https://platform.openai.com/docs/guides/gpt/function-calling) for examples,
        /// and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
        /// documentation about the format.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "parameters")]
        pub params: Option<serde_json::Value>,
    }

    #[derive(Debug, Serialize, Deserialize, Clone)]
    pub struct ChatResponse {
        /// The unique identifier for this chat response.
        pub id: String,
        /// The type of object, which is always "chat.completion".
        pub object: String,
        /// The Unix timestamp (in seconds) when this chat response was created.
        pub created: i64,
        /// A vector of `CompletionChoice` structs, representing the different choices for the chat response.
        pub choices: Vec<CompletionChoice>,
        /// An object containing usage information for this API request.
        pub usage: Usage,
    }

    #[derive(Debug, Serialize, Deserialize, Clone)]
    pub struct CompletionChoice {
        /// The index of this choice in the list of choices returned by the API.
        pub index: i32,
        /// The message generated by the API for this choice.
        pub message: Message,
        /// The reason why the API stopped generating further tokens for this choice.
        pub finish_reason: String,
    }

    #[derive(Debug, Serialize, Deserialize, Clone)]
    pub struct Message {
        /// The role of the message's author. One of `system`, `user`, `assistant` or `function`.
        pub role: String,
        /// The contents of the message. `content` is required for
        /// all messages except assistant messages with function calls.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub content: Option<String>,
        /// The name of the author of this message. `name` is required if role is `function`,
        /// and it should be the name of the function whose response is in the `content`.
        /// May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub name: Option<String>,
        /// The name and arguments of a function that should be called, as generated by the model.
        ///
        /// **Note: this optional field is not currently supported by this crate.**
        #[serde(skip_serializing_if = "Option::is_none")]
        pub function_call: Option<serde_json::Value>,
    }

    #[derive(Debug, Serialize, Deserialize, Clone)]
    pub struct Usage {
        pub prompt_tokens: i32,
        pub completion_tokens: i32,
        pub total_tokens: i32,
    }
}

#[cfg(feature = "images")]
pub mod images {
    use serde::{Deserialize, Serialize};

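    /// A minimal builder sketch (the prompt and size are illustrative values):
    /// ```rust
    /// use openai_rs_api::core::models::images::ImageCreateParameters;
    ///
    /// let params = ImageCreateParameters::new("A cute baby sea otter".to_string())
    ///     .num_images(2)
    ///     .image_size("512x512".to_string())
    ///     .build();
    /// ```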
    #[derive(Debug, Serialize, Deserialize)]
    pub struct ImageCreateParameters {
        /// A text description of the desired image(s). The maximum length is 1000 characters.
        pub prompt: String,
        /// The number of images to generate. Must be between 1 and 10.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "n")]
        pub num_images: Option<i32>,
        /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "size")]
        pub image_size: Option<String>,
        /// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub response_format: Option<String>, // url or b64_json
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
        /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub user: Option<String>,
    }

    impl ImageCreateParameters {
        pub fn new(prompt: String) -> Self {
            Self {
                prompt,
                num_images: None,
                image_size: None,
                response_format: None,
                user: None,
            }
        }

        pub fn num_images(mut self, num_images: i32) -> Self {
            self.num_images = Some(num_images);
            self
        }

        pub fn image_size(mut self, image_size: String) -> Self {
            self.image_size = Some(image_size);
            self
        }

        pub fn response_format(mut self, response_format: String) -> Self {
            self.response_format = Some(response_format);
            self
        }

        pub fn user(mut self, user: String) -> Self {
            self.user = Some(user);
            self
        }

        pub fn build(self) -> ImageCreateParameters {
            self
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct ImageEditParameters {
        /// The image to edit. Must be a valid PNG file, less than 4MB, and square.
        /// If mask is not provided, image must have transparency, which will be used as the mask.
        pub image: String,
        /// An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited.
        /// Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub mask: Option<String>,
        /// A text description of the desired image(s). The maximum length is 1000 characters.
        pub prompt: String,
        /// The number of images to generate. Must be between 1 and 10.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "n")]
        pub num_images: Option<i32>,
        /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "size")]
        pub image_size: Option<String>,
        /// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub response_format: Option<String>, // url or b64_json
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
        /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub user: Option<String>,
    }

    impl ImageEditParameters {
        // builder pattern
        pub fn new(image: String, prompt: String) -> Self {
            Self {
                image,
                mask: None,
                prompt,
                num_images: None,
                image_size: None,
                response_format: None,
                user: None,
            }
        }

        pub fn mask(mut self, mask: String) -> Self {
            self.mask = Some(mask);
            self
        }

        pub fn num_images(mut self, num_images: i32) -> Self {
            self.num_images = Some(num_images);
            self
        }

        pub fn image_size(mut self, image_size: String) -> Self {
            self.image_size = Some(image_size);
            self
        }

        pub fn response_format(mut self, response_format: String) -> Self {
            self.response_format = Some(response_format);
            self
        }

        pub fn user(mut self, user: String) -> Self {
            self.user = Some(user);
            self
        }

        pub fn build(self) -> ImageEditParameters {
            self
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct ImageVariationParameters {
        /// The image to use as the basis for the variation(s). Must be a valid PNG file,
        /// less than 4MB, and square.
        pub image: String,
        /// The number of images to generate. Must be between 1 and 10.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "n")]
        pub num_images: Option<i32>,
        /// The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`.
        #[serde(skip_serializing_if = "Option::is_none")]
        #[serde(rename = "size")]
        pub image_size: Option<String>,
        /// The format in which the generated images are returned. Must be one of `url` or `b64_json`.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub response_format: Option<String>, // url or b64_json
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
        /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub user: Option<String>,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct ImageResponse {
        /// The timestamp (in seconds since the Unix epoch) when the request was made.
        pub created: usize,
        /// A vector of `ImageData` structs containing the URLs of the generated images.
        pub data: Vec<ImageData>,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct ImageData {
        /// The URL of the generated image.
        pub url: String,
    }
}

#[cfg(feature = "embeddings")]
pub mod embeddings {
    use serde::{Deserialize, Serialize};

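    /// A minimal builder sketch (the model ID and input text are illustrative):
    /// ```rust
    /// use openai_rs_api::core::models::embeddings::EmbeddingParameters;
    ///
    /// let params = EmbeddingParameters::new(
    ///     "text-embedding-ada-002".to_string(),
    ///     "The food was delicious and the waiter was friendly.".to_string(),
    /// )
    /// .build();
    /// ```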
    #[derive(Debug, Serialize, Deserialize)]
    pub struct EmbeddingParameters {
        /// ID of the model to use. You can use the List models API to see all of your available models,
        /// or see our Model overview for descriptions of them.
        pub model: String,
        /// Input text to embed, encoded as a string or array of tokens. To embed multiple
        /// inputs in a single request, pass an array of strings or array of token arrays.
        /// Each input must not exceed the max input tokens for the model (8191 tokens for text-embedding-ada-002).
        pub input: String,
        /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
        /// [Learn more](https://platform.openai.com/docs/guides/safety-best-practices/end-user-ids).
        #[serde(skip_serializing_if = "Option::is_none")]
        pub user: Option<String>,
    }

    impl EmbeddingParameters {
        // builder pattern
        pub fn new(model: String, input: String) -> Self {
            Self {
                model,
                input,
                user: None,
            }
        }

        pub fn user(mut self, user: String) -> Self {
            self.user = Some(user);
            self
        }

        pub fn build(self) -> EmbeddingParameters {
            self
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct EmbeddingResponse {
        /// A string representing the type of object returned. In this case, it should always be "list".
        pub object: String,
        /// A vector of `EmbeddingData` representing the embedding of the input text.
        pub data: Vec<EmbeddingData>,
        /// ID of the model used for the embedding.
        pub model: String,
        /// An object containing information about the API usage for the request.
        pub usage: Usage,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct EmbeddingData {
        /// A string representing the type of object returned. In this case, it should always be "embedding".
        pub object: String,
        /// A vector of 32-bit floating point numbers representing the embedding of the input text. The length of the vector depends on the model used for the embedding.
        pub embedding: Vec<f32>,
        /// An integer representing the index of the input text in the request. This is useful when multiple inputs are passed in a single request.
        pub index: i32,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct Usage {
        /// An integer representing the number of tokens used in the prompt for the API request.
        pub prompt_tokens: i32,
        /// An integer representing the total number of tokens used in the API request, including the prompt tokens.
        pub total_tokens: i32,
    }
}

#[cfg(feature = "audio")]
pub mod audio {
    use serde::{Deserialize, Serialize};

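    /// A minimal builder sketch (the file path is an illustrative value):
    /// ```rust
    /// use openai_rs_api::core::models::audio::TranscriptionParameters;
    ///
    /// let params = TranscriptionParameters::new(
    ///     "audio.mp3".to_string(),
    ///     "whisper-1".to_string(),
    /// )
    /// .language("en".to_string())
    /// .build();
    /// ```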
    #[derive(Debug, Serialize)]
    pub struct TranscriptionParameters {
        /// The audio file object (not file name) to transcribe, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
        pub file: String,
        /// ID of the model to use. Only `whisper-1` is currently available.
        pub model: String,
        /// An optional text to guide the model's style or continue a previous audio segment. The prompt should match the audio language.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub prompt: Option<String>,
        /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub response_format: Option<String>,
        /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2
        /// will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature
        /// until certain thresholds are hit.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub temperature: Option<f32>,
        /// The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub language: Option<String>,
    }

    impl TranscriptionParameters {
        pub fn new(file: String, model: String) -> Self {
            Self {
                file,
                model,
                prompt: None,
                response_format: None,
                temperature: None,
                language: None,
            }
        }

        pub fn prompt(mut self, prompt: String) -> Self {
            self.prompt = Some(prompt);
            self
        }

        pub fn response_format(mut self, response_format: String) -> Self {
            self.response_format = Some(response_format);
            self
        }

        pub fn temperature(mut self, temperature: f32) -> Self {
            self.temperature = Some(temperature);
            self
        }

        pub fn language(mut self, language: String) -> Self {
            self.language = Some(language);
            self
        }

        pub fn build(self) -> TranscriptionParameters {
            self
        }
    }

    #[derive(Debug, Serialize)]
    pub struct TranslationParameters {
        /// The audio file object (not file name) to translate, in one of these formats: mp3, mp4, mpeg, mpga, m4a, wav, or webm.
        pub file: String,
        /// ID of the model to use. Only `whisper-1` is currently available.
        pub model: String,
        /// An optional text to guide the model's style or continue a previous audio segment. The prompt should be in English.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub prompt: Option<String>,
        /// The format of the transcript output, in one of these options: json, text, srt, verbose_json, or vtt.
        /// The default is json.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub response_format: Option<String>,
        /// The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2
        /// will make it more focused and deterministic. If set to 0, the model will use log probability to automatically increase the temperature
        /// until certain thresholds are hit.
        /// The default is 0.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub temperature: Option<f32>,
    }

    impl TranslationParameters {
        pub fn new(file: String, model: String) -> Self {
            Self {
                file,
                model,
                prompt: None,
                response_format: None,
                temperature: None,
            }
        }

        pub fn prompt(mut self, prompt: String) -> Self {
            self.prompt = Some(prompt);
            self
        }

        pub fn response_format(mut self, response_format: String) -> Self {
            self.response_format = Some(response_format);
            self
        }

        pub fn temperature(mut self, temperature: f32) -> Self {
            self.temperature = Some(temperature);
            self
        }

        pub fn build(self) -> TranslationParameters {
            self
        }
    }

    #[derive(Debug, Deserialize)]
    pub struct TextResponse {
        /// The generated text from the OpenAI API.
        pub text: String,
    }
}

#[cfg(feature = "files")]
pub mod files {
    use serde::{Deserialize, Serialize};

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FileList {
        /// A vector of `FileData` objects representing the files returned by the API.
        pub data: Vec<FileData>,
        /// A string representing the object type returned by the API. This should always be "list".
        pub object: String,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FileData {
        /// The unique identifier for the file.
        pub id: String,
        /// The type of object, which should always be "file".
        pub object: String,
        /// The size of the file in bytes.
        pub bytes: u32,
        /// The Unix timestamp (in seconds) when the file was created.
        pub created_at: u64,
        /// The name of the file.
        pub filename: String,
        /// The intended purpose of the file.
        pub purpose: String,
    }

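    /// A minimal construction sketch (the file name is an illustrative value):
    /// ```rust
    /// use openai_rs_api::core::models::files::FileUpload;
    ///
    /// let upload = FileUpload {
    ///     file: "training_data.jsonl".to_string(),
    ///     purpose: "fine-tune".to_string(),
    /// };
    /// ```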
    #[derive(Debug, Serialize, Deserialize)]
    pub struct FileUpload {
        /// Name of the [JSON Lines](https://jsonlines.readthedocs.io/en/latest/) file to be uploaded.
        ///
        /// If the purpose is set to "fine-tune", each line is a JSON record with "prompt" and "completion"
        /// fields representing your [training examples.](https://platform.openai.com/docs/guides/fine-tuning/prepare-training-data)
        pub file: String,
        /// The intended purpose of the uploaded documents.
        ///
        /// Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.
        pub purpose: String,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct DeleteResponse {
        /// The unique identifier for the deleted object.
        pub id: String,
        /// The type of object that was deleted.
        pub object: String,
        /// A boolean indicating whether the object was successfully deleted.
        pub deleted: bool,
    }
}

#[cfg(feature = "fine_tunes")]
pub mod fine_tunes {
    use serde::{Deserialize, Serialize};

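    /// A minimal builder sketch (the file ID and suffix are illustrative values;
    /// `build` validates the classification-related options):
    /// ```rust
    /// use openai_rs_api::core::models::fine_tunes::CreateFineTuneParameters;
    ///
    /// let params = CreateFineTuneParameters::new("file-abc123".to_string())
    ///     .epochs(4)
    ///     .suffix("custom-model-name".to_string())
    ///     .build()
    ///     .expect("parameters should pass validation");
    /// ```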
    #[derive(Debug, Serialize, Deserialize)]
    pub struct CreateFineTuneParameters {
        /// The ID of an uploaded file that contains training data.
        pub training_file: String,
        /// The ID of an uploaded file that contains validation data.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub validation_file: Option<String>,
        /// The name of the base model to use for fine-tuning.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub model: Option<String>,
        /// The number of epochs to train the model for.
        /// Serialized as `n_epochs`, the parameter name the API expects.
        #[serde(rename = "n_epochs")]
        #[serde(skip_serializing_if = "Option::is_none")]
        pub epochs: Option<u32>,
        /// The batch size to use for training.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub batch_size: Option<u32>,
        /// The learning rate multiplier to use for training.
        /// The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub learning_rate_multiplier: Option<f32>,
        /// The weight to use for loss on the prompt tokens. This controls how much the model tries
        /// to learn to generate the prompt (as compared to the completion which always has a weight of 1.0),
        /// and can add a stabilizing effect to training when completions are short.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub prompt_loss_weight: Option<f32>,
        /// If set, we calculate classification-specific metrics such as accuracy and F-1 score using
        /// the validation set at the end of every epoch. These metrics can be viewed in the results file.
        ///
        /// In order to compute classification metrics, you must provide a `validation_file`.
        /// Additionally, you must specify `classification_n_classes` for multiclass classification or
        /// `classification_positive_class` for binary classification.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub compute_classification_metrics: Option<bool>,
        /// The number of classes in a classification task.
        ///
        /// This parameter is required for multiclass classification.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub classification_n_classes: Option<u32>,
        /// The positive class in binary classification.
        ///
        /// This parameter is needed to generate precision, recall,
        /// and F1 metrics when doing binary classification.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub classification_positive_class: Option<String>,
        /// If this is provided, we calculate F-beta scores at the specified beta values.
        /// The F-beta score is a generalization of F-1 score. This is only used for binary classification.
        ///
        /// With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight.
        /// A larger beta score puts more weight on recall and less on precision. A smaller beta score puts
        /// more weight on precision and less on recall.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub classification_beta: Option<f32>,
        /// A string of up to 40 characters that will be added to your fine-tuned model name.
        ///
        /// For example, a suffix of "custom-model-name" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub suffix: Option<String>,
    }

    impl CreateFineTuneParameters {
        pub fn new(training_file: String) -> Self {
            Self {
                training_file,
                validation_file: None,
                model: None,
                epochs: None,
                batch_size: None,
                learning_rate_multiplier: None,
                prompt_loss_weight: None,
                compute_classification_metrics: None,
                classification_n_classes: None,
                classification_positive_class: None,
                classification_beta: None,
                suffix: None,
            }
        }

        pub fn validation_file(mut self, validation_file: String) -> Self {
            self.validation_file = Some(validation_file);
            self
        }

        pub fn model(mut self, model: String) -> Self {
            self.model = Some(model);
            self
        }

        pub fn epochs(mut self, epochs: u32) -> Self {
            self.epochs = Some(epochs);
            self
        }

        pub fn batch_size(mut self, batch_size: u32) -> Self {
            self.batch_size = Some(batch_size);
            self
        }

        pub fn learning_rate_multiplier(mut self, learning_rate_multiplier: f32) -> Self {
            self.learning_rate_multiplier = Some(learning_rate_multiplier);
            self
        }

        pub fn prompt_loss_weight(mut self, prompt_loss_weight: f32) -> Self {
            self.prompt_loss_weight = Some(prompt_loss_weight);
            self
        }

        pub fn compute_classification_metrics(
            mut self,
            compute_classification_metrics: bool,
        ) -> Self {
            self.compute_classification_metrics = Some(compute_classification_metrics);
            self
        }

        pub fn classification_n_classes(mut self, classification_n_classes: u32) -> Self {
            self.classification_n_classes = Some(classification_n_classes);
            self
        }

        pub fn classification_positive_class(
            mut self,
            classification_positive_class: String,
        ) -> Self {
            self.classification_positive_class = Some(classification_positive_class);
            self
        }

        pub fn classification_beta(mut self, classification_beta: f32) -> Self {
            self.classification_beta = Some(classification_beta);
            self
        }

        pub fn suffix(mut self, suffix: String) -> Self {
            self.suffix = Some(suffix);
            self
        }

        pub fn build(self) -> Result<CreateFineTuneParameters, String> {
            // Computing classification metrics requires a validation file, plus
            // classification_n_classes (multiclass) or classification_positive_class (binary).
            if self.compute_classification_metrics.unwrap_or(false) {
                if self.validation_file.is_none() {
                    return Err("You must provide a validation_file to compute classification metrics.".to_string());
                }
                if self.classification_n_classes.is_none()
                    && self.classification_positive_class.is_none()
                {
                    return Err("You must set classification_n_classes (multiclass) or classification_positive_class (binary) to compute classification metrics.".to_string());
                }
            }

            if self.classification_beta.is_some() && self.classification_positive_class.is_none() {
                return Err("You must set classification_positive_class if you provide classification_beta.".to_string());
            }

            Ok(self)
        }
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneList {
        /// The object type, which is always "list".
        pub object: String,
        /// A vector of `FineTuneListData` structs representing the fine-tune jobs.
        pub data: Vec<FineTuneListData>,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneRetriveData {
        /// The ID of the fine-tune job.
        pub id: String,
        /// The object type, which is always "fine-tune".
        pub object: String,
        /// The name of the base model that was fine-tuned.
        pub model: String,
        /// The Unix timestamp (in seconds) when the fine-tune job was created.
        pub created_at: i64,
        /// A vector of `FineTuneEvent` structs representing the events that occurred during fine-tuning.
        pub events: Vec<FineTuneEvent>,
        /// The ID of the fine-tuned model that was created.
        pub fine_tuned_model: Option<String>,
        /// The hyperparameters used for fine-tuning the model.
        pub hyperparams: FineTuneHyperparams,
        /// The ID of the organization that created the fine-tuned model.
        pub organization_id: String,
        /// A vector of URLs pointing to the result files generated during fine-tuning.
        pub result_files: Vec<String>,
        /// The status of the fine-tune job.
        pub status: String,
        /// A vector of `FineTuneFiles` structs representing the validation files used during fine-tuning.
        pub validation_files: Vec<FineTuneFiles>,
        /// A vector of `FineTuneFiles` structs representing the training files used during fine-tuning.
        pub training_files: Vec<FineTuneFiles>,
        /// The Unix timestamp (in seconds) when the fine-tune job was last updated.
        pub updated_at: i64,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneListData {
        /// The ID of the fine-tune job.
        pub id: String,
        /// The object type, which is always "fine-tune".
        pub object: String,
        /// The name of the base model that was fine-tuned.
        pub model: String,
        /// The Unix timestamp (in seconds) when the fine-tune job was created.
        pub created_at: i64,
        /// The ID of the fine-tuned model that was created.
        pub fine_tuned_model: Option<String>,
        /// The hyperparameters used for fine-tuning the model.
        pub hyperparams: FineTuneHyperparams,
        /// The ID of the organization that created the fine-tuned model.
        pub organization_id: String,
        /// A vector of URLs pointing to the result files generated during fine-tuning.
        pub result_files: Vec<String>,
        /// The status of the fine-tune job.
        pub status: String,
        /// A vector of `FineTuneFiles` structs representing the validation files used during fine-tuning.
        pub validation_files: Vec<FineTuneFiles>,
        /// A vector of `FineTuneFiles` structs representing the training files used during fine-tuning.
        pub training_files: Vec<FineTuneFiles>,
        /// The Unix timestamp (in seconds) when the fine-tune job was last updated.
        pub updated_at: i64,
    }

    /// A struct representing the hyperparameters used for fine-tuning a model.
    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneHyperparams {
        /// The batch size used during fine-tuning.
        pub batch_size: u32,
        /// The number of epochs used during fine-tuning.
        /// Serialized as `n_epochs`, the field name the API returns.
        #[serde(rename = "n_epochs")]
        pub epochs: u32,
        /// A multiplier applied to the learning rate during fine-tuning.
        pub learning_rate_multiplier: f32,
        /// The weight given to the prompt loss during fine-tuning.
        pub prompt_loss_weight: f32,
    }

    /// A struct representing a file used during fine-tuning a model.
    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneFiles {
        /// The ID of the file.
        pub id: String,
        /// The object type, which is always "file".
        pub object: String,
        /// The size of the file in bytes.
        pub bytes: u32,
        /// The Unix timestamp (in seconds) when the file was created.
        pub created_at: i64,
        /// The name of the file.
        pub filename: String,
        /// The purpose of the file, which can be "training" or "validation".
        pub purpose: String,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneEventList {
        /// The object type, which is always "list".
        pub object: String,
        /// A vector of `FineTuneEvent` structs representing the fine-tune events.
        pub data: Vec<FineTuneEvent>,
    }

    /// A struct representing a fine-tune event.
    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneEvent {
        /// The object type, which is always "fine-tune-event".
        pub object: String,
        /// The Unix timestamp (in seconds) when the event was created.
        pub created_at: i64,
        /// The level of the event, which can be "info", "warning", or "error".
        pub level: String,
        /// The message associated with the event.
        pub message: String,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct FineTuneDelete {
        /// The ID of the fine-tuned model that was deleted.
        pub id: String,
        /// The type of object that was deleted.
        pub object: String,
        /// A boolean indicating whether the fine-tuned model was successfully deleted.
        pub deleted: bool,
    }
}

#[cfg(feature = "moderations")]
pub mod moderations {
    use serde::{Deserialize, Serialize};

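    /// A minimal construction sketch (the model ID and input are illustrative):
    /// ```rust
    /// use openai_rs_api::core::models::moderations::TextModerationParameters;
    ///
    /// let params = TextModerationParameters {
    ///     model: Some("text-moderation-latest".to_string()),
    ///     input: "Sample text to classify.".to_string(),
    /// };
    /// ```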
    #[derive(Debug, Serialize, Deserialize)]
    pub struct TextModerationParameters {
        /// The ID of the model to use for moderation.
        #[serde(skip_serializing_if = "Option::is_none")]
        pub model: Option<String>,
        /// The text to moderate.
        pub input: String,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct TextModerationResult {
        /// The ID of the moderation result.
        pub id: String,
        /// The name of the model used for moderation.
        pub model: String,
        /// The moderation results.
        pub results: Vec<TextModerationCategory>,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct TextModerationCategory {
        /// Whether the text was flagged for any category.
        pub flagged: bool,
        /// The categories and their corresponding boolean values.
        pub categories: TextModerationCategoryValues,
        /// The scores for each category.
        pub category_scores: TextModerationCategoryScores,
    }

    // The API's category keys use slashes and hyphens (e.g. "self-harm/intent"),
    // so the snake_case fields are mapped with serde renames.
    #[derive(Debug, Serialize, Deserialize)]
    pub struct TextModerationCategoryValues {
        /// Whether the text was flagged for sexual content.
        pub sexual: bool,
        /// Whether the text was flagged for hate speech.
        pub hate: bool,
        /// Whether the text was flagged for harassment.
        pub harassment: bool,
        /// Whether the text was flagged for self-harm.
        #[serde(rename = "self-harm")]
        pub self_harm: bool,
        /// Whether the text was flagged for sexual content involving minors.
        #[serde(rename = "sexual/minors")]
        pub sexual_minors: bool,
        /// Whether the text was flagged for hate speech with threatening language.
        #[serde(rename = "hate/threatening")]
        pub hate_threatening: bool,
        /// Whether the text was flagged for graphic violence.
        #[serde(rename = "violence/graphic")]
        pub violence_graphic: bool,
        /// Whether the text was flagged for self-harm with intent.
        #[serde(rename = "self-harm/intent")]
        pub self_harm_intent: bool,
        /// Whether the text was flagged for self-harm instructions.
        #[serde(rename = "self-harm/instructions")]
        pub self_harm_instructions: bool,
        /// Whether the text was flagged for harassment with threatening language.
        #[serde(rename = "harassment/threatening")]
        pub harassment_threatening: bool,
        /// Whether the text was flagged for violence.
        pub violence: bool,
    }

    #[derive(Debug, Serialize, Deserialize)]
    pub struct TextModerationCategoryScores {
        /// The score for sexual content.
        pub sexual: f64,
        /// The score for hate speech.
        pub hate: f64,
        /// The score for harassment.
        pub harassment: f64,
        /// The score for self-harm.
        #[serde(rename = "self-harm")]
        pub self_harm: f64,
        /// The score for sexual content involving minors.
        #[serde(rename = "sexual/minors")]
        pub sexual_minors: f64,
        /// The score for hate speech with threatening language.
        #[serde(rename = "hate/threatening")]
        pub hate_threatening: f64,
        /// The score for graphic violence.
        #[serde(rename = "violence/graphic")]
        pub violence_graphic: f64,
        /// The score for self-harm with intent.
        #[serde(rename = "self-harm/intent")]
        pub self_harm_intent: f64,
        /// The score for self-harm instructions.
        #[serde(rename = "self-harm/instructions")]
        pub self_harm_instructions: f64,
        /// The score for harassment with threatening language.
        #[serde(rename = "harassment/threatening")]
        pub harassment_threatening: f64,
        /// The score for violence.
        pub violence: f64,
    }
}
1355}