// davinci/lib.rs

1//! Use the davinci model from the OpenAI API
2//!
3//! This library provides a function for asking questions to the OpenAI Davinci model and getting a response.
4//! # davinci
5//! `davinci` is the main function, and it has 4 parameters:
//! * `api_key` -> String - This is the OpenAI API key.
7//!     It can be obtained [here](https://beta.openai.com/account/api-keys)
8//! * `context` -> String - The context for the question.
9//! * `question` -> String - The question or phrase to ask the model.
10//! * `tokens` -> i32 - The maximum number of tokens to use in the response.
11//!
12//! ## `context` and `question`
13//! The `context` and `question` are the prompt for the model.
14//! A prompt is a text string given to a model as input that gives the model a specific task to perform.
15//!
16//! Providing good and strong context to the model
17//! (such as by giving a few high-quality examples of desired behavior prior to the new input)
18//! can make it easier to obtain better and desired outputs.
19//!
20//! ## `tokens`
//! `tokens` is the maximum number of tokens to be generated (counting the prompt) by a model.
22//!
23//! The GPT family of models process text using tokens, which are common sequences of characters found in text.
24//! The models understand the statistical relationships between these tokens, and excel at producing the next token in a sequence of tokens.
25//!
26//!
27//! Token generally corresponds to ~4 characters of text for common English text.
28//! This translates to roughly ¾ of a word (so 100 tokens ~= 75 words).
29//!
//! Another thing to keep in mind is that the highest `tokens` value is 2048 (4096 in newer models).
31//!
32//! One way to know the number of tokens your prompt has is using [this site](https://beta.openai.com/tokenizer)
33//!
34//! ## Example of usage
//! In this quick example we use davinci to find an answer to the user's question.
36//!
37//! ```
38//! use davinci::davinci;
39//! use std::io;
40//!
41//! fn main() {
42//!     let api: String = String::from("vj-JZkjskhdksKXOlncknjckukNKKnkJNKJNkNKNk");
43//!     let max_tokens: i32 = 100;
44//!     let context: String =
45//!         String::from("The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n\nHuman: Hello, who are you?\nAI: I am an AI created by OpenAI. How can I help you today?");
46//!     println!("What is your question?");
47//!     // Reading the user input
48//!     let mut question: String = String::new();
49//!     io::stdin()
50//!         .read_line(&mut question)
51//!         .expect("Error, you have to write something!");
52//!     let response: String = match davinci(api, context, question, max_tokens) {
53//!         Ok(res) => res,
54//!         Err(error) => error.to_string(),
55//!     };
56//!     println!("{}", response);
57//! }
58//! ```
59//!
60use reqwest::{Client, Error, Response};
61use serde::{Deserialize, Serialize};
62
/// Request body serialized as JSON for the OpenAI `/v1/completions` endpoint.
#[derive(Debug, Serialize, Deserialize)]
struct Parameters {
    // Model identifier sent to the API (set to "text-davinci-003" by `davinci`).
    model: String,
    // Full prompt text: the caller's context followed by the question.
    prompt: String,
    // Sampling temperature; `davinci` sends 0.9.
    temperature: f64,
    // Maximum number of tokens to generate (counting the prompt).
    max_tokens: i32,
    // Nucleus-sampling cutoff; `davinci` always sends 1.
    // NOTE(review): the OpenAI API defines `top_p` as a float in [0, 1];
    // `u8` only allows whole numbers — confirm this is intentional.
    top_p: u8,
    // Frequency penalty forwarded to the API; `davinci` sends 0.0.
    frequency_penalty: f64,
    // Presence penalty forwarded to the API; `davinci` sends 0.6.
    presence_penalty: f64,
    // Sequences at which the model stops generating ("\n" in `davinci`).
    stop: Vec<String>,
}
74
/// One completion candidate inside the API response's `choices` array.
#[derive(Debug, Serialize, Deserialize)]
struct Choice {
    // The generated completion text (this is what `davinci` returns).
    text: String,
    // Position of this choice within the `choices` array.
    index: u8,
    // Deserialized as an optional integer here.
    // NOTE(review): the API documents `logprobs` as an object or null; an
    // `Option<i32>` only deserializes while requests never ask for logprobs.
    logprobs: Option<i32>,
    // Reason the model stopped generating, as reported by the API.
    finish_reason: String,
}
82
/// Token accounting section of the API response.
#[derive(Debug, Serialize, Deserialize)]
struct Usage {
    // Tokens consumed by the prompt.
    prompt_tokens: i32,
    // Tokens generated for the completion.
    completion_tokens: i32,
    // Sum of prompt and completion tokens.
    total_tokens: i32,
}
89
/// Top-level JSON body returned by the `/v1/completions` endpoint.
#[derive(Debug, Serialize, Deserialize)]
struct OpenAIResponse {
    // Identifier the API assigned to this completion.
    id: String,
    // Object type tag returned by the API.
    object: String,
    // Creation timestamp — presumably Unix seconds; verify against the API docs.
    created: u64,
    // Model that produced the completion.
    model: String,
    // Generated completions; `davinci` reads only the first entry.
    choices: Vec<Choice>,
    // Token usage accounting for this request.
    usage: Usage,
}
99#[tokio::main]
100/// # Parameters
101///
102/// * `api_key` - The OpenAI API key.
103///     This must be well written, as it will throw an error if not.
104/// * `context` - The context for the question.
105///     The context is important for good responses as it tells the model how it should be it's behavior.
106/// * `question` - The question or phrase to ask the model.
107/// * `tokens` - The maximum number of tokens to use in the response.
108///
109/// # Returns
110///
111/// Returns the model's response as a Ok(String) or an Error.
112///
113pub async fn davinci(
114    api_key: String,
115    context: String,
116    question: String,
117    tokens: i32,
118) -> Result<String, Error> {
119    let bearer = String::from("Bearer ") + &api_key;
120
121    let resp: String = format!("{}.\nH: {}.\nIA:", context, question);
122    let prompt = Parameters {
123        model: String::from("text-davinci-003"),
124        prompt: resp,
125        temperature: 0.9,
126        max_tokens: tokens,
127        top_p: 1,
128        frequency_penalty: 0.0,
129        presence_penalty: 0.6,
130        stop: vec![String::from("\n")],
131    };
132
133    let client = Client::new();
134    let resp: Response = client
135        .post("https://api.openai.com/v1/completions")
136        .header("Content-Type", "application/json")
137        .header("Authorization", bearer)
138        .json(&prompt)
139        .send()
140        .await
141        .expect("Error while getting the response");
142
143    let openai_response: OpenAIResponse = resp
144        .json()
145        .await
146        .expect("Error while generating the response");
147
148    let formatted_response = format!("{}", openai_response.choices[0].text);
149    return Ok(formatted_response);
150}