pub struct Client { /* private fields */ }
This is the main interface for interacting with the OpenAI API.
Implementations
impl Client
pub fn new(api_key: &str) -> Client
Creates a new client. This automatically builds the reqwest::Client used internally.
Examples found in repository
async fn main() {
    let client = openai_rust::Client::new(&std::env::var("OPENAI_API_KEY").unwrap());
    let args = openai_rust::chat::ChatArguments::new(
        "gpt-3.5-turbo",
        vec![openai_rust::chat::Message {
            role: "user".to_owned(),
            content: "Hello GPT!".to_owned(),
        }],
    );
    let res = client.create_chat(args).await.unwrap();
    println!("{}", res);
}
More examples
async fn main() {
    let client = openai_rust::Client::new(&std::env::var("OPENAI_API_KEY").unwrap());
    let args = openai_rust::chat::ChatArguments::new(
        "gpt-3.5-turbo",
        vec![openai_rust::chat::Message {
            role: "user".to_owned(),
            content: "Hello GPT!".to_owned(),
        }],
    );
    let mut res = client.create_chat_stream(args).await.unwrap();
    while let Some(chunk) = res.next().await {
        print!("{}", chunk.unwrap());
        std::io::stdout().flush().unwrap();
    }
}
pub fn new_with_client(api_key: &str, req_client: reqwest::Client) -> Client
Build a client using your own reqwest::Client.
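No repository example is shown for this constructor; a minimal sketch of passing a preconfigured reqwest::Client (the timeout value here is illustrative, not part of this crate's API):

use std::time::Duration;

// Build a reqwest::Client with custom settings; the timeout is an arbitrary example.
let req_client = reqwest::Client::builder()
    .timeout(Duration::from_secs(30))
    .build()
    .unwrap();
let client = openai_rust::Client::new_with_client(api_key, req_client);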
pub async fn list_models(&self) -> Result<Vec<Model>, Error>
List and describe the various models available in the API. You can refer to the Models documentation to understand what models are available and the differences between them.
let client = openai_rust::Client::new(api_key);
let models = client.list_models().await.unwrap();
See https://platform.openai.com/docs/api-reference/models/list.
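For instance, to print each returned model (assuming Model exposes a public id field, which this page does not show):

let client = openai_rust::Client::new(api_key);
for model in client.list_models().await.unwrap() {
    // `id` is assumed to be a public field on Model.
    println!("{}", model.id);
}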
pub async fn create_chat(
    &self,
    args: ChatArguments,
) -> Result<ChatCompletion, Error>
Given a list of messages comprising a conversation, the model will return a response.
See https://platform.openai.com/docs/api-reference/chat.
let client = openai_rust::Client::new(api_key);
let args = openai_rust::chat::ChatArguments::new("gpt-3.5-turbo", vec![
openai_rust::chat::Message {
role: "user".to_owned(),
content: "Hello GPT!".to_owned(),
}
]);
let res = client.create_chat(args).await.unwrap();
println!("{}", res.choices[0].message.content);
pub async fn create_chat_stream(
    &self,
    args: ChatArguments,
) -> Result<ChatCompletionChunkStream>
Like Client::create_chat but with streaming.
See https://platform.openai.com/docs/api-reference/chat.
This method returns a stream of chat::stream::ChatCompletionChunk values. Consume it with futures_util::StreamExt::next.
use openai_rust::futures_util::StreamExt;
use std::io::Write; // needed for flush()
let mut res = client.create_chat_stream(args).await.unwrap();
while let Some(chunk) = res.next().await {
    print!("{}", chunk.unwrap());
    std::io::stdout().flush().unwrap();
}
pub async fn create_completion(
    &self,
    args: CompletionArguments,
) -> Result<CompletionResponse>
Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
See https://platform.openai.com/docs/api-reference/completions
let c = openai_rust::Client::new(api_key);
let args = openai_rust::completions::CompletionArguments::new("text-davinci-003", "The quick brown fox".to_owned());
println!("{}", c.create_completion(args).await.unwrap().choices[0].text);
pub async fn create_edit(&self, args: EditArguments) -> Result<EditResponse>
👎 Deprecated: use the chat API instead.
Given a prompt and an instruction, the model will return an edited version of the prompt.
See https://platform.openai.com/docs/api-reference/edits
let c = openai_rust::Client::new(api_key);
let args = openai_rust::edits::EditArguments::new("text-davinci-edit-001", "The quick brown fox".to_owned(), "Complete this sentence.".to_owned());
println!("{}", c.create_edit(args).await.unwrap().to_string());
pub async fn create_embeddings(
    &self,
    args: EmbeddingsArguments,
) -> Result<EmbeddingsResponse>
Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
See https://platform.openai.com/docs/api-reference/embeddings
let c = openai_rust::Client::new(api_key);
let args = openai_rust::embeddings::EmbeddingsArguments::new("text-embedding-ada-002", "The food was delicious and the waiter...".to_owned());
println!("{:?}", c.create_embeddings(args).await.unwrap().data);
pub async fn create_image(&self, args: ImageArguments) -> Result<Vec<String>>
Creates an image given a prompt.
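No example is shown for this method; a minimal sketch, assuming images::ImageArguments::new takes the prompt text (the constructor signature is not confirmed on this page):

let client = openai_rust::Client::new(api_key);
// Constructor argument assumed to be the prompt.
let args = openai_rust::images::ImageArguments::new("A cute baby sea otter".to_owned());
// Each String in the result is one generated image (presumably a URL).
for image in client.create_image(args).await.unwrap() {
    println!("{}", image);
}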