Struct openai_rust::Client
pub struct Client { /* private fields */ }
This is the main interface for interacting with the API.
Implementations
impl Client
pub fn new(api_key: &str) -> Client
Create a new client. This will automatically build a reqwest::Client used internally.
Examples found in repository
#[tokio::main] // runtime attribute assumed; the repository example is shown truncated in the docs
async fn main() {
    let client = openai_rust::Client::new(&std::env::var("OPENAI_API_KEY").unwrap());
    let args = openai_rust::chat::ChatArguments::new(
        "gpt-3.5-turbo",
        vec![openai_rust::chat::Message {
            role: "user".to_owned(),
            content: "Hello GPT!".to_owned(),
        }],
    );
    let res = client.create_chat(args).await.unwrap();
    println!("{}", res);
}
More examples
use openai_rust::futures_util::StreamExt;
use std::io::Write; // needed for flush()

#[tokio::main] // runtime attribute assumed; the repository example is shown truncated in the docs
async fn main() {
    let client = openai_rust::Client::new(&std::env::var("OPENAI_API_KEY").unwrap());
    let args = openai_rust::chat::ChatArguments::new(
        "gpt-3.5-turbo",
        vec![openai_rust::chat::Message {
            role: "user".to_owned(),
            content: "Hello GPT!".to_owned(),
        }],
    );
    let mut res = client.create_chat_stream(args).await.unwrap();
    while let Some(events) = res.next().await {
        for event in events.unwrap() {
            print!("{}", event);
            std::io::stdout().flush().unwrap();
        }
    }
}
pub fn new_with_client(api_key: &str, req_client: reqwest::Client) -> Client
Build a client using your own reqwest::Client.
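For example, a minimal sketch that supplies a pre-configured reqwest::Client (the timeout value is illustrative):
// Build a reqwest::Client with a custom timeout (value chosen for illustration).
let req_client = reqwest::Client::builder()
    .timeout(std::time::Duration::from_secs(30))
    .build()
    .unwrap();
let client = openai_rust::Client::new_with_client(api_key, req_client);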
pub async fn list_models(&self) -> Result<Vec<Model>, Error>
List and describe the various models available in the API. You can refer to the Models documentation to understand what models are available and the differences between them.
let client = openai_rust::Client::new(api_key);
let models = client.list_models().await.unwrap();
See https://platform.openai.com/docs/api-reference/models/list.
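A short follow-up sketch, assuming Model exposes a public id field (check the crate's models module for the exact shape):
// Print the identifier of each available model
// (the `id` field name is an assumption).
for model in models {
    println!("{}", model.id);
}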
pub async fn create_chat(&self, args: ChatArguments) -> Result<ChatResponse, Error>
Given a list of messages comprising a conversation, the model will return a response.
See https://platform.openai.com/docs/api-reference/chat.
let client = openai_rust::Client::new(api_key);
let args = openai_rust::chat::ChatArguments::new("gpt-3.5-turbo", vec![
    openai_rust::chat::Message {
        role: "user".to_owned(),
        content: "Hello GPT!".to_owned(),
    }
]);
let res = client.create_chat(args).await.unwrap();
println!("{}", res.choices[0].message.content);
Examples found in repository
See the chat example under Client::new above.
pub async fn create_chat_stream(&self, args: ChatArguments) -> Result<impl Stream<Item = Result<Vec<ChatResponseEvent>>>>
Like Client::create_chat, but with streaming.
See https://platform.openai.com/docs/api-reference/chat.
This method returns a stream. Calling next on it yields a vector of chat::stream::ChatResponseEvents.
use openai_rust::futures_util::StreamExt;
use std::io::Write; // needed for flush()
let mut res = client.create_chat_stream(args).await.unwrap();
while let Some(events) = res.next().await {
    for event in events.unwrap() {
        print!("{}", event.choices[0].delta.content.as_ref().unwrap_or(&"".to_owned()));
        std::io::stdout().flush().unwrap();
    }
}
Examples found in repository
See the streaming example under Client::new above.
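If the whole reply is wanted as a single String instead of being printed incrementally, a minimal sketch reusing client and args from the example above (and the same event shape as the snippet earlier) could accumulate the deltas:
use openai_rust::futures_util::StreamExt;

let mut reply = String::new();
let mut res = client.create_chat_stream(args).await.unwrap();
while let Some(events) = res.next().await {
    for event in events.unwrap() {
        // Append each streamed delta to the accumulated reply.
        if let Some(content) = &event.choices[0].delta.content {
            reply.push_str(content);
        }
    }
}
println!("{}", reply);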
pub async fn create_completion(&self, args: CompletionArguments) -> Result<CompletionResponse>
Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
See https://platform.openai.com/docs/api-reference/completions
let c = openai_rust::Client::new(api_key);
let args = openai_rust::completions::CompletionArguments::new("text-davinci-003", "The quick brown fox".to_owned());
println!("{}", c.create_completion(args).await.unwrap().choices[0].text);
pub async fn create_edit(&self, args: EditArguments) -> Result<EditResponse>
Given a prompt and an instruction, the model will return an edited version of the prompt.
See https://platform.openai.com/docs/api-reference/edits
let c = openai_rust::Client::new(api_key);
let args = openai_rust::edits::EditArguments::new("text-davinci-edit-001", "The quick brown fox".to_owned(), "Complete this sentence.".to_owned());
println!("{}", c.create_edit(args).await.unwrap().to_string());
pub async fn create_embeddings(&self, args: EmbeddingsArguments) -> Result<EmbeddingsResponse>
Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
See https://platform.openai.com/docs/api-reference/embeddings
let c = openai_rust::Client::new(api_key);
let args = openai_rust::embeddings::EmbeddingsArguments::new("text-embedding-ada-002", "The food was delicious and the waiter...".to_owned());
println!("{:?}", c.create_embeddings(args).await.unwrap().data);
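Embeddings are commonly compared with cosine similarity. A self-contained helper sketch (the f32 element type is an assumption; adjust to the crate's actual embedding type):
// Cosine similarity between two embedding vectors.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    dot / (norm_a * norm_b)
}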