Struct openai_rust::Client
source · pub struct Client { /* private fields */ }
Expand description
This is the main interface for interacting with the API.
Implementations§
source§impl Client
impl Client
sourcepub fn new(api_key: &str) -> Client
pub fn new(api_key: &str) -> Client
Create a new client. This will automatically build a reqwest::Client used internally.
Examples found in repository?
More examples
examples/chat_stream_example.rs (line 8)
(lines 7–22)
async fn main() {
let client = openai_rust::Client::new(&std::env::var("OPENAI_API_KEY").unwrap());
let args = openai_rust::chat::ChatArguments::new("gpt-3.5-turbo", vec![
openai_rust::chat::Message {
role: "user".to_owned(),
content: "Hello GPT!".to_owned(),
}
]);
let mut res = client.create_chat_stream(args).await.unwrap();
while let Some(events) = res.next().await {
for event in events.unwrap() {
print!("{}", event);
std::io::stdout().flush().unwrap();
}
}
}
sourcepub async fn list_models(&self) -> Result<Vec<Model>, Error>
pub async fn list_models(&self) -> Result<Vec<Model>, Error>
Lists the currently available models, and provides basic information about each one such as the owner and availability.
let client = openai_rust::Client::new(api_key);
let models = client.list_models().await.unwrap();
See https://platform.openai.com/docs/api-reference/models/list.
sourcepub async fn create_chat(
&self,
args: ChatArguments
) -> Result<ChatResponse, Error>
pub async fn create_chat( &self, args: ChatArguments ) -> Result<ChatResponse, Error>
Given a chat conversation, the model will return a chat completion response.
See https://platform.openai.com/docs/api-reference/chat.
let client = openai_rust::Client::new(api_key);
let args = openai_rust::chat::ChatArguments::new("gpt-3.5-turbo", vec![
openai_rust::chat::Message {
role: "user".to_owned(),
content: "Hello GPT!".to_owned(),
}
]);
let res = client.create_chat(args).await.unwrap();
println!("{}", res.choices[0].message.content);
sourcepub async fn create_chat_stream(
&self,
args: ChatArguments
) -> Result<impl Stream<Item = Result<Vec<ChatResponseEvent>>>>
pub async fn create_chat_stream( &self, args: ChatArguments ) -> Result<impl Stream<Item = Result<Vec<ChatResponseEvent>>>>
Like Client::create_chat but with streaming.
See https://platform.openai.com/docs/api-reference/chat.
This method returns a stream. Calling next on it will yield a vector of chat::stream::ChatResponseEvent items.
use openai_rust::futures_util::StreamExt;
let mut res = client.create_chat_stream(args).await.unwrap();
while let Some(events) = res.next().await {
for event in events.unwrap() {
print!("{}", event.choices[0].delta.content.as_ref().unwrap_or(&"".to_owned()));
std::io::stdout().flush().unwrap();
}
}
Examples found in repository?
examples/chat_stream_example.rs (line 15)
(lines 7–22)
async fn main() {
let client = openai_rust::Client::new(&std::env::var("OPENAI_API_KEY").unwrap());
let args = openai_rust::chat::ChatArguments::new("gpt-3.5-turbo", vec![
openai_rust::chat::Message {
role: "user".to_owned(),
content: "Hello GPT!".to_owned(),
}
]);
let mut res = client.create_chat_stream(args).await.unwrap();
while let Some(events) = res.next().await {
for event in events.unwrap() {
print!("{}", event);
std::io::stdout().flush().unwrap();
}
}
}
sourcepub async fn create_completion(
&self,
args: CompletionArguments
) -> Result<CompletionResponse>
pub async fn create_completion( &self, args: CompletionArguments ) -> Result<CompletionResponse>
Creates a completion for the provided prompt and parameters.
let c = openai_rust::Client::new(api_key);
let args = openai_rust::completions::CompletionArguments::new("text-davinci-003", "The quick brown fox".to_owned());
println!("{}", c.create_completion(args).await.unwrap().choices[0].text);