pub struct OpenAIChatCompletionClient { /* private fields */ }
Expand description
§OpenAIChatCompletionClient
Allows for interacting with OpenAI models.
§Examples
use rag_toolchain::common::*;
use rag_toolchain::clients::*;
use serde_json::Map;
use serde_json::Value;
async fn generate_completion() {
let model: OpenAIModel = OpenAIModel::Gpt3Point5Turbo;
let mut additional_config: Map<String, Value> = Map::new();
additional_config.insert("temperature".into(), 0.5.into());
let client: OpenAIChatCompletionClient =
OpenAIChatCompletionClient::try_new_with_additional_config(model, additional_config).unwrap();
let system_message: PromptMessage = PromptMessage::SystemMessage(
"You are a comedian that can't ever reply to someone unless it's phrased as a sarcastic joke"
.into());
let user_message: PromptMessage =
PromptMessage::HumanMessage("What is the weather like today ?".into());
let reply = client
.invoke(vec![system_message, user_message])
.await
.unwrap();
println!("{:?}", reply.content());
}
§Required Environment Variables
OPENAI_API_KEY: The API key to use for the OpenAI API
Implementations§
Source§impl OpenAIChatCompletionClient
impl OpenAIChatCompletionClient
Sourcepub fn try_new(
model: OpenAIModel,
) -> Result<OpenAIChatCompletionClient, VarError>
pub fn try_new( model: OpenAIModel, ) -> Result<OpenAIChatCompletionClient, VarError>
§OpenAIChatCompletionClient::try_new
This method creates a new OpenAIChatCompletionClient. All inference parameters used will be the default ones provided by the OpenAI API.
§Arguments
model
:OpenAIModel
- The model to use for the chat completion.
§Errors
VarError
- if the OPENAI_API_KEY environment variable is not set.
§Returns
OpenAIChatCompletionClient
- the chat completion client.
Sourcepub fn try_new_with_additional_config(
model: OpenAIModel,
additional_config: Map<String, Value>,
) -> Result<OpenAIChatCompletionClient, VarError>
pub fn try_new_with_additional_config( model: OpenAIModel, additional_config: Map<String, Value>, ) -> Result<OpenAIChatCompletionClient, VarError>
§OpenAIChatCompletionClient::try_new_with_additional_config
This method creates a new OpenAIChatCompletionClient. All inference parameters provided in the additional_config will be used in the chat completion request. An example of this could be ‘temperature’, ‘top_p’, ‘seed’ etc.
§Forbidden Properties
- “stream”: this cannot be set as it is used internally by the client.
- “n”: n can be set but will result in wasted tokens as the client is built for single chat completions. We intend to add support for multiple completions in the future.
§Arguments
model
:OpenAIModel
- The model to use for the chat completion.
additional_config
:Map<String, Value>
- The additional configuration to use for the chat completion.
§Errors
VarError
- if the OPENAI_API_KEY environment variable is not set.
§Returns
OpenAIChatCompletionClient
- the chat completion client.
Sourcepub fn try_new_with_url(
model: OpenAIModel,
url: String,
) -> Result<OpenAIChatCompletionClient, VarError>
pub fn try_new_with_url( model: OpenAIModel, url: String, ) -> Result<OpenAIChatCompletionClient, VarError>
§OpenAIChatCompletionClient::try_new_with_url
This method creates a new OpenAIChatCompletionClient. All inference parameters used will be the default ones provided by the OpenAI API. You can pass the URL in directly.
§Arguments
model
:OpenAIModel
- The model to use for the chat completion.
url
:String
- The URL to use for the API call.
§Errors
VarError
- if the OPENAI_API_KEY environment variable is not set.
§Returns
OpenAIChatCompletionClient
- the chat completion client.
Sourcepub fn try_new_with_url_and_additional_config(
model: OpenAIModel,
url: String,
additional_config: Map<String, Value>,
) -> Result<OpenAIChatCompletionClient, VarError>
pub fn try_new_with_url_and_additional_config( model: OpenAIModel, url: String, additional_config: Map<String, Value>, ) -> Result<OpenAIChatCompletionClient, VarError>
§OpenAIChatCompletionClient::try_new_with_url_and_additional_config
This method creates a new OpenAIChatCompletionClient. All inference parameters provided in the additional_config will be used in the chat completion request. An example of this could be ‘temperature’, ‘top_p’, ‘seed’ etc. You can pass the URL in directly.
§Forbidden Properties
- “stream”: this cannot be set as it is used internally by the client.
- “n”: n can be set but will result in wasted tokens as the client is built for single chat completions. We intend to add support for multiple completions in the future.
§Arguments
model
:OpenAIModel
- The model to use for the chat completion.
url
:String
- The URL to use for the API call.
additional_config
:Map<String, Value>
- The additional configuration to use for the chat completion.
§Errors
VarError
- if the OPENAI_API_KEY environment variable is not set.
§Returns
OpenAIChatCompletionClient
- the chat completion client.
Trait Implementations§
Source§impl AsyncChatClient for OpenAIChatCompletionClient
impl AsyncChatClient for OpenAIChatCompletionClient
Source§async fn invoke(
&self,
prompt_messages: Vec<PromptMessage>,
) -> Result<PromptMessage, Self::ErrorType>
async fn invoke( &self, prompt_messages: Vec<PromptMessage>, ) -> Result<PromptMessage, Self::ErrorType>
§OpenAIChatCompletionClient::invoke
function to execute the ChatCompletion given a list of prompt messages.
§Arguments
prompt_messages
:Vec<PromptMessage>
- the list of prompt messages that will be sent to the LLM.
§Errors
OpenAIError
- if the chat client invocation fails.
§Returns
PromptMessage::AIMessage
- the response from the chat client.
type ErrorType = OpenAIError
Source§impl AsyncStreamedChatClient for OpenAIChatCompletionClient
impl AsyncStreamedChatClient for OpenAIChatCompletionClient
Source§async fn invoke_stream(
&self,
prompt_messages: Vec<PromptMessage>,
) -> Result<Self::Item, Self::ErrorType>
async fn invoke_stream( &self, prompt_messages: Vec<PromptMessage>, ) -> Result<Self::Item, Self::ErrorType>
§OpenAIChatCompletionClient::invoke_stream
function to execute the ChatCompletion given a list of prompt messages.
§Arguments
prompt_messages
:Vec<PromptMessage>
- the list of prompt messages that will be sent to the LLM.
§Errors
OpenAIError
- if the chat client invocation fails.
§Returns
impl ChatCompletionStream
- the response from the chat client.
type ErrorType = OpenAIError
type Item = OpenAICompletionStream
Auto Trait Implementations§
impl Freeze for OpenAIChatCompletionClient
impl !RefUnwindSafe for OpenAIChatCompletionClient
impl Send for OpenAIChatCompletionClient
impl Sync for OpenAIChatCompletionClient
impl Unpin for OpenAIChatCompletionClient
impl !UnwindSafe for OpenAIChatCompletionClient
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self
into a Left
variant of Either<Self, Self>
if into_left
is true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read more
Source§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self
into a Left
variant of Either<Self, Self>
if into_left(&self)
returns true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read more