pub struct LLMServiceFlows<'a> { /* private fields */ }
Expand description
The main struct for setting the basic configuration for LLM Service interface.
Implementations§
Source§impl<'a> LLMServiceFlows<'a>
impl<'a> LLMServiceFlows<'a>
Sourcepub async fn transcribe(
&self,
input: TranscribeInput,
) -> Result<TranscriptionOutput, String>
pub async fn transcribe( &self, input: TranscribeInput, ) -> Result<TranscriptionOutput, String>
Transcribe audio into the input language.
input is a TranscribeInput object.
// This code snippet transcribes the input audio into English; the audio was collected in a previous step.
// Prepare the TranscribeInput struct.
let input = TranscribeInput {
audio: audio,
audio_format: "wav".to_string(),
language: "en".to_string(),
};
// Call the transcribe function.
let transcription = match llm.transcribe(input).await {
Ok(r) => r.text,
Err(e) => {your error handling},
};
Sourcepub async fn translate(
&self,
input: TranslateInput,
) -> Result<TranslationOutput, String>
pub async fn translate( &self, input: TranslateInput, ) -> Result<TranslationOutput, String>
Translate audio into English.
input is a TranslateInput object.
// This code snippet translates the input audio into English; the audio was collected in a previous step.
// Prepare the TranslateInput struct.
let input = TranslateInput {
audio: audio,
audio_format: "wav".to_string(),
language: "zh".to_string(),
};
// Call the translate function.
let translation = match llm.translate(input).await {
Ok(r) => r.text,
Err(e) => {your error handling},
};
Source§impl<'a> LLMServiceFlows<'a>
impl<'a> LLMServiceFlows<'a>
Sourcepub async fn chat_completion(
&self,
conversation_id: &str,
sentence: &str,
options: &ChatOptions<'_>,
) -> Result<ChatResponse, String>
pub async fn chat_completion( &self, conversation_id: &str, sentence: &str, options: &ChatOptions<'_>, ) -> Result<ChatResponse, String>
Create chat completion with the provided sentence. It uses OpenAI’s GPT-4 model to make a conversation.
conversation_id is the identifier of the conversation.
The history will be fetched and attached to the sentence as a whole prompt for ChatGPT.
sentence is a String that represents the current utterance of the conversation.
// Create a conversation_id.
// Only numbers, letters, underscores, dashes, and pound signs are allowed, up to 50 characters.
let chat_id = format!("news-summary-N");
// System_prompt content in text.
let system = &format!("You're a news editor AI.");
// Create ChatOptions.
let co = ChatOptions {
model: Some("gpt-4"),
token_limit: 8192,
restart: true,
system_prompt: Some(system),
// Use .. to extract the default value for the remaining fields.
..Default::default()
};
// Create a `sentence`, the concatenation of user prompt and the text to work with.
let question = format!("Make a concise summary within 200 words on this: {news_body}.");
// Chat completion to get the result and handle the failure.
match llm.chat_completion(&chat_id, &question, &co).await {
Ok(r) => Ok(r.choice),
Err(e) => Err(e.into()),
}
Source§impl<'a> LLMServiceFlows<'a>
impl<'a> LLMServiceFlows<'a>
Sourcepub async fn create_embeddings(
&self,
model: Option<&str>,
input: EmbeddingsInput,
) -> Result<Vec<Vec<f64>>, String>
pub async fn create_embeddings( &self, model: Option<&str>, input: EmbeddingsInput, ) -> Result<Vec<Vec<f64>>, String>
Create embeddings from the provided input.
input is an EmbeddingsInput object.
// This code snippet computes embeddings for `text`, the question created in previous step.
// Wrap the `text` in EmbeddingsInput struct.
let input = EmbeddingsInput::String(text.to_string());
// Call the create_embeddings function.
let question_vector = match llm.create_embeddings(Some("text-embedding-ada-002"), input).await {
Ok(r) => r[0],
Err(e) => {your error handling},
};
Source§impl<'a> LLMServiceFlows<'a>
impl<'a> LLMServiceFlows<'a>
pub fn new(service_endpoint: &'a str) -> LLMServiceFlows<'a>
pub fn set_retry_times(&mut self, retry_times: u8)
pub fn set_api_key(&mut self, api_key: &'a str)
Auto Trait Implementations§
impl<'a> Freeze for LLMServiceFlows<'a>
impl<'a> RefUnwindSafe for LLMServiceFlows<'a>
impl<'a> Send for LLMServiceFlows<'a>
impl<'a> Sync for LLMServiceFlows<'a>
impl<'a> Unpin for LLMServiceFlows<'a>
impl<'a> UnwindSafe for LLMServiceFlows<'a>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more