pub trait LlmProviderTrait: Send + Sync {
// Required methods
fn generate_completion<'life0, 'life1, 'async_trait>(
&'life0 self,
prompt: &'life1 str,
max_tokens: Option<usize>,
temperature: Option<f32>,
top_p: Option<f32>,
stop_sequences: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait,
'life1: 'async_trait;
fn generate_chat_completion<'life0, 'async_trait>(
&'life0 self,
messages: Vec<ChatMessage>,
max_tokens: Option<usize>,
temperature: Option<f32>,
top_p: Option<f32>,
stop_sequences: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait;
fn get_available_models<'life0, 'async_trait>(
&'life0 self,
) -> Pin<Box<dyn Future<Output = Result<Vec<String>>> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait;
fn is_available<'life0, 'async_trait>(
&'life0 self,
) -> Pin<Box<dyn Future<Output = bool> + Send + 'async_trait>>
where Self: 'async_trait,
'life0: 'async_trait;
fn name(&self) -> &'static str;
fn max_context_length(&self) -> usize;
}Expand description
LLM provider trait
Required Methods
Source
fn generate_completion<'life0, 'life1, 'async_trait>(
    &'life0 self,
    prompt: &'life1 str,
    max_tokens: Option<usize>,
    temperature: Option<f32>,
    top_p: Option<f32>,
    stop_sequences: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,
    'life1: 'async_trait,

Generate text completion
Source
fn generate_chat_completion<'life0, 'async_trait>(
    &'life0 self,
    messages: Vec<ChatMessage>,
    max_tokens: Option<usize>,
    temperature: Option<f32>,
    top_p: Option<f32>,
    stop_sequences: Option<Vec<String>>,
) -> Pin<Box<dyn Future<Output = Result<String>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,

Generate chat completion
Source
fn get_available_models<'life0, 'async_trait>(
    &'life0 self,
) -> Pin<Box<dyn Future<Output = Result<Vec<String>>> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,

Get available models
Source
fn is_available<'life0, 'async_trait>(
    &'life0 self,
) -> Pin<Box<dyn Future<Output = bool> + Send + 'async_trait>>
where
    Self: 'async_trait,
    'life0: 'async_trait,

Check if provider is available
Source
fn max_context_length(&self) -> usize

Get maximum context length