pub struct LocalProvider { /* private fields */ }
Expand description
Local LLM provider for Ollama, LM Studio, vLLM, and other OpenAI-compatible local servers.
This provider wraps OpenAiProvider with sensible defaults for local inference:
- No API key required (empty string by default)
- Default localhost URL (Ollama: 11434)
- Extended timeout for slower local inference
Implementations§
Source§impl LocalProvider
impl LocalProvider
Sourcepub const DEFAULT_OLLAMA_URL: &'static str = "http://localhost:11434/v1/chat/completions"
pub const DEFAULT_OLLAMA_URL: &'static str = "http://localhost:11434/v1/chat/completions"
Default Ollama endpoint
Sourcepub const DEFAULT_LMSTUDIO_URL: &'static str = "http://localhost:1234/v1/chat/completions"
pub const DEFAULT_LMSTUDIO_URL: &'static str = "http://localhost:1234/v1/chat/completions"
Default LM Studio endpoint
Sourcepub const DEFAULT_VLLM_URL: &'static str = "http://localhost:8000/v1/chat/completions"
pub const DEFAULT_VLLM_URL: &'static str = "http://localhost:8000/v1/chat/completions"
Default vLLM endpoint
Sourcepub fn new(
base_url: Option<&str>,
model: &str,
max_tokens: u32,
timeout: u64,
) -> Self
pub fn new( base_url: Option<&str>, model: &str, max_tokens: u32, timeout: u64, ) -> Self
Create a new local provider
§Arguments
base_url - Optional custom endpoint. Defaults to Ollama.
model - Model name to use (required)
max_tokens - Maximum output tokens
timeout - Request timeout in seconds (default: 120 for slower local inference)
Sourcepub fn ollama(model: &str, max_tokens: u32, timeout: u64) -> Self
pub fn ollama(model: &str, max_tokens: u32, timeout: u64) -> Self
Create provider configured for Ollama
Trait Implementations§
Source§impl Clone for LocalProvider
impl Clone for LocalProvider
Source§fn clone(&self) -> LocalProvider
fn clone(&self) -> LocalProvider
Returns a duplicate of the value. Read more
1.0.0 · Source§fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from source. Read more
Source§impl LlmProvider for LocalProvider
impl LlmProvider for LocalProvider
Source§fn send<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Message>,
tools: Vec<Tool>,
) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<ProviderResponseChunk, LlmError>> + Send + '_>>, LlmError>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
fn send<'life0, 'async_trait>(
&'life0 self,
messages: Vec<Message>,
tools: Vec<Tool>,
) -> Pin<Box<dyn Future<Output = Result<Pin<Box<dyn Stream<Item = Result<ProviderResponseChunk, LlmError>> + Send + '_>>, LlmError>> + Send + 'async_trait>>
where
Self: 'async_trait,
'life0: 'async_trait,
Send messages to the LLM and receive streaming response
Source§fn provider_name(&self) -> &str
fn provider_name(&self) -> &str
Get provider name
Source§fn model_name(&self) -> &str
fn model_name(&self) -> &str
Get model name
Source§fn clone_box(&self) -> Box<dyn LlmProvider>
fn clone_box(&self) -> Box<dyn LlmProvider>
Clone the provider
Auto Trait Implementations§
impl Freeze for LocalProvider
impl !RefUnwindSafe for LocalProvider
impl Send for LocalProvider
impl Sync for LocalProvider
impl Unpin for LocalProvider
impl UnsafeUnpin for LocalProvider
impl !UnwindSafe for LocalProvider
Blanket Implementations§
Source§impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more