// llm/providers/local/llama_cpp.rs

use super::util::get_local_config;
use crate::providers::openai::OpenAiChatProvider;
use crate::{ProviderFactory, Result};
use async_openai::{Client, config::OpenAIConfig};

/// Chat provider backed by a local llama.cpp server exposing an
/// OpenAI-compatible HTTP API.
pub struct LlamaCppProvider {
    // async-openai client configured with the llama.cpp server's base URL.
    client: Client<OpenAIConfig>,
}
9
10impl LlamaCppProvider {
11 pub fn new(base_url: &str) -> Self {
12 Self {
13 client: Client::with_config(get_local_config(base_url)),
14 }
15 }
16}
17
18impl Default for LlamaCppProvider {
19 fn default() -> Self {
20 Self {
21 client: Client::with_config(get_local_config("http://localhost:8080/v1")),
22 }
23 }
24}
25
impl ProviderFactory for LlamaCppProvider {
    /// Builds the provider without reading any environment variables;
    /// a local llama.cpp server needs no credentials, so this is infallible.
    fn from_env() -> Result<Self> {
        Ok(Self::default())
    }

    /// The requested model name is discarded — presumably because a
    /// llama.cpp server serves the single model it was launched with.
    /// NOTE(review): confirm callers don't expect the name to be honored.
    fn with_model(self, _model: &str) -> Self {
        self
    }
}
36
impl OpenAiChatProvider for LlamaCppProvider {
    type Config = OpenAIConfig;

    /// Borrows the underlying OpenAI-compatible HTTP client.
    fn client(&self) -> &Client<Self::Config> {
        &self.client
    }

    /// Model name sent in chat requests.
    /// NOTE(review): returns the empty string — likely relying on the
    /// llama.cpp server ignoring the `model` field; verify the server
    /// accepts requests with an empty model name.
    fn model(&self) -> &'static str {
        "" }

    /// Human-readable provider name used in logs/diagnostics.
    fn provider_name(&self) -> &'static str {
        "LlamaCpp"
    }
}