//! self_llm/config.rs — configuration types for LLM models and providers.
1use std::collections::HashMap;
2use std::sync::atomic::{AtomicI64, Ordering};
3
4use crate::client::Client;
5
// Monotonic counters used by the `new` constructors below to hand out
// process-unique ids (starting at 1). `Relaxed` ordering is sufficient at the
// use sites: only uniqueness of the returned value is required, no
// synchronization with other memory operations.
static LLM_CONFIG_ID: AtomicI64 = AtomicI64::new(1);
static LLM_PROVIDER_CONFIG_ID: AtomicI64 = AtomicI64::new(1);
8
/// Provider API type.
///
/// Identifies which wire protocol / request format a provider endpoint
/// speaks. Fieldless and cheap, so it derives `Copy`; `Hash` lets it be used
/// as a map/set key.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProviderType {
    /// OpenAI-style API.
    OpenAi,
    /// Anthropic-style API.
    Anthropic,
}
15
/// Configuration for a specific LLM model.
///
/// Instances are built via [`LlmConfig::new`] plus the consuming builder
/// methods on the `impl` block; `new` assigns a process-unique `id`.
#[derive(Debug, Clone)]
pub struct LlmConfig {
    /// Process-unique id assigned from a global atomic counter in `new`.
    pub id: i64,
    /// Human-readable model name.
    pub model_name: String,
    /// Model identifier — presumably the string sent to the provider API;
    /// confirm against the request-building code.
    pub model_id: String,
    /// Maximum number of output (completion) tokens.
    pub max_output_token: i32,
    /// Maximum number of input (prompt) tokens.
    pub max_input_token: i32,
    /// Maximum total token budget — assumed to cover input + output
    /// combined; TODO confirm how callers interpret this vs the two above.
    pub max_token: i32,
    /// Whether the model supports extended "thinking" (defaults to false).
    pub thinking: bool,
    /// Whether the model accepts image inputs (defaults to false).
    pub image_understanding: bool,
    /// Whether the model supports structured output (defaults to false).
    pub struct_output: bool,
    /// Whether the model supports tool/function calling (defaults to false).
    pub tool_use: bool,
    /// Sampling temperature (defaults to 1.0). Not range-checked here.
    pub temperature: f32,
    /// Nucleus-sampling top-p (defaults to 1.0). Not range-checked here.
    pub top_p: f32,
}
32
/// Configuration for an LLM provider (vendor endpoint).
///
/// Built via [`LlmProviderConfig::new`]; `new` assigns a process-unique `id`
/// and leaves `custom_header` empty.
#[derive(Debug, Clone)]
pub struct LlmProviderConfig {
    /// Process-unique id assigned from a global atomic counter in `new`.
    pub id: i64,
    /// Human-readable provider name.
    pub provider_name: String,
    /// Base URL of the provider's API endpoint.
    pub base_url: String,
    /// Which API style the endpoint speaks (OpenAI / Anthropic).
    pub provider_type: ProviderType,
    /// API key credential. NOTE(review): stored in plain text and included
    /// in `Debug` output — confirm this never reaches logs.
    pub api_key: String,
    /// Extra HTTP headers — presumably attached to every request made
    /// through this provider; verify against the client code.
    pub custom_header: HashMap<String, String>,
}
43
44impl LlmConfig {
45    pub fn new(
46        model_name: impl Into<String>,
47        model_id: impl Into<String>,
48        max_output_token: i32,
49        max_input_token: i32,
50        max_token: i32,
51    ) -> Self {
52        Self {
53            id: LLM_CONFIG_ID.fetch_add(1, Ordering::Relaxed),
54            model_name: model_name.into(),
55            model_id: model_id.into(),
56            max_output_token,
57            max_input_token,
58            max_token,
59            thinking: false,
60            image_understanding: false,
61            struct_output: false,
62            tool_use: false,
63            temperature: 1.0,
64            top_p: 1.0,
65        }
66    }
67
68    pub fn thinking(mut self, v: bool) -> Self {
69        self.thinking = v;
70        self
71    }
72
73    pub fn image_understanding(mut self, v: bool) -> Self {
74        self.image_understanding = v;
75        self
76    }
77
78    pub fn struct_output(mut self, v: bool) -> Self {
79        self.struct_output = v;
80        self
81    }
82
83    pub fn tool_use(mut self, v: bool) -> Self {
84        self.tool_use = v;
85        self
86    }
87
88    pub fn temperature(mut self, v: f32) -> Self {
89        self.temperature = v;
90        self
91    }
92
93    pub fn top_p(mut self, v: f32) -> Self {
94        self.top_p = v;
95        self
96    }
97}
98
99impl LlmProviderConfig {
100    pub fn new(
101        provider_name: impl Into<String>,
102        base_url: impl Into<String>,
103        provider_type: ProviderType,
104        api_key: impl Into<String>,
105    ) -> Self {
106        Self {
107            id: LLM_PROVIDER_CONFIG_ID.fetch_add(1, Ordering::Relaxed),
108            provider_name: provider_name.into(),
109            base_url: base_url.into(),
110            provider_type,
111            api_key: api_key.into(),
112            custom_header: HashMap::new(),
113        }
114    }
115
116    pub fn custom_header(mut self, key: impl Into<String>, value: impl Into<String>) -> Self {
117        self.custom_header.insert(key.into(), value.into());
118        self
119    }
120
121    pub fn custom_headers(mut self, headers: HashMap<String, String>) -> Self {
122        self.custom_header = headers;
123        self
124    }
125
126    /// Build a [`Client`] from this provider configuration.
127    pub fn build_client(&self) -> Client {
128        Client::from_provider(self)
129    }
130}