// tuitbot_core/llm/mod.rs

//! LLM provider abstraction and implementations.
//!
//! Provides a trait-based abstraction for LLM providers (OpenAI, Anthropic, Ollama)
//! with typed responses, token usage tracking, and health checking.

pub mod anthropic;
pub mod factory;
pub mod openai_compat;
pub mod pricing;

use crate::error::LlmError;
13/// Token usage information from an LLM completion.
14#[derive(Debug, Clone, Default, serde::Serialize, serde::Deserialize)]
15pub struct TokenUsage {
16    /// Number of tokens in the input/prompt.
17    pub input_tokens: u32,
18    /// Number of tokens in the output/completion.
19    pub output_tokens: u32,
20}
21
22impl TokenUsage {
23    /// Accumulate token counts from another usage record (e.g. across retries).
24    pub fn accumulate(&mut self, other: &TokenUsage) {
25        self.input_tokens += other.input_tokens;
26        self.output_tokens += other.output_tokens;
27    }
28}
29
/// Response from an LLM completion request.
///
/// Produced by [`LlmProvider::complete`]; bundles the generated text with
/// the usage accounting and the model identifier for that call.
#[derive(Debug, Clone)]
pub struct LlmResponse {
    /// The generated text content.
    pub text: String,
    /// Token usage for this completion.
    pub usage: TokenUsage,
    /// The model that produced this response.
    pub model: String,
}
40
/// Parameters controlling LLM generation behavior.
///
/// Derives `PartialEq` so callers can compare or deduplicate parameter sets
/// (full `Eq` is unavailable because `temperature` is an `f32`).
#[derive(Debug, Clone, PartialEq)]
pub struct GenerationParams {
    /// Maximum number of tokens to generate. Defaults to 512.
    pub max_tokens: u32,
    /// Sampling temperature (0.0 = deterministic, 1.0+ = creative). Defaults to 0.7.
    pub temperature: f32,
    /// Optional system prompt override. If `Some`, replaces the caller's system prompt.
    pub system_prompt: Option<String>,
}
51
52impl Default for GenerationParams {
53    fn default() -> Self {
54        Self {
55            max_tokens: 512,
56            temperature: 0.7,
57            system_prompt: None,
58        }
59    }
60}
61
/// Trait abstracting all LLM provider operations.
///
/// Implementations include `OpenAiCompatProvider` (for OpenAI and Ollama)
/// and `AnthropicProvider`. The trait is object-safe for use as `Box<dyn LlmProvider>`.
/// The `Send + Sync` supertraits allow a provider to be shared across async tasks.
#[async_trait::async_trait]
pub trait LlmProvider: Send + Sync {
    /// Returns the display name of this provider (e.g., "openai", "anthropic", "ollama").
    fn name(&self) -> &str;

    /// Send a completion request to the LLM.
    ///
    /// If `params.system_prompt` is `Some`, it overrides the `system` parameter.
    ///
    /// # Errors
    ///
    /// Returns an [`LlmError`] when the completion cannot be obtained;
    /// the specific conditions (network, auth, provider-side failures)
    /// are implementation-defined.
    async fn complete(
        &self,
        system: &str,
        user_message: &str,
        params: &GenerationParams,
    ) -> Result<LlmResponse, LlmError>;

    /// Check if the provider is reachable and configured correctly.
    ///
    /// # Errors
    ///
    /// Returns an [`LlmError`] if the provider is unreachable or misconfigured.
    async fn health_check(&self) -> Result<(), LlmError>;
}