//! The `LlmClient` trait and its shared types.

use crate::error::LlmError;
use async_trait::async_trait;

/// Token usage reported by the provider. Both fields are 0 if unknown.
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct TokenUsage {
    /// Input tokens billed.
    pub input: u32,
    /// Output tokens billed.
    pub output: u32,
}

impl TokenUsage {
    /// Total billed tokens (input + output).
    ///
    /// Widened to `u64` so the sum cannot overflow even when both
    /// fields are at `u32::MAX`.
    pub fn total(&self) -> u64 {
        u64::from(self.input) + u64::from(self.output)
    }
}
15/// One completion response.
16#[derive(Debug, Clone, PartialEq, Eq)]
17pub struct CompletionResult {
18    /// Assistant text.
19    pub text: String,
20    /// Token usage.
21    pub usage: TokenUsage,
22}
24/// Shared interface for LLM clients.
25#[async_trait]
26pub trait LlmClient: Send + Sync + std::fmt::Debug {
27    /// Run a single non-streaming completion.
28    async fn complete(&self, prompt: &str, max_tokens: u32) -> Result<CompletionResult, LlmError>;
29
30    /// Stable identifier used by the cost tracker price table.
31    fn model_id(&self) -> &str;
32}
#[cfg(test)]
mod tests {
    use super::*;

    /// `Default` must yield the documented "unknown usage" sentinel: both zero.
    #[test]
    fn token_usage_default_is_zero() {
        let u = TokenUsage::default();
        assert_eq!(u.input, 0);
        assert_eq!(u.output, 0);
    }
}