// evolve_llm/client.rs
//! The `LlmClient` trait and its shared types.

use crate::error::LlmError;
use async_trait::async_trait;

/// Token usage reported by the provider for one request.
///
/// Providers that do not report usage leave both fields at 0, so a
/// zeroed value means "unknown" rather than "free".
#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
pub struct TokenUsage {
    /// Input (prompt) tokens billed.
    pub input: u32,
    /// Output (completion) tokens billed.
    pub output: u32,
}

15/// One completion response.
16#[derive(Debug, Clone, PartialEq, Eq)]
17pub struct CompletionResult {
18    /// Assistant text.
19    pub text: String,
20    /// Token usage.
21    pub usage: TokenUsage,
22}
23
24/// Shared interface for LLM clients.
25#[async_trait]
26pub trait LlmClient: Send + Sync + std::fmt::Debug {
27    /// Run a single non-streaming completion.
28    async fn complete(&self, prompt: &str, max_tokens: u32) -> Result<CompletionResult, LlmError>;
29
30    /// Stable identifier used by the cost tracker price table.
31    fn model_id(&self) -> &str;
32}
33
/// Placeholder client that always fails with `LlmError::NoLlmAvailable`.
///
/// Useful for callers that need a `&dyn LlmClient` value but are guaranteed
/// never to invoke it (e.g., when running only non-LLM mutators).
#[derive(Debug, Default, Clone, Copy)]
pub struct NoOpLlmClient;

40#[async_trait]
41impl LlmClient for NoOpLlmClient {
42    async fn complete(
43        &self,
44        _prompt: &str,
45        _max_tokens: u32,
46    ) -> Result<CompletionResult, LlmError> {
47        Err(LlmError::NoLlmAvailable)
48    }
49
50    fn model_id(&self) -> &str {
51        "noop"
52    }
53}
54
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn token_usage_default_is_zero() {
        let usage = TokenUsage::default();
        assert_eq!(usage.input, 0);
        assert_eq!(usage.output, 0);
        // A zeroed default must equal an explicitly zeroed value.
        assert_eq!(usage, TokenUsage { input: 0, output: 0 });
    }

    #[test]
    fn noop_client_reports_stable_model_id() {
        // The price table keys on this string; it must never change.
        assert_eq!(NoOpLlmClient.model_id(), "noop");
    }
}
65}