llm_observatory_core/provider.rs

// Copyright 2025 LLM Observatory Contributors
// SPDX-License-Identifier: Apache-2.0

//! Provider trait definitions and utilities.

use crate::Result;
use async_trait::async_trait;

/// Trait for LLM provider implementations.
#[async_trait]
pub trait LlmProvider: Send + Sync {
    /// Get the provider name.
    fn name(&self) -> &str;

    /// Check if the provider is configured and ready.
    async fn is_ready(&self) -> Result<bool>;

    /// Get pricing information for a model.
    async fn get_pricing(&self, model: &str) -> Result<Pricing>;
}

/// Pricing information for a model.
#[derive(Debug, Clone)]
pub struct Pricing {
    /// Model name
    pub model: String,
    /// Cost per 1000 prompt tokens (USD)
    pub prompt_cost_per_1k: f64,
    /// Cost per 1000 completion tokens (USD)
    pub completion_cost_per_1k: f64,
}

impl Pricing {
    /// Calculate cost for given token usage.
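    ///
    /// Costs are prorated linearly: each component is `tokens / 1000.0 *
    /// cost_per_1k`, and the two components are summed. For example, 1,000
    /// prompt tokens and 500 completion tokens at $0.03 / $0.06 per 1K come
    /// to $0.03 + $0.03 = $0.06.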
    pub fn calculate_cost(&self, prompt_tokens: u32, completion_tokens: u32) -> f64 {
        let prompt_cost = (prompt_tokens as f64 / 1000.0) * self.prompt_cost_per_1k;
        let completion_cost = (completion_tokens as f64 / 1000.0) * self.completion_cost_per_1k;
        prompt_cost + completion_cost
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_pricing_calculation() {
        let pricing = Pricing {
            model: "gpt-4".to_string(),
            prompt_cost_per_1k: 0.03,
            completion_cost_per_1k: 0.06,
        };

        let cost = pricing.calculate_cost(1000, 500);
        assert!((cost - 0.06).abs() < 0.0001); // 0.03 + 0.03
    }
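
    // Illustrative sketch only: a minimal in-memory provider showing the shape of
    // an `LlmProvider` implementation. `MockProvider` and its pricing figures are
    // hypothetical and are not part of the crate's public API.
    struct MockProvider;

    #[async_trait]
    impl LlmProvider for MockProvider {
        fn name(&self) -> &str {
            "mock"
        }

        async fn is_ready(&self) -> crate::Result<bool> {
            Ok(true)
        }

        async fn get_pricing(&self, model: &str) -> crate::Result<Pricing> {
            Ok(Pricing {
                model: model.to_string(),
                prompt_cost_per_1k: 0.001,
                completion_cost_per_1k: 0.002,
            })
        }
    }

    #[test]
    fn test_mock_provider_name() {
        // Only the synchronous accessor is exercised here; driving the async
        // methods in a test would require an async runtime such as tokio.
        let provider = MockProvider;
        assert_eq!(provider.name(), "mock");
    }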
}