// llm_observatory_core/provider.rs
use crate::Result;
use async_trait::async_trait;

/// A pluggable LLM backend that can report its identity, readiness, and
/// per-model pricing. Implementations must be `Send + Sync` so they can be
/// shared across async tasks.
#[async_trait]
pub trait LlmProvider: Send + Sync {
    /// Stable identifier for this provider (e.g. a short vendor name).
    fn name(&self) -> &str;

    /// Whether the provider is currently able to serve requests.
    /// NOTE(review): presumably checks connectivity/credentials — confirm
    /// against concrete implementations.
    async fn is_ready(&self) -> Result<bool>;

    /// Pricing information for `model`. Expected to error for models the
    /// provider does not recognize — TODO confirm error semantics.
    async fn get_pricing(&self, model: &str) -> Result<Pricing>;
}
/// Per-model token pricing, expressed in USD per 1 000 tokens.
#[derive(Debug, Clone)]
pub struct Pricing {
    /// Model identifier this pricing applies to (e.g. "gpt-4").
    pub model: String,
    /// USD cost per 1 000 prompt (input) tokens.
    pub prompt_cost_per_1k: f64,
    /// USD cost per 1 000 completion (output) tokens.
    pub completion_cost_per_1k: f64,
}

impl Pricing {
    /// Total USD cost of a request with the given prompt and completion
    /// token counts, using this model's per-1k rates.
    pub fn calculate_cost(&self, prompt_tokens: u32, completion_tokens: u32) -> f64 {
        // Same arithmetic order on both sides: (tokens / 1000) * rate.
        let side_cost = |tokens: u32, rate_per_1k: f64| (f64::from(tokens) / 1000.0) * rate_per_1k;

        side_cost(prompt_tokens, self.prompt_cost_per_1k)
            + side_cost(completion_tokens, self.completion_cost_per_1k)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// calculate_cost applies each per-1k rate to its token count and sums.
    #[test]
    fn test_pricing_calculation() {
        // gpt-4-style rates: $0.03 / 1k prompt, $0.06 / 1k completion.
        let gpt4_pricing = Pricing {
            model: String::from("gpt-4"),
            prompt_cost_per_1k: 0.03,
            completion_cost_per_1k: 0.06,
        };

        // 1000 prompt tokens -> 0.03; 500 completion tokens -> 0.03.
        let cost = gpt4_pricing.calculate_cost(1000, 500);
        assert!((cost - 0.06).abs() < 0.0001);
    }
}