llm_api_access/
llm.rs

// src/llm.rs
use async_trait::async_trait;
use crate::openai::call_gpt;
use crate::gemini::{
    call_gemini, conversation_gemini_call, get_gemini_model_info, list_gemini_models,
    count_gemini_tokens,
};
use crate::anthropic::call_anthropic;
use crate::models::gemini::ModelInfo;
use crate::errors::GeneralError;
use crate::structs::general::{Message, Content, Part};

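// Dependency note (an assumption about the surrounding crate, not part of this
// file): the #[async_trait] attribute used below comes from the async-trait
// crate, e.g. `async-trait = "0.1"` in Cargo.toml. It exists because trait
// methods could not be `async fn` directly when this pattern was common.
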
/// The LLM providers this crate can dispatch to.
pub enum LLM {
    OpenAI,
    Gemini,
    Anthropic,
}

#[async_trait]
pub trait Access {
    /// Sends a single user message and returns the model's reply.
    async fn send_single_message(
        &self,
        message: &str,
        model: Option<&str>,
    ) -> Result<String, Box<dyn std::error::Error + Send + Sync>>;

    /// Sends a multi-turn conversation history and returns the next reply.
    async fn send_convo_message(
        &self,
        messages: Vec<Message>,
        model: Option<&str>,
    ) -> Result<String, Box<dyn std::error::Error + Send + Sync>>;

    /// Fetches metadata for the named model.
    async fn get_model_info(
        &self,
        model: &str,
    ) -> Result<ModelInfo, Box<dyn std::error::Error + Send + Sync>>;

    /// Lists the models available from the provider.
    async fn list_models(&self)
        -> Result<Vec<ModelInfo>, Box<dyn std::error::Error + Send + Sync>>;

    /// Counts the tokens `text` would consume for the given model.
    async fn count_tokens(
        &self,
        text: &str,
        model: &str,
    ) -> Result<u32, Box<dyn std::error::Error + Send + Sync>>;
}

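// Usage sketch (illustrative, not part of the original file): this assumes the
// provider modules read their API keys the way the crate expects (e.g. from
// environment variables) and that a Tokio runtime is driving the future.
#[allow(dead_code)]
async fn example_single_message() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let llm = LLM::OpenAI;
    // The OpenAI arm in the impl below ignores `model`, so None is fine here.
    let reply = llm.send_single_message("Say hello in one sentence.", None).await?;
    println!("{reply}");
    Ok(())
}
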
#[async_trait]
impl Access for LLM {
    async fn send_single_message(
        &self,
        message: &str,
        model: Option<&str>,
    ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
        // Every provider receives the same single-turn user message,
        // so build it once instead of once per match arm.
        let user_message = Message {
            role: "user".to_string(),
            content: message.to_string(),
        };
        match self {
            LLM::OpenAI => call_gpt(vec![user_message]).await,
            LLM::Gemini => call_gemini(vec![user_message], model).await,
            LLM::Anthropic => call_anthropic(vec![user_message]).await,
        }
    }

    async fn send_convo_message(
        &self,
        messages: Vec<Message>,
        model: Option<&str>,
    ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
        match self {
            LLM::OpenAI => call_gpt(messages).await,
            LLM::Gemini => {
                // Gemini expects a Content/Part shape rather than the crate's
                // shared Message type, so map the history before the call.
                let gemini_messages: Vec<Content> = messages
                    .into_iter()
                    .map(|msg| Content {
                        role: msg.role,
                        parts: vec![Part { text: msg.content }],
                    })
                    .collect();

                conversation_gemini_call(gemini_messages, model).await
            }
            LLM::Anthropic => call_anthropic(messages).await,
        }
    }

    async fn get_model_info(
        &self,
        model: &str,
    ) -> Result<ModelInfo, Box<dyn std::error::Error + Send + Sync>> {
        match self {
            LLM::Gemini => get_gemini_model_info(model).await,
            _ => Err(Box::new(GeneralError {
                message: "get_model_info is currently only implemented for Gemini".to_string(),
            }) as Box<dyn std::error::Error + Send + Sync>),
        }
    }

    async fn list_models(
        &self,
    ) -> Result<Vec<ModelInfo>, Box<dyn std::error::Error + Send + Sync>> {
        match self {
            LLM::Gemini => list_gemini_models().await,
            _ => Err(Box::new(GeneralError {
                message: "list_models is currently only implemented for Gemini".to_string(),
            }) as Box<dyn std::error::Error + Send + Sync>),
        }
    }

    async fn count_tokens(
        &self,
        text: &str,
        model: &str,
    ) -> Result<u32, Box<dyn std::error::Error + Send + Sync>> {
        match self {
            LLM::Gemini => count_gemini_tokens(text, model).await,
            _ => Err(Box::new(GeneralError {
                message: "count_tokens is currently only implemented for Gemini".to_string(),
            }) as Box<dyn std::error::Error + Send + Sync>),
        }
    }
}
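
// Conversation sketch (illustrative, not part of the original file): a history
// is just a Vec<Message> in the crate's shared shape; the Gemini arm converts
// it to Content/Part internally. The "gemini-1.5-flash" model name below is an
// assumption, not something this file pins down; passing None for the
// conversation call defers to whatever default the gemini module applies.
#[allow(dead_code)]
async fn example_conversation_and_tokens() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let history = vec![
        Message { role: "user".to_string(), content: "What is Rust?".to_string() },
        Message { role: "model".to_string(), content: "A systems programming language.".to_string() },
        Message { role: "user".to_string(), content: "Name one benefit of the borrow checker.".to_string() },
    ];
    let llm = LLM::Gemini;
    let reply = llm.send_convo_message(history, None).await?;

    // The introspection helpers are Gemini-only for now; the other variants
    // return a GeneralError, as the match arms above show.
    let tokens = llm.count_tokens(&reply, "gemini-1.5-flash").await?;
    println!("reply used ~{tokens} tokens: {reply}");
    Ok(())
}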