llm_api_rs/providers/
mod.rs

1pub mod anthropic;
2pub mod deepseek;
3pub mod gemini;
4pub mod openai;
5pub mod xai;
6
7use crate::core::{ChatCompletionRequest, ChatCompletionResponse};
8use crate::error::LlmApiError;
9use async_trait::async_trait;
10
/// Common abstraction over a chat-completion-capable LLM backend.
///
/// Each provider in this module (`Anthropic`, `DeepSeek`, `Gemini`,
/// `OpenAI`, `XAI` — see the re-exports below) implements this trait,
/// letting callers swap backends behind a single interface.
///
/// The `#[async_trait]` macro desugars the `async fn` into a method
/// returning a boxed future, which keeps the trait usable as a trait
/// object (`dyn LlmProvider`).
#[async_trait]
pub trait LlmProvider {
    /// Sends `request` to the underlying provider and returns its
    /// completion response.
    ///
    /// Takes the request by value, so callers clone it if they need to
    /// retry with the same payload.
    ///
    /// # Errors
    ///
    /// Returns an [`LlmApiError`] when the request cannot be completed;
    /// the exact failure conditions are provider-specific and defined by
    /// each implementation (not visible from this file).
    async fn chat_completion(
        &self,
        request: ChatCompletionRequest,
    ) -> Result<ChatCompletionResponse, LlmApiError>;
}
18
19pub use anthropic::Anthropic;
20pub use deepseek::DeepSeek;
21pub use gemini::Gemini;
22pub use openai::OpenAI;
23pub use xai::XAI;