// walrus_model/provider.rs

//! Provider implementation.
//!
//! Unified `Provider` enum with enum dispatch over concrete backends.
//! `build_provider()` constructs the appropriate variant based on `ApiStandard`.
use anyhow::Result;
use async_stream::try_stream;
use compact_str::CompactString;
use futures_core::Stream;
use futures_util::StreamExt;
use wcore::model::{Model, Response, StreamChunk};

use crate::{
    config::{ApiStandard, ProviderDef},
    remote::{
        claude::{self, Claude},
        openai::{self, OpenAI},
    },
};
19
20/// Unified LLM provider enum.
21///
22/// The gateway constructs the appropriate variant based on `ApiStandard`
23/// from the provider config.
24#[derive(Clone)]
25pub enum Provider {
26    /// OpenAI-compatible API (covers OpenAI, DeepSeek, Grok, Qwen, Kimi, Ollama).
27    OpenAI(OpenAI),
28    /// Anthropic Messages API.
29    Claude(Claude),
30}
31
32/// Construct a `Provider` from a provider definition and model name.
33///
34/// Uses `effective_standard()` to pick the API protocol (OpenAI or Anthropic).
35pub async fn build_provider(
36    def: &ProviderDef,
37    model: &str,
38    client: reqwest::Client,
39) -> Result<Provider> {
40    let api_key = def.api_key.as_deref().unwrap_or("");
41
42    match def.effective_standard() {
43        ApiStandard::Anthropic => {
44            let url = def.base_url.as_deref().unwrap_or(claude::ENDPOINT);
45            Ok(Provider::Claude(Claude::custom(
46                client, api_key, url, model,
47            )?))
48        }
49        ApiStandard::OpenAI => {
50            let url = def.base_url.as_deref().unwrap_or(openai::endpoint::OPENAI);
51            let provider = if api_key.is_empty() {
52                OpenAI::no_auth(client, url, model)
53            } else {
54                OpenAI::custom(client, api_key, url, model)?
55            };
56            Ok(Provider::OpenAI(provider))
57        }
58    }
59}
60
61impl Model for Provider {
62    async fn send(&self, request: &wcore::model::Request) -> Result<Response> {
63        match self {
64            Self::OpenAI(p) => p.send(request).await,
65            Self::Claude(p) => p.send(request).await,
66        }
67    }
68
69    fn stream(
70        &self,
71        request: wcore::model::Request,
72    ) -> impl Stream<Item = Result<StreamChunk>> + Send {
73        let this = self.clone();
74        try_stream! {
75            match this {
76                Provider::OpenAI(p) => {
77                    let mut stream = std::pin::pin!(p.stream(request));
78                    while let Some(chunk) = stream.next().await {
79                        yield chunk?;
80                    }
81                }
82                Provider::Claude(p) => {
83                    let mut stream = std::pin::pin!(p.stream(request));
84                    while let Some(chunk) = stream.next().await {
85                        yield chunk?;
86                    }
87                }
88            }
89        }
90    }
91
92    fn context_limit(&self, model: &str) -> usize {
93        wcore::model::default_context_limit(model)
94    }
95
96    fn active_model(&self) -> CompactString {
97        match self {
98            Self::OpenAI(p) => p.active_model(),
99            Self::Claude(p) => p.active_model(),
100        }
101    }
102}