//! llmg_providers/volcano.rs — Volcano AI provider backend for llmg.
1use llmg_core::{
2    provider::{ApiKeyCredentials, Credentials, LlmError, Provider},
3    types::{
4        ChatCompletionRequest, ChatCompletionResponse, Choice, EmbeddingRequest, EmbeddingResponse,
5        Message, Usage,
6    },
7};
8// use serde::{Serialize, Deserialize}; // removed unused imports
9
/// Volcano AI API client.
///
/// Holds a shared `reqwest::Client`, the API base URL, and a boxed
/// credential strategy; translates between the OpenAI-style types in
/// `llmg_core` and Volcano's wire format.
#[derive(Debug)]
pub struct VolcanoClient {
    // Shared HTTP client used for every request (see `make_request`).
    http_client: reqwest::Client,
    // API root without trailing slash, e.g. "https://api.volcano.ai/v1";
    // paths such as "/chat/completions" are appended to it.
    base_url: String,
    // Applied to each built request before it is executed.
    // NOTE(review): constructed with header name "Authorization" and the raw
    // key — whether a "Bearer " prefix is added inside ApiKeyCredentials is
    // not visible here; confirm against llmg_core.
    credentials: Box<dyn Credentials>,
}
17
/// Volcano-specific request format.
///
/// Serialized as the JSON body of POST {base_url}/chat/completions.
/// Optional fields are omitted from the payload when `None`.
#[derive(Debug, serde::Serialize)]
struct VolcanoRequest {
    model: String,
    messages: Vec<VolcanoMessage>,
    #[serde(skip_serializing_if = "Option::is_none")]
    max_tokens: Option<u32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    temperature: Option<f32>,
    // NOTE(review): forwarded verbatim, but make_request always parses a
    // single JSON body — a caller setting stream=Some(true) would presumably
    // break response parsing; confirm whether streaming should be rejected.
    #[serde(skip_serializing_if = "Option::is_none")]
    stream: Option<bool>,
}
30
/// Volcano message format: a plain role/content pair.
///
/// Both Serialize (outgoing request) and Deserialize (incoming choice)
/// because the same shape appears on both sides of the wire.
#[derive(Debug, serde::Serialize, serde::Deserialize)]
struct VolcanoMessage {
    // One of "system", "user", or "assistant" (see `convert_request`).
    role: String,
    content: String,
}
37
/// Volcano response format, deserialized from the completion endpoint's
/// JSON body and converted to `ChatCompletionResponse` by
/// `convert_response`.
#[derive(Debug, serde::Deserialize)]
struct VolcanoResponse {
    id: String,
    choices: Vec<VolcanoChoice>,
    model: String,
    usage: VolcanoUsage,
}
46
/// One completion choice within a `VolcanoResponse`.
#[derive(Debug, serde::Deserialize)]
struct VolcanoChoice {
    index: u32,
    message: VolcanoMessage,
    // Optional because the provider may omit it (e.g. unfinished output).
    finish_reason: Option<String>,
}
53
/// Token accounting block of a `VolcanoResponse`; mapped 1:1 onto
/// `llmg_core::types::Usage`.
#[derive(Debug, serde::Deserialize)]
struct VolcanoUsage {
    prompt_tokens: u32,
    completion_tokens: u32,
    total_tokens: u32,
}
60
61impl VolcanoClient {
62    /// Create a new Volcano client from environment
63    pub fn from_env() -> Result<Self, LlmError> {
64        let api_key = std::env::var("VOLCANO_API_KEY").map_err(|_| LlmError::AuthError)?;
65
66        Ok(Self::new(api_key))
67    }
68
69    /// Create a new Volcano client with explicit API key
70    pub fn new(api_key: impl Into<String>) -> Self {
71        let api_key = api_key.into();
72
73        Self {
74            http_client: reqwest::Client::new(),
75            base_url: "https://api.volcano.ai/v1".to_string(),
76            credentials: Box::new(ApiKeyCredentials::with_header(api_key, "Authorization")),
77        }
78    }
79
80    /// Create with custom base URL
81    pub fn with_base_url(mut self, url: impl Into<String>) -> Self {
82        self.base_url = url.into();
83        self
84    }
85
86    /// Convert OpenAI format to Volcano format
87    fn convert_request(&self, request: ChatCompletionRequest) -> VolcanoRequest {
88        let messages = request
89            .messages
90            .into_iter()
91            .filter_map(|msg| match msg {
92                Message::System { content, .. } => Some(VolcanoMessage {
93                    role: "system".to_string(),
94                    content,
95                }),
96                Message::User { content, .. } => Some(VolcanoMessage {
97                    role: "user".to_string(),
98                    content,
99                }),
100                Message::Assistant { content, .. } => content.map(|content| VolcanoMessage {
101                    role: "assistant".to_string(),
102                    content,
103                }),
104                _ => None,
105            })
106            .collect();
107
108        VolcanoRequest {
109            model: request.model,
110            messages,
111            max_tokens: request.max_tokens,
112            temperature: request.temperature,
113            stream: request.stream,
114        }
115    }
116
117    /// Convert Volcano response to OpenAI format
118    fn convert_response(&self, response: VolcanoResponse) -> ChatCompletionResponse {
119        let choices = response
120            .choices
121            .into_iter()
122            .map(|choice| Choice {
123                index: choice.index,
124                message: Message::Assistant {
125                    content: Some(choice.message.content),
126                    refusal: None,
127                    tool_calls: None,
128                },
129                finish_reason: choice.finish_reason,
130            })
131            .collect();
132
133        ChatCompletionResponse {
134            id: response.id,
135            object: "chat.completion".to_string(),
136            created: chrono::Utc::now().timestamp(),
137            model: response.model,
138            choices,
139            usage: Some(Usage {
140                prompt_tokens: response.usage.prompt_tokens,
141                completion_tokens: response.usage.completion_tokens,
142                total_tokens: response.usage.total_tokens,
143            }),
144        }
145    }
146
147    async fn make_request(
148        &self,
149        request: ChatCompletionRequest,
150    ) -> Result<ChatCompletionResponse, LlmError> {
151        let volcano_req = self.convert_request(request);
152        let url = format!("{}/chat/completions", self.base_url);
153
154        let mut req = self
155            .http_client
156            .post(&url)
157            .json(&volcano_req)
158            .build()
159            .map_err(|e| LlmError::HttpError(e.to_string()))?;
160
161        self.credentials.apply(&mut req)?;
162
163        let response = self
164            .http_client
165            .execute(req)
166            .await
167            .map_err(|e| LlmError::HttpError(e.to_string()))?;
168
169        if !response.status().is_success() {
170            let status = response.status().as_u16();
171            let text = response.text().await.unwrap_or_default();
172            return Err(LlmError::ApiError {
173                status,
174                message: text,
175            });
176        }
177
178        let volcano_resp: VolcanoResponse = response
179            .json()
180            .await
181            .map_err(|e| LlmError::HttpError(e.to_string()))?;
182
183        Ok(self.convert_response(volcano_resp))
184    }
185}
186
187#[async_trait::async_trait]
188impl Provider for VolcanoClient {
189    async fn chat_completion(
190        &self,
191        request: ChatCompletionRequest,
192    ) -> Result<ChatCompletionResponse, LlmError> {
193        self.make_request(request).await
194    }
195
196    async fn embeddings(&self, _request: EmbeddingRequest) -> Result<EmbeddingResponse, LlmError> {
197        Err(LlmError::ProviderError(
198            "Volcano does not support embeddings".to_string(),
199        ))
200    }
201    fn provider_name(&self) -> &'static str {
202        "volcano"
203    }
204}
205
#[cfg(test)]
mod tests {
    use super::*;

    /// A freshly built client reports the stable provider id.
    #[test]
    fn test_volcano_client_creation() {
        assert_eq!(VolcanoClient::new("test-key").provider_name(), "volcano");
    }

    /// System and user messages survive translation into Volcano's format
    /// with model, roles, and sampling options intact.
    #[test]
    fn test_request_conversion() {
        let client = VolcanoClient::new("test-key");

        let system = Message::System {
            content: "You are a helpful assistant".to_string(),
            name: None,
        };
        let user = Message::User {
            content: "Hello!".to_string(),
            name: None,
        };
        let request = ChatCompletionRequest {
            model: "volcano-1".to_string(),
            messages: vec![system, user],
            temperature: Some(0.7),
            max_tokens: Some(100),
            stream: None,
            top_p: None,
            frequency_penalty: None,
            presence_penalty: None,
            stop: None,
            user: None,
            tools: None,
            tool_choice: None,
            response_format: None,
        };

        let converted = client.convert_request(request);

        assert_eq!(converted.model, "volcano-1");
        assert_eq!(converted.messages.len(), 2);
        assert_eq!(converted.messages[0].role, "system");
        assert_eq!(converted.messages[1].role, "user");
        assert_eq!(converted.temperature, Some(0.7));
        assert_eq!(converted.max_tokens, Some(100));
    }
}
255