// a3s_code_core/llm/zhipu.rs
use super::openai::OpenAiClient;
use super::types::*;
use super::LlmClient;
use crate::retry::RetryConfig;
use anyhow::Result;
use async_trait::async_trait;
use tokio::sync::mpsc;
#[cfg(test)]
use {super::http::HttpClient, std::sync::Arc};
15
// Zhipu (BigModel) GLM endpoint. The base host and the OpenAI-compatible
// chat-completions path are configured separately on the inner client,
// which presumably joins them when building request URLs — confirm in
// `OpenAiClient` if overriding either value.
const GLM_BASE_URL: &str = "https://open.bigmodel.cn";
const GLM_CHAT_PATH: &str = "/api/paas/v4/chat/completions";
18
19pub struct ZhipuClient(OpenAiClient);
21
22impl ZhipuClient {
23 pub fn new(api_key: String, model: String) -> Self {
24 Self(
25 OpenAiClient::new(api_key, model)
26 .with_provider_name("zhipu")
27 .with_base_url(GLM_BASE_URL.to_string())
28 .with_chat_completions_path(GLM_CHAT_PATH),
29 )
30 }
31
32 pub fn with_temperature(mut self, temperature: f32) -> Self {
33 self.0 = self.0.with_temperature(temperature);
34 self
35 }
36
37 pub fn with_max_tokens(mut self, max_tokens: usize) -> Self {
38 self.0 = self.0.with_max_tokens(max_tokens);
39 self
40 }
41
42 pub fn with_base_url(mut self, base_url: String) -> Self {
43 self.0 = self.0.with_base_url(base_url);
44 self
45 }
46
47 pub fn with_retry_config(mut self, retry_config: RetryConfig) -> Self {
48 self.0 = self.0.with_retry_config(retry_config);
49 self
50 }
51
52 #[cfg(test)]
53 pub fn with_http_client(mut self, http: Arc<dyn HttpClient>) -> Self {
54 self.0 = self.0.with_http_client(http);
55 self
56 }
57}
58
59#[async_trait]
60impl LlmClient for ZhipuClient {
61 async fn complete(
62 &self,
63 messages: &[Message],
64 system: Option<&str>,
65 tools: &[ToolDefinition],
66 ) -> Result<LlmResponse> {
67 self.0.complete(messages, system, tools).await
68 }
69
70 async fn complete_streaming(
71 &self,
72 messages: &[Message],
73 system: Option<&str>,
74 tools: &[ToolDefinition],
75 ) -> Result<mpsc::Receiver<StreamEvent>> {
76 self.0.complete_streaming(messages, system, tools).await
77 }
78}