// llm_connector/providers/zhipu.rs

//! Zhipu GLM provider implementation - V2 architecture
//!
//! This module provides a complete implementation of the Zhipu GLM service,
//! supporting both the native format and the OpenAI-compatible format.
4
use crate::core::{GenericProvider, HttpClient, Protocol};
use crate::error::LlmConnectorError;
use crate::types::{ChatRequest, ChatResponse, Choice, Message as TypeMessage, Role, Tool, ToolChoice};

use serde::{Deserialize, Serialize};
use std::collections::HashMap;
11
/// Extract reasoning content from a Zhipu response.
///
/// Zhipu reasoning models (e.g. GLM-Z1) embed the reasoning process inside
/// `content`, delimited by markers:
/// - `###Thinking` marks the start of the reasoning process
/// - `###Response` marks the start of the final answer
///
/// # Arguments
/// - `content`: the raw content string
///
/// # Returns
/// - `(reasoning_content, final_content)`: the reasoning text (if any) and the final answer
fn extract_zhipu_reasoning_content(content: &str) -> (Option<String>, String) {
    // `split_once` succeeds exactly when a `###Response` marker is present,
    // and hands us everything before/after its first occurrence.
    if let Some((head, tail)) = content.split_once("###Response") {
        let thinking = head.replace("###Thinking", "").trim().to_string();
        // Both markers must be present and the reasoning section must be
        // non-empty; otherwise the markers carry no information and the
        // content is passed through untouched.
        if content.contains("###Thinking") && !thinking.is_empty() {
            return (Some(thinking), tail.trim().to_string());
        }
    }

    // No usable reasoning markers: return the original content unchanged.
    (None, content.to_string())
}
44
/// Processing phase for a Zhipu streaming response.
#[cfg(feature = "streaming")]
#[derive(Debug, Clone, PartialEq)]
enum ZhipuStreamPhase {
    /// Initial state: waiting to detect whether this is a reasoning model.
    Initial,
    /// Inside the reasoning section (after `###Thinking`, before `###Response`).
    InThinking,
    /// Inside the answer section (after `###Response`).
    InResponse,
}
56
/// State machine for Zhipu streaming responses.
#[cfg(feature = "streaming")]
struct ZhipuStreamState {
    /// Buffer used to accumulate incoming content deltas.
    buffer: String,
    /// Current processing phase.
    phase: ZhipuStreamPhase,
}
65
#[cfg(feature = "streaming")]
impl ZhipuStreamState {
    /// Create a fresh state machine in the `Initial` phase with an empty buffer.
    fn new() -> Self {
        Self {
            buffer: String::new(),
            phase: ZhipuStreamPhase::Initial,
        }
    }

    /// Number of leading bytes of `buffer` that are safe to emit now.
    ///
    /// Streaming chunks can split a marker (`###Thinking` / `###Response`)
    /// across two deltas. The previous implementation flushed the whole
    /// buffer on every delta, so a split marker was emitted as visible text
    /// and the phase transition was lost. To fix that, hold back the longest
    /// suffix of `buffer` that is a proper prefix of `marker` and emit only
    /// the bytes before it.
    ///
    /// NOTE(review): if the stream ends while a partial marker (at most
    /// `marker.len() - 1` bytes) is held back, those bytes are not emitted.
    /// This is the trade-off for reliable marker detection; confirm it is
    /// acceptable for this API, which has no end-of-stream flush hook.
    fn emit_boundary(buffer: &str, marker: &str) -> usize {
        let max_hold = marker.len().min(buffer.len());
        // Try the longest candidate first so the full partial marker is retained.
        for hold in (1..=max_hold).rev() {
            let start = buffer.len() - hold;
            // Guard against slicing inside a multi-byte character; an ASCII
            // marker can only match a suffix that starts on a char boundary.
            if buffer.is_char_boundary(start) && marker.starts_with(&buffer[start..]) {
                return start;
            }
        }
        buffer.len()
    }

    /// Process one streaming content delta.
    ///
    /// # Returns
    /// - `(reasoning_delta, content_delta)`: the reasoning increment and the
    ///   answer increment extracted from this delta (either may be `None`).
    fn process(&mut self, delta_content: &str) -> (Option<String>, Option<String>) {
        self.buffer.push_str(delta_content);

        match self.phase {
            ZhipuStreamPhase::Initial => {
                if self.buffer.contains("###Thinking") {
                    // Reasoning model detected: strip the marker and switch phase.
                    self.buffer = self.buffer.replace("###Thinking", "").trim_start().to_string();
                    self.phase = ZhipuStreamPhase::InThinking;

                    // The whole reasoning section may arrive in a single chunk.
                    if self.buffer.contains("###Response") {
                        return self.handle_response_marker();
                    }

                    self.drain_reasoning()
                } else {
                    // No complete ###Thinking marker seen yet. Emit what we can,
                    // but keep a possible partial marker in the buffer so a
                    // marker split across chunks is still detected.
                    let cut = Self::emit_boundary(&self.buffer, "###Thinking");
                    let content: String = self.buffer.drain(..cut).collect();
                    if content.is_empty() {
                        (None, None)
                    } else {
                        (None, Some(content))
                    }
                }
            }
            ZhipuStreamPhase::InThinking => {
                if self.buffer.contains("###Response") {
                    self.handle_response_marker()
                } else {
                    self.drain_reasoning()
                }
            }
            ZhipuStreamPhase::InResponse => {
                // Past the ###Response marker: everything is answer content.
                let content = std::mem::take(&mut self.buffer);
                if content.is_empty() {
                    (None, None)
                } else {
                    (None, Some(content))
                }
            }
        }
    }

    /// Emit buffered reasoning content, holding back a possible partial
    /// `###Response` marker (see [`Self::emit_boundary`]).
    fn drain_reasoning(&mut self) -> (Option<String>, Option<String>) {
        let cut = Self::emit_boundary(&self.buffer, "###Response");
        let reasoning: String = self.buffer.drain(..cut).collect();
        if reasoning.is_empty() {
            (None, None)
        } else {
            (Some(reasoning), None)
        }
    }

    /// Split the buffer at the `###Response` marker and transition to the
    /// answer phase.
    fn handle_response_marker(&mut self) -> (Option<String>, Option<String>) {
        let parts: Vec<&str> = self.buffer.split("###Response").collect();
        if parts.len() >= 2 {
            // Reasoning portion (before the first ###Response).
            let thinking = parts[0].trim();
            let reasoning = if thinking.is_empty() {
                None
            } else {
                Some(thinking.to_string())
            };

            // Answer portion (everything after the first ###Response).
            let answer = parts[1..].join("###Response").trim_start().to_string();
            self.buffer.clear();
            self.phase = ZhipuStreamPhase::InResponse;

            let content = if answer.is_empty() { None } else { Some(answer) };

            (reasoning, content)
        } else {
            // Unreachable when callers check for the marker first; kept as a
            // safe fallback.
            (None, None)
        }
    }
}
156
// ============================================================================
// Zhipu Protocol Definition (Private)
// ============================================================================

/// Private protocol implementation for Zhipu GLM.
///
/// Zhipu speaks an OpenAI-compatible wire format but has its own
/// authentication and error handling. Because this protocol is private to
/// the provider, it lives here rather than in the public `protocols` module.
#[derive(Clone, Debug)]
pub struct ZhipuProtocol {
    api_key: String,
    use_openai_format: bool,
}

impl ZhipuProtocol {
    /// Shared constructor backing both public entry points.
    fn with_format(api_key: &str, use_openai_format: bool) -> Self {
        Self {
            api_key: api_key.to_owned(),
            use_openai_format,
        }
    }

    /// Create a new Zhipu protocol instance (native format).
    pub fn new(api_key: &str) -> Self {
        Self::with_format(api_key, false)
    }

    /// Create a Zhipu protocol instance that uses the OpenAI-compatible format.
    pub fn new_openai_compatible(api_key: &str) -> Self {
        Self::with_format(api_key, true)
    }

    /// The configured API key.
    pub fn api_key(&self) -> &str {
        &self.api_key
    }

    /// Whether this instance uses the OpenAI-compatible format.
    pub fn is_openai_compatible(&self) -> bool {
        self.use_openai_format
    }
}
198
#[async_trait::async_trait]
impl Protocol for ZhipuProtocol {
    type Request = ZhipuRequest;
    type Response = ZhipuResponse;

    fn name(&self) -> &str {
        "zhipu"
    }

    fn chat_endpoint(&self, base_url: &str) -> String {
        format!("{}/api/paas/v4/chat/completions", base_url)
    }

    fn auth_headers(&self) -> Vec<(String, String)> {
        vec![
            (
                "Authorization".to_string(),
                format!("Bearer {}", self.api_key),
            ),
            // NOTE: Content-Type is set automatically by HttpClient::post()'s
            // .json() method. Do not set it here as well, or the request may
            // end up with a duplicate header.
        ]
    }

    fn build_request(&self, request: &ChatRequest) -> Result<Self::Request, LlmConnectorError> {
        // Zhipu accepts the OpenAI-compatible message format.
        let messages: Vec<ZhipuMessage> = request
            .messages
            .iter()
            .map(|msg| ZhipuMessage {
                role: match msg.role {
                    Role::System => "system".to_string(),
                    Role::User => "user".to_string(),
                    Role::Assistant => "assistant".to_string(),
                    Role::Tool => "tool".to_string(),
                },
                // Zhipu expects plain-text content (no multi-part blocks).
                content: msg.content_as_text(),
                // Tool calls are forwarded as raw JSON values; a serialization
                // failure degrades to a default value rather than aborting.
                tool_calls: msg.tool_calls.as_ref().map(|calls| {
                    calls.iter().map(|c| serde_json::to_value(c).unwrap_or_default()).collect()
                }),
                tool_call_id: msg.tool_call_id.clone(),
                name: msg.name.clone(),
            })
            .collect();

        Ok(ZhipuRequest {
            model: request.model.clone(),
            messages,
            max_tokens: request.max_tokens,
            temperature: request.temperature,
            top_p: request.top_p,
            stream: request.stream,
            tools: request.tools.clone(),
            tool_choice: request.tool_choice.clone(),
        })
    }

    fn parse_response(&self, response: &str) -> Result<ChatResponse, LlmConnectorError> {
        // NOTE(review): parse failures here are reported as InvalidRequest,
        // while the streaming path below uses ParseError — consider unifying.
        let parsed: ZhipuResponse = serde_json::from_str(response).map_err(|e| {
            LlmConnectorError::InvalidRequest(format!("Failed to parse response: {}", e))
        })?;

        if let Some(choices) = parsed.choices {
            if let Some(first_choice) = choices.first() {
                // Convert ZhipuMessage into TypeMessage, extracting any
                // embedded reasoning section first.
                let (reasoning_content, final_content) =
                    extract_zhipu_reasoning_content(&first_choice.message.content);

                let type_message = TypeMessage {
                    role: match first_choice.message.role.as_str() {
                        "system" => Role::System,
                        "user" => Role::User,
                        "assistant" => Role::Assistant,
                        "tool" => Role::Tool,
                        // Unknown roles fall back to Assistant.
                        _ => Role::Assistant,
                    },
                    content: vec![crate::types::MessageBlock::text(&final_content)],
                    // Tool calls that fail to deserialize are silently dropped.
                    tool_calls: first_choice.message.tool_calls.as_ref().map(|calls| {
                        calls.iter().filter_map(|v| {
                            serde_json::from_value(v.clone()).ok()
                        }).collect()
                    }),
                    ..Default::default()
                };

                let choice = Choice {
                    index: first_choice.index.unwrap_or(0),
                    message: type_message,
                    finish_reason: first_choice.finish_reason.clone(),
                    logprobs: None,
                };

                // Only the first choice is surfaced; any additional choices in
                // the payload are ignored.
                return Ok(ChatResponse {
                    id: parsed.id.unwrap_or_else(|| "unknown".to_string()),
                    object: "chat.completion".to_string(),
                    created: parsed.created.unwrap_or(0),
                    model: parsed.model.unwrap_or_else(|| "unknown".to_string()),
                    content: final_content,
                    reasoning_content,
                    choices: vec![choice],
                    usage: parsed.usage.and_then(|v| serde_json::from_value(v).ok()),
                    system_fingerprint: None,
                });
            }
        }

        Err(LlmConnectorError::InvalidRequest(
            "Empty or invalid response".to_string(),
        ))
    }

    fn map_error(&self, status: u16, body: &str) -> LlmConnectorError {
        LlmConnectorError::from_status_code(status, format!("Zhipu API error: {}", body))
    }

    /// Zhipu-specific streaming parser.
    ///
    /// The Zhipu API separates SSE events with a single newline instead of
    /// the standard double newline:
    /// format: `data: {...}\n` rather than `data: {...}\n\n`.
    #[cfg(feature = "streaming")]
    async fn parse_stream_response(
        &self,
        response: reqwest::Response,
    ) -> Result<crate::types::ChatStream, LlmConnectorError> {
        use crate::types::StreamingResponse;
        use futures_util::StreamExt;

        let stream = response.bytes_stream();

        let events_stream = stream
            .scan(String::new(), |buffer, chunk_result| {
                let mut out: Vec<Result<String, LlmConnectorError>> = Vec::new();
                match chunk_result {
                    Ok(chunk) => {
                        // Normalize CRLF so the single-newline split below works.
                        let chunk_str = String::from_utf8_lossy(&chunk).replace("\r\n", "\n");
                        buffer.push_str(&chunk_str);

                        // Zhipu delimits each `data:` line with a single newline.
                        while let Some(newline_idx) = buffer.find('\n') {
                            let line: String = buffer.drain(..newline_idx + 1).collect();
                            let trimmed = line.trim();

                            // Skip blank lines.
                            if trimmed.is_empty() {
                                continue;
                            }

                            // Extract the payload after the `data:` prefix
                            // (with or without a space after the colon).
                            if let Some(payload) = trimmed
                                .strip_prefix("data: ")
                                .or_else(|| trimmed.strip_prefix("data:"))
                            {
                                let payload = payload.trim();

                                // Skip the [DONE] sentinel.
                                if payload == "[DONE]" {
                                    continue;
                                }

                                // Skip empty payloads.
                                if payload.is_empty() {
                                    continue;
                                }

                                out.push(Ok(payload.to_string()));
                            }
                        }
                    }
                    Err(e) => {
                        out.push(Err(LlmConnectorError::NetworkError(e.to_string())));
                    }
                }
                std::future::ready(Some(out))
            })
            .flat_map(futures_util::stream::iter);

        // Convert the stream of JSON strings into StreamingResponse values,
        // using the state machine to separate Zhipu's ###Thinking and
        // ###Response sections.
        let response_stream = events_stream.scan(
            ZhipuStreamState::new(),
            |state, result| {
                let processed = result.and_then(|json_str| {
                    let mut response = serde_json::from_str::<StreamingResponse>(&json_str).map_err(|e| {
                        LlmConnectorError::ParseError(format!(
                            "Failed to parse Zhipu streaming response: {}. JSON: {}",
                            e, json_str
                        ))
                    })?;

                    // Route the content delta through the reasoning state machine.
                    if let Some(first_choice) = response.choices.first_mut() {
                        if let Some(ref delta_content) = first_choice.delta.content {
                            // Split the delta into reasoning/answer increments.
                            let (reasoning_delta, content_delta) = state.process(delta_content);

                            // Rewrite the delta with the separated parts.
                            if let Some(reasoning) = reasoning_delta {
                                first_choice.delta.reasoning_content = Some(reasoning);
                            }

                            if let Some(content) = content_delta {
                                first_choice.delta.content = Some(content.clone());
                                // Keep response.content in sync with the delta.
                                response.content = content;
                            } else {
                                // No answer increment: clear delta.content.
                                first_choice.delta.content = None;
                                response.content = String::new();
                            }
                        }
                    }

                    Ok(response)
                });

                std::future::ready(Some(processed))
            }
        );

        Ok(Box::pin(response_stream))
    }
}
423
// Zhipu-specific data structures (OpenAI-compatible format)

/// Chat-completions request body sent to the Zhipu API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZhipuRequest {
    // Target model name.
    pub model: String,
    // Conversation messages in OpenAI-compatible form.
    pub messages: Vec<ZhipuMessage>,
    // Maximum number of tokens to generate (omitted from JSON when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,
    // Sampling temperature (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    // Nucleus-sampling probability mass (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,
    // Whether to request a streaming response (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    // Tools the model may call (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,
    // Tool-choice policy (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoice>,
}

/// OpenAI-compatible chat message as exchanged with the Zhipu API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZhipuMessage {
    // Message role: "system", "user", "assistant", or "tool"
    // (see the mapping in build_request / parse_response).
    pub role: String,
    // Plain-text content; defaults to an empty string when absent.
    #[serde(default)]
    pub content: String,
    // Raw tool-call JSON values (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<serde_json::Value>>,
    // ID of the tool call this message responds to (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_call_id: Option<String>,
    // Optional participant name (omitted when None).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
}

/// Top-level chat-completions response body from the Zhipu API.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZhipuResponse {
    // Response identifier.
    pub id: Option<String>,
    // Creation timestamp — presumably Unix seconds per OpenAI convention;
    // TODO confirm against the Zhipu API docs.
    pub created: Option<u64>,
    // Model that produced the response.
    pub model: Option<String>,
    // Generated choices.
    pub choices: Option<Vec<ZhipuChoice>>,
    // Token-usage accounting, kept as raw JSON and parsed lazily.
    pub usage: Option<serde_json::Value>,
}

/// A single generated choice within a Zhipu response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ZhipuChoice {
    // Index of this choice in the response.
    pub index: Option<u32>,
    // The generated message.
    pub message: ZhipuMessage,
    // Why generation stopped, when reported by the API.
    pub finish_reason: Option<String>,
}
471
// ============================================================================
// Zhipu Provider Implementation
// ============================================================================

/// Zhipu GLM provider type.
pub type ZhipuProvider = GenericProvider<ZhipuProtocol>;

/// Create a Zhipu GLM provider (native format).
///
/// # Arguments
/// - `api_key`: Zhipu GLM API key
///
/// # Returns
/// A configured Zhipu provider instance.
///
/// # Examples
/// ```rust,no_run
/// use llm_connector::providers::zhipu;
///
/// let provider = zhipu("your-api-key").unwrap();
/// ```
pub fn zhipu(api_key: &str) -> Result<ZhipuProvider, LlmConnectorError> {
    zhipu_with_config(api_key, false, None, None, None)
}
496
/// Create a Zhipu GLM provider (OpenAI-compatible format).
///
/// # Arguments
/// - `api_key`: Zhipu GLM API key
///
/// # Returns
/// A configured Zhipu provider instance in OpenAI-compatible mode.
///
/// # Examples
/// ```rust,no_run
/// use llm_connector::providers::zhipu_openai_compatible;
///
/// let provider = zhipu_openai_compatible("your-api-key").unwrap();
/// ```
pub fn zhipu_openai_compatible(api_key: &str) -> Result<ZhipuProvider, LlmConnectorError> {
    zhipu_with_config(api_key, true, None, None, None)
}
514
515/// 创建带有自定义配置的智谱GLM服务提供商
516///
517/// # 参数
518/// - `api_key`: API密钥
519/// - `openai_compatible`: 是否使用OpenAI兼容格式
520/// - `base_url`: 自定义基础URL (可选)
521/// - `timeout_secs`: 超时时间(秒) (可选)
522/// - `proxy`: 代理URL (可选)
523///
524/// # 示例
525/// ```rust,no_run
526/// use llm_connector::providers::zhipu_with_config;
527///
528/// let provider = zhipu_with_config(
529///     "your-api-key",
530///     true, // 使用OpenAI兼容格式
531///     None, // 使用默认URL
532///     Some(60), // 60秒超时
533///     None
534/// ).unwrap();
535/// ```
536pub fn zhipu_with_config(
537    api_key: &str,
538    openai_compatible: bool,
539    base_url: Option<&str>,
540    timeout_secs: Option<u64>,
541    proxy: Option<&str>,
542) -> Result<ZhipuProvider, LlmConnectorError> {
543    // 创建协议实例
544    let protocol = if openai_compatible {
545        ZhipuProtocol::new_openai_compatible(api_key)
546    } else {
547        ZhipuProtocol::new(api_key)
548    };
549
550    // 创建HTTP客户端
551    let client = HttpClient::with_config(
552        base_url.unwrap_or("https://open.bigmodel.cn"),
553        timeout_secs,
554        proxy,
555    )?;
556
557    // 添加认证头
558    let auth_headers: HashMap<String, String> = protocol.auth_headers().into_iter().collect();
559    let client = client.with_headers(auth_headers);
560
561    // 创建通用提供商
562    Ok(GenericProvider::new(protocol, client))
563}
564
/// Create a Zhipu GLM provider with a custom timeout.
///
/// Note: this helper enables the OpenAI-compatible format (it passes `true`
/// to [`zhipu_with_config`]), unlike [`zhipu`] which uses the native format —
/// NOTE(review): confirm this asymmetry is intentional.
///
/// # Arguments
/// - `api_key`: API key
/// - `timeout_secs`: timeout in seconds
///
/// # Examples
/// ```rust,no_run
/// use llm_connector::providers::zhipu_with_timeout;
///
/// // 120-second timeout
/// let provider = zhipu_with_timeout("your-api-key", 120).unwrap();
/// ```
pub fn zhipu_with_timeout(
    api_key: &str,
    timeout_secs: u64,
) -> Result<ZhipuProvider, LlmConnectorError> {
    zhipu_with_config(api_key, true, None, Some(timeout_secs), None)
}
584
/// Create a provider for Zhipu GLM Enterprise (OpenAI-compatible mode).
///
/// # Arguments
/// - `api_key`: enterprise API key
/// - `enterprise_endpoint`: enterprise endpoint URL
///
/// # Examples
/// ```rust,no_run
/// use llm_connector::providers::zhipu_enterprise;
///
/// let provider = zhipu_enterprise(
///     "your-enterprise-key",
///     "https://enterprise.bigmodel.cn"
/// ).unwrap();
/// ```
pub fn zhipu_enterprise(
    api_key: &str,
    enterprise_endpoint: &str,
) -> Result<ZhipuProvider, LlmConnectorError> {
    zhipu_with_config(api_key, true, Some(enterprise_endpoint), None, None)
}
606
/// Validate the format of a Zhipu GLM API key.
///
/// # Arguments
/// - `api_key`: the API key to check
///
/// # Returns
/// `true` when the key looks plausible (longer than 10 bytes), `false` otherwise.
///
/// # Examples
/// ```rust
/// use llm_connector::providers::validate_zhipu_key;
///
/// assert!(validate_zhipu_key("your-valid-key"));
/// assert!(!validate_zhipu_key(""));
/// ```
pub fn validate_zhipu_key(api_key: &str) -> bool {
    // A key longer than 10 bytes is necessarily non-empty, so no separate
    // emptiness check is needed.
    api_key.len() > 10
}
625
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_zhipu_provider_creation() {
        let provider = zhipu("test-key");
        assert!(provider.is_ok());

        let provider = provider.unwrap();
        assert_eq!(provider.protocol().name(), "zhipu");
    }

    #[test]
    fn test_zhipu_openai_compatible() {
        let provider = zhipu_openai_compatible("test-key");
        assert!(provider.is_ok());

        let provider = provider.unwrap();
        assert_eq!(provider.protocol().name(), "zhipu");
        assert!(provider.protocol().is_openai_compatible());
    }

    #[test]
    fn test_zhipu_with_config() {
        let provider = zhipu_with_config(
            "test-key",
            true,
            Some("https://custom.bigmodel.cn"),
            Some(60),
            None,
        );
        assert!(provider.is_ok());

        let provider = provider.unwrap();
        assert_eq!(provider.client().base_url(), "https://custom.bigmodel.cn");
        assert!(provider.protocol().is_openai_compatible());
    }

    #[test]
    fn test_zhipu_with_timeout() {
        let provider = zhipu_with_timeout("test-key", 120);
        assert!(provider.is_ok());
    }

    #[test]
    fn test_zhipu_enterprise() {
        let provider = zhipu_enterprise("test-key", "https://enterprise.bigmodel.cn");
        assert!(provider.is_ok());

        let provider = provider.unwrap();
        assert_eq!(
            provider.client().base_url(),
            "https://enterprise.bigmodel.cn"
        );
    }

    #[test]
    fn test_validate_zhipu_key() {
        assert!(validate_zhipu_key("valid-test-key"));
        assert!(validate_zhipu_key("another-valid-key-12345"));
        assert!(!validate_zhipu_key("short"));
        assert!(!validate_zhipu_key(""));
    }

    #[test]
    fn test_extract_zhipu_reasoning_content() {
        // Content that contains a reasoning section
        let content_with_thinking = "###Thinking\n这是推理过程\n分析步骤1\n分析步骤2\n###Response\n这是最终答案";
        let (reasoning, answer) = extract_zhipu_reasoning_content(content_with_thinking);
        assert!(reasoning.is_some());
        assert_eq!(reasoning.unwrap(), "这是推理过程\n分析步骤1\n分析步骤2");
        assert_eq!(answer, "这是最终答案");

        // Content without any reasoning markers
        let content_without_thinking = "这只是一个普通的回答";
        let (reasoning, answer) = extract_zhipu_reasoning_content(content_without_thinking);
        assert!(reasoning.is_none());
        assert_eq!(answer, "这只是一个普通的回答");

        // ###Thinking present but no ###Response marker
        let content_only_thinking = "###Thinking\n这是推理过程";
        let (reasoning, answer) = extract_zhipu_reasoning_content(content_only_thinking);
        assert!(reasoning.is_none());
        assert_eq!(answer, "###Thinking\n这是推理过程");

        // Empty reasoning section: content passes through unchanged
        let content_empty_thinking = "###Thinking\n\n###Response\n答案";
        let (reasoning, answer) = extract_zhipu_reasoning_content(content_empty_thinking);
        assert!(reasoning.is_none());
        assert_eq!(answer, "###Thinking\n\n###Response\n答案");
    }

    #[cfg(feature = "streaming")]
    #[test]
    fn test_zhipu_stream_state() {
        // Streaming response from a reasoning model
        let mut state = ZhipuStreamState::new();

        // First chunk: ###Thinking marker
        let (reasoning, content) = state.process("###Thinking\n开始");
        assert_eq!(reasoning, Some("开始".to_string()));
        assert_eq!(content, None);

        // Second chunk: reasoning continues
        let (reasoning, content) = state.process("推理");
        assert_eq!(reasoning, Some("推理".to_string()));
        assert_eq!(content, None);

        // Third chunk: ###Response marker arrives
        let (reasoning, content) = state.process("过程\n###Response\n答案");
        assert_eq!(reasoning, Some("过程".to_string()));
        assert_eq!(content, Some("答案".to_string()));

        // Fourth chunk: answer continues
        let (reasoning, content) = state.process("继续");
        assert_eq!(reasoning, None);
        assert_eq!(content, Some("继续".to_string()));
    }

    #[cfg(feature = "streaming")]
    #[test]
    fn test_zhipu_stream_state_non_reasoning() {
        // Streaming response from a non-reasoning model
        let mut state = ZhipuStreamState::new();

        // First chunk: plain content
        let (reasoning, content) = state.process("这是");
        assert_eq!(reasoning, None);
        assert_eq!(content, Some("这是".to_string()));

        // Second chunk: more plain content
        let (reasoning, content) = state.process("普通回答");
        assert_eq!(reasoning, None);
        assert_eq!(content, Some("普通回答".to_string()));
    }

    #[cfg(feature = "streaming")]
    #[test]
    fn test_zhipu_stream_state_complete_in_one_chunk() {
        // Complete reasoning section delivered in a single chunk
        let mut state = ZhipuStreamState::new();

        let (reasoning, content) = state.process("###Thinking\n推理过程\n###Response\n答案");
        assert_eq!(reasoning, Some("推理过程".to_string()));
        assert_eq!(content, Some("答案".to_string()));
    }
}