//! llm_link/llm/chat.rs

1use super::Client;
2use crate::llm::types::{Response, Usage};
3use anyhow::{anyhow, Result};
4use llm_connector::types::ChatRequest;
5
6impl Client {
7    /// Send a non-streaming chat request to the LLM
8    pub async fn chat(
9        &self,
10        model: &str,
11        messages: Vec<llm_connector::types::Message>,
12        tools: Option<Vec<llm_connector::types::Tool>>,
13    ) -> Result<Response> {
14        // Messages are already in llm-connector format
15        let request = ChatRequest {
16            model: model.to_string(),
17            messages,
18            tools,
19            ..Default::default()
20        };
21
22        let response = self.llm_client.chat(&request).await
23            .map_err(|e| anyhow!("LLM connector error: {}", e))?;
24
25        // Debug: log the raw response
26        tracing::info!("📦 Raw LLM response: {:?}", response);
27        tracing::info!("📦 Raw LLM response choices: {}", response.choices.len());
28        if let Some(choice) = response.choices.get(0) {
29            tracing::info!("📦 Message content: '{}'", choice.message.content_as_text());
30            tracing::info!("📦 Message reasoning_content: {:?}", choice.message.reasoning_content);
31            tracing::info!("📦 Message reasoning: {:?}", choice.message.reasoning);
32        } else {
33            tracing::warn!("⚠️ No choices in response!");
34        }
35
36        // Extract content and usage information
37        let (prompt_tokens, completion_tokens, total_tokens) = response.get_usage_safe();
38
39        // Extract content and tool_calls from choices[0].message or response.content
40        let (content, tool_calls) = if let Some(choice) = response.choices.get(0) {
41            let msg = &choice.message;
42
43            // Extract content (could be in content, reasoning_content, reasoning, etc.)
44            let content = if msg.is_text_only() && !msg.content_as_text().is_empty() {
45                msg.content_as_text()
46            } else if let Some(reasoning) = &msg.reasoning_content {
47                reasoning.clone()
48            } else if let Some(reasoning) = &msg.reasoning {
49                reasoning.clone()
50            } else {
51                String::new()
52            };
53
54            // Extract tool_calls if present
55            let tool_calls = msg.tool_calls.as_ref()
56                .and_then(|tc| serde_json::to_value(tc).ok());
57
58            (content, tool_calls)
59        } else if !response.content.is_empty() {
60            // Fallback: some providers (like Aliyun in llm-connector 0.4.16)
61            // put content directly in response.content instead of choices
62            tracing::info!("📦 Using response.content: '{}'", response.content);
63            (response.content.clone(), None)
64        } else {
65            (String::new(), None)
66        };
67
68        Ok(Response {
69            content,
70            model: response.model,
71            usage: Usage {
72                prompt_tokens,
73                completion_tokens,
74                total_tokens,
75            },
76            tool_calls,
77        })
78    }
79}
80