use super::Client;
use crate::llm::types::{Response, Usage};
use anyhow::{anyhow, Result};
use llm_connector::types::ChatRequest;

6impl Client {
7 pub async fn chat(
9 &self,
10 model: &str,
11 messages: Vec<llm_connector::types::Message>,
12 tools: Option<Vec<llm_connector::types::Tool>>,
13 ) -> Result<Response> {
14 let request = ChatRequest {
16 model: model.to_string(),
17 messages,
18 tools,
19 ..Default::default()
20 };
21
22 let response = self.llm_client.chat(&request).await
23 .map_err(|e| anyhow!("LLM connector error: {}", e))?;
24
25 tracing::info!("📦 Raw LLM response: {:?}", response);
27 tracing::info!("📦 Raw LLM response choices: {}", response.choices.len());
28 if let Some(choice) = response.choices.get(0) {
29 tracing::info!("📦 Message content: '{}'", choice.message.content_as_text());
30 tracing::info!("📦 Message reasoning_content: {:?}", choice.message.reasoning_content);
31 tracing::info!("📦 Message reasoning: {:?}", choice.message.reasoning);
32 } else {
33 tracing::warn!("⚠️ No choices in response!");
34 }
35
36 let (prompt_tokens, completion_tokens, total_tokens) = response.get_usage_safe();
38
39 let (content, tool_calls) = if let Some(choice) = response.choices.get(0) {
41 let msg = &choice.message;
42
43 let content = if msg.is_text_only() && !msg.content_as_text().is_empty() {
45 msg.content_as_text()
46 } else if let Some(reasoning) = &msg.reasoning_content {
47 reasoning.clone()
48 } else if let Some(reasoning) = &msg.reasoning {
49 reasoning.clone()
50 } else {
51 String::new()
52 };
53
54 let tool_calls = msg.tool_calls.as_ref()
56 .and_then(|tc| serde_json::to_value(tc).ok());
57
58 (content, tool_calls)
59 } else if !response.content.is_empty() {
60 tracing::info!("📦 Using response.content: '{}'", response.content);
63 (response.content.clone(), None)
64 } else {
65 (String::new(), None)
66 };
67
68 Ok(Response {
69 content,
70 model: response.model,
71 usage: Usage {
72 prompt_tokens,
73 completion_tokens,
74 total_tokens,
75 },
76 tool_calls,
77 })
78 }
79}