use crate::normalizer::Response;
use anyhow::Result;
use llm_connector::types::{Function, Message as LlmMessage, MessageBlock, Role as LlmRole, Tool};
use serde_json::Value;

6#[allow(dead_code)]
8pub fn openai_messages_to_llm(messages: Vec<Value>) -> Result<Vec<LlmMessage>> {
9 let mut llm_messages = Vec::with_capacity(messages.len());
10
11 for msg in messages {
12 let role = msg["role"]
13 .as_str()
14 .ok_or_else(|| anyhow::anyhow!("Missing role"))?;
15
16 let llm_role = match role {
18 "system" => LlmRole::System,
19 "user" => LlmRole::User,
20 "assistant" => LlmRole::Assistant,
21 "tool" => LlmRole::Tool,
22 _ => return Err(anyhow::anyhow!("Unsupported role: {}", role)),
23 };
24
25 let content = if msg["content"].is_null() {
27 String::new()
29 } else if let Some(content_str) = msg["content"].as_str() {
30 content_str.to_string()
32 } else if let Some(content_array) = msg["content"].as_array() {
33 let mut text_parts = Vec::with_capacity(content_array.len());
36 for part in content_array {
37 if let Some(text) = part["text"].as_str() {
38 text_parts.push(text);
39 } else if let Some(text) = part.as_str() {
40 text_parts.push(text);
42 }
43 }
44 if text_parts.is_empty() {
45 return Err(anyhow::anyhow!("Content array has no text parts"));
46 }
47 text_parts.join("\n")
48 } else {
49 return Err(anyhow::anyhow!(
50 "Content must be string, array, or null, got: {:?}",
51 msg["content"]
52 ));
53 };
54
55 let tool_calls = if role == "assistant" {
57 msg.get("tool_calls")
58 .and_then(|tc| serde_json::from_value(tc.clone()).ok())
59 } else {
60 None
61 };
62
63 let tool_call_id = if role == "tool" {
65 let tool_call_id = msg.get("tool_call_id")
67 .and_then(|id| id.as_str())
68 .map(|s| s.to_string());
69
70 let tool_call_id = if tool_call_id.is_none() {
72 msg.get("tool_name")
73 .and_then(|name| name.as_str())
74 .map(|s| format!("zed_tool_{}", s)) } else {
76 tool_call_id
77 };
78
79 tracing::debug!("🔧 Tool message: tool_call_id={:?}, tool_name={:?}",
80 tool_call_id, msg.get("tool_name"));
81
82 tool_call_id
83 } else {
84 None
85 };
86
87 if role == "tool" {
89 tracing::debug!("🔧 Converting tool message: role={}, tool_call_id={:?}, content_len={}",
90 role, tool_call_id, content.len());
91
92 if let Some(ref id) = tool_call_id {
94 if id.trim().is_empty() {
95 return Err(anyhow::anyhow!(
96 "Tool message has empty 'tool_call_id' field. Tool call ID must be a non-empty string."
97 ));
98 }
99 tracing::debug!("✅ Tool message validation passed: tool_call_id='{}'", id);
100 }
101 }
102
103 llm_messages.push(LlmMessage {
104 role: llm_role,
105 content: vec![MessageBlock::Text { text: content }],
106 name: None,
107 tool_calls,
108 tool_call_id,
109 reasoning_content: None,
110 reasoning: None,
111 thought: None,
112 thinking: None,
113 });
114 }
115
116 Ok(llm_messages)
117}
118
119#[allow(dead_code)]
121pub fn response_to_openai(response: Response) -> Value {
122 let mut message = serde_json::json!({
123 "role": "assistant",
124 "content": response.content
125 });
126
127 if let Some(tool_calls) = response.tool_calls {
129 message["tool_calls"] = tool_calls;
130 }
131
132 serde_json::json!({
133 "id": uuid::Uuid::new_v4().to_string(),
134 "object": "chat.completion",
135 "created": chrono::Utc::now().timestamp(),
136 "model": response.model,
137 "choices": [{
138 "index": 0,
139 "message": message,
140 "finish_reason": "stop"
141 }],
142 "usage": {
143 "prompt_tokens": response.usage.prompt_tokens,
144 "completion_tokens": response.usage.completion_tokens,
145 "total_tokens": response.usage.total_tokens
146 }
147 })
148}
149
150#[allow(dead_code)]
152pub fn response_to_ollama(response: Response) -> Value {
153 let mut message = serde_json::json!({
154 "role": "assistant",
155 "content": response.content
156 });
157
158 if let Some(tool_calls) = response.tool_calls {
159 message["tool_calls"] = tool_calls;
160 }
161
162 serde_json::json!({
163 "model": response.model,
164 "created_at": chrono::Utc::now().to_rfc3339(),
165 "message": message,
166 "done": true,
167 "total_duration": 0,
168 "load_duration": 0,
169 "prompt_eval_count": response.usage.prompt_tokens,
170 "prompt_eval_duration": 0,
171 "eval_count": response.usage.completion_tokens,
172 "eval_duration": 0
173 })
174}
175
176#[allow(dead_code)]
178pub fn response_to_ollama_from_minimax(minimax_response: Value) -> Value {
179 let content = minimax_response
181 .get("choices")
182 .and_then(|c| c.get(0))
183 .and_then(|c| c.get("message"))
184 .and_then(|m| m.get("content"))
185 .and_then(|c| c.as_str())
186 .unwrap_or("No response")
187 .to_string();
188
189 let model = minimax_response
190 .get("model")
191 .and_then(|m| m.as_str())
192 .unwrap_or("MiniMax-M2")
193 .to_string();
194
195 serde_json::json!({
196 "model": model,
197 "created_at": chrono::Utc::now().to_rfc3339(),
198 "message": {
199 "role": "assistant",
200 "content": content
201 },
202 "done": true,
203 "total_duration": 0,
204 "load_duration": 0,
205 "prompt_eval_count": 0,
206 "prompt_eval_duration": 0,
207 "eval_count": 0,
208 "eval_duration": 0
209 })
210}
211
212#[allow(dead_code)]
214pub fn openai_tools_to_llm(tools: Vec<Value>) -> Vec<Tool> {
215 tools
216 .into_iter()
217 .filter_map(|tool| {
218 let tool_type = tool.get("type")?.as_str()?.to_string();
219 let function = tool.get("function")?;
220
221 Some(Tool {
222 tool_type,
223 function: Function {
224 name: function.get("name")?.as_str()?.to_string(),
225 description: function
226 .get("description")
227 .and_then(|d| d.as_str())
228 .map(String::from),
229 parameters: function.get("parameters")?.clone(),
230 },
231 })
232 })
233 .collect()
234}
235
236#[allow(dead_code)]
238pub fn models_to_ollama(models: Vec<crate::normalizer::Model>) -> Vec<Value> {
239 models
240 .into_iter()
241 .map(|model| {
242 let family = model.id.split('-').next().unwrap_or("unknown");
243 serde_json::json!({
244 "name": model.id,
245 "model": model.id,
246 "modified_at": chrono::Utc::now().to_rfc3339(),
247 "size": 1000000,
248 "digest": format!("sha256:{}", "0".repeat(64)),
249 "details": {
250 "parent_model": "",
251 "format": "gguf",
252 "family": family,
253 "families": [family],
254 "parameter_size": "7B",
255 "quantization_level": "Q4_K_M"
256 },
257 "expires_at": null
258 })
259 })
260 .collect()
261}
262
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    /// Full tool-call round trip: user → assistant(tool_calls) → tool → user.
    #[test]
    fn test_tool_call_id_conversion() {
        let conversation = vec![
            json!({
                "role": "user",
                "content": "What is the weather in Beijing?"
            }),
            json!({
                "role": "assistant",
                "content": null,
                "tool_calls": [{
                    "id": "call_123",
                    "type": "function",
                    "function": {
                        "name": "get_weather",
                        "arguments": "{\"location\": \"Beijing\"}"
                    }
                }]
            }),
            json!({
                "role": "tool",
                "content": "The weather in Beijing is sunny, 25°C",
                "tool_call_id": "call_123"
            }),
            json!({
                "role": "user",
                "content": "What about Shanghai?"
            }),
        ];

        let converted = openai_messages_to_llm(conversation).expect("conversion should succeed");
        assert_eq!(converted.len(), 4);

        // The tool message keeps its explicit id.
        assert_eq!(converted[2].role, LlmRole::Tool);
        assert_eq!(converted[2].tool_call_id, Some("call_123".to_string()));

        // The assistant message retains its parsed tool calls.
        assert_eq!(converted[1].role, LlmRole::Assistant);
        assert!(converted[1].tool_calls.is_some());
    }

    /// A tool message with neither `tool_call_id` nor `tool_name` converts
    /// with `tool_call_id == None` rather than failing.
    #[test]
    fn test_missing_tool_call_id() {
        let converted = openai_messages_to_llm(vec![json!({
            "role": "tool",
            "content": "Some tool response"
        })])
        .expect("conversion should succeed");

        assert_eq!(converted.len(), 1);
        assert_eq!(converted[0].role, LlmRole::Tool);
        assert_eq!(converted[0].tool_call_id, None);
    }

    /// An explicitly empty `tool_call_id` is rejected with a descriptive error.
    #[test]
    fn test_empty_tool_call_id() {
        let result = openai_messages_to_llm(vec![json!({
            "role": "tool",
            "content": "Some tool response",
            "tool_call_id": ""
        })]);

        let err = result.expect_err("empty tool_call_id must be rejected");
        assert!(err
            .to_string()
            .contains("Tool message has empty 'tool_call_id' field"));
    }
}
355