llm_link/api/
convert.rs

use crate::llm::Response;
use anyhow::Result;
use llm_connector::types::{Function, Message as LlmMessage, MessageBlock, Role as LlmRole, Tool};
use serde_json::Value;

/// Convert OpenAI messages format to llm-connector format
pub fn openai_messages_to_llm(messages: Vec<Value>) -> Result<Vec<LlmMessage>> {
    let mut llm_messages = Vec::new();

    for msg in messages {
        let role = msg["role"]
            .as_str()
            .ok_or_else(|| anyhow::anyhow!("Missing role"))?;

        // Determine the role
        let llm_role = match role {
            "system" => LlmRole::System,
            "user" => LlmRole::User,
            "assistant" => LlmRole::Assistant,
            "tool" => LlmRole::Tool,
            _ => return Err(anyhow::anyhow!("Unsupported role: {}", role)),
        };

        // Handle content (can be string, array, or null)
        let content = if msg["content"].is_null() {
            // Null content is allowed for assistant messages with tool_calls
            String::new()
        } else if let Some(content_str) = msg["content"].as_str() {
            // Simple string content
            content_str.to_string()
        } else if let Some(content_array) = msg["content"].as_array() {
            // Array content (e.g., from Codex with text and images)
            // Extract text parts and concatenate them
            let mut text_parts = Vec::new();
            for part in content_array {
                if let Some(text) = part["text"].as_str() {
                    text_parts.push(text);
                } else if let Some(text) = part.as_str() {
                    // Sometimes the array contains direct strings
                    text_parts.push(text);
                }
            }
            if text_parts.is_empty() {
                return Err(anyhow::anyhow!("Content array has no text parts"));
            }
            text_parts.join("\n")
        } else {
            return Err(anyhow::anyhow!(
                "Content must be string, array, or null, got: {:?}",
                msg["content"]
            ));
        };

        // Extract tool_calls if present (for assistant messages)
        let tool_calls = if role == "assistant" {
            msg.get("tool_calls")
                .and_then(|tc| serde_json::from_value(tc.clone()).ok())
        } else {
            None
        };

        // Extract tool_call_id if present (for tool messages)
        let tool_call_id = if role == "tool" {
            msg.get("tool_call_id")
                .and_then(|id| id.as_str())
                .map(|s| s.to_string())
        } else {
            None
        };

        llm_messages.push(LlmMessage {
            role: llm_role,
            content: vec![MessageBlock::Text { text: content }],
            name: None,
            tool_calls,
            tool_call_id,
            reasoning_content: None,
            reasoning: None,
            thought: None,
            thinking: None,
        });
    }

    Ok(llm_messages)
}

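// Illustrative sketch, not part of the original file: a minimal unit test for
// the message conversion above. It relies only on the llm_connector types
// already imported in this module; the message payloads are made-up sample data.
#[cfg(test)]
#[test]
fn openai_messages_to_llm_handles_string_and_array_content() {
    let messages = vec![
        serde_json::json!({"role": "system", "content": "You are helpful."}),
        serde_json::json!({"role": "user", "content": [
            {"type": "text", "text": "Hello"},
            {"type": "text", "text": "world"}
        ]}),
    ];

    let converted = openai_messages_to_llm(messages).expect("conversion should succeed");
    assert_eq!(converted.len(), 2);
    assert!(matches!(converted[0].role, LlmRole::System));

    // Array content is flattened into a single newline-joined text block
    match &converted[1].content[0] {
        MessageBlock::Text { text } => assert_eq!(text, "Hello\nworld"),
        _ => panic!("expected a text block"),
    }
}
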
/// Convert Response to OpenAI format
pub fn response_to_openai(response: Response) -> Value {
    let has_tool_calls = response.tool_calls.is_some();

    let mut message = serde_json::json!({
        "role": "assistant",
        "content": response.content
    });

    // Add tool_calls if present
    if let Some(tool_calls) = response.tool_calls {
        message["tool_calls"] = tool_calls;
    }

    // OpenAI reports "tool_calls" as the finish reason when the model is
    // requesting tool execution, and "stop" otherwise
    let finish_reason = if has_tool_calls { "tool_calls" } else { "stop" };

    serde_json::json!({
        "id": uuid::Uuid::new_v4().to_string(),
        "object": "chat.completion",
        "created": chrono::Utc::now().timestamp(),
        "model": response.model,
        "choices": [{
            "index": 0,
            "message": message,
            "finish_reason": finish_reason
        }],
        "usage": {
            "prompt_tokens": response.usage.prompt_tokens,
            "completion_tokens": response.usage.completion_tokens,
            "total_tokens": response.usage.total_tokens
        }
    })
}

/// Convert Response to Ollama format
pub fn response_to_ollama(response: Response) -> Value {
    serde_json::json!({
        "model": response.model,
        "created_at": chrono::Utc::now().to_rfc3339(),
        "message": {
            "role": "assistant",
            "content": response.content
        },
        "done": true,
        "total_duration": 0,
        "load_duration": 0,
        "prompt_eval_count": response.usage.prompt_tokens,
        "prompt_eval_duration": 0,
        "eval_count": response.usage.completion_tokens,
        "eval_duration": 0
    })
}

/// Convert OpenAI tools format to llm-connector format
pub fn openai_tools_to_llm(tools: Vec<Value>) -> Vec<Tool> {
    tools
        .into_iter()
        .filter_map(|tool| {
            let tool_type = tool.get("type")?.as_str()?.to_string();
            let function = tool.get("function")?;

            Some(Tool {
                tool_type,
                function: Function {
                    name: function.get("name")?.as_str()?.to_string(),
                    description: function
                        .get("description")
                        .and_then(|d| d.as_str())
                        .map(String::from),
                    parameters: function.get("parameters")?.clone(),
                },
            })
        })
        .collect()
}

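// Illustrative sketch, not part of the original file: a minimal unit test for
// the tool conversion above. The "get_weather" tool is made-up sample data;
// entries missing a "function" object are expected to be dropped by filter_map.
#[cfg(test)]
#[test]
fn openai_tools_to_llm_skips_malformed_entries() {
    let tools = vec![
        serde_json::json!({
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Look up the weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"]
                }
            }
        }),
        // Missing "function" object, so it is silently dropped
        serde_json::json!({"type": "function"}),
    ];

    let converted = openai_tools_to_llm(tools);
    assert_eq!(converted.len(), 1);
    assert_eq!(converted[0].tool_type, "function");
    assert_eq!(converted[0].function.name, "get_weather");
    assert_eq!(
        converted[0].function.description.as_deref(),
        Some("Look up the weather for a city")
    );
}
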
/// Convert model list to Ollama format
pub fn models_to_ollama(models: Vec<crate::llm::Model>) -> Vec<Value> {
    models
        .into_iter()
        .map(|model| {
            let family = model.id.split('-').next().unwrap_or("unknown");
            serde_json::json!({
                "name": model.id,
                "model": model.id,
                "modified_at": chrono::Utc::now().to_rfc3339(),
                "size": 1000000,
                "digest": format!("sha256:{}", "0".repeat(64)),
                "details": {
                    "parent_model": "",
                    "format": "gguf",
                    "family": family,
                    "families": [family],
                    "parameter_size": "7B",
                    "quantization_level": "Q4_K_M"
                },
                "expires_at": null
            })
        })
        .collect()
}