use crate::models::*;
use uuid::Uuid;
/// Generates a Responses-API-style identifier: `{prefix}_{uuid-without-hyphens}`.
fn new_id(prefix: &str) -> String {
    format!("{}_{}", prefix, Uuid::new_v4().to_string().replace('-', ""))
}

/// Converts an upstream Chat Completions response into the Responses API shape.
///
/// Only the first choice is translated (the Responses API has no notion of
/// multiple choices). Reasoning content, text/refusal content, and tool calls
/// each become separate output items, in that order. `original_model`
/// overrides the model name reported upstream so the client sees the model it
/// requested.
///
/// Top-level `status` reflects only upstream errors ("failed" vs "completed");
/// truncation and filtering are surfaced via `incomplete_details` and the
/// per-message status instead.
pub fn chat_to_responses(
    chat_resp: ChatCompletionResponse,
    original_model: String,
) -> ResponsesResponse {
    let mut output_items: Vec<OutputItem> = Vec::new();

    // Derived solely from the first choice's finish reason; stays `None`
    // when there are no choices or the reason doesn't indicate truncation.
    let incomplete_details = chat_resp
        .choices
        .first()
        .and_then(|choice| match choice.finish_reason.as_deref() {
            Some("content_filter") => Some(serde_json::json!({
                "reason": "content_filter"
            })),
            Some("length") => Some(serde_json::json!({
                "reason": "max_output_tokens"
            })),
            _ => None,
        });

    if let Some(choice) = chat_resp.choices.first() {
        // Reasoning text (if any) is emitted as its own item ahead of the
        // assistant message.
        if let Some(ref rc) = choice.message.reasoning_content
            && !rc.is_empty()
        {
            output_items.push(OutputItem::Reasoning(OutputReasoning {
                id: new_id("rs"),
                summary: vec![],
                content: vec![serde_json::json!({
                    "type": "reasoning_text",
                    "text": rc
                })],
            }));
        }

        let mut content_blocks: Vec<OutputContentBlock> = Vec::new();
        if let Some(ref text) = choice.message.content {
            if !text.is_empty() {
                content_blocks.push(OutputContentBlock::Text {
                    text: text.clone(),
                    annotations: vec![],
                });
            }
        } else if choice.message.tool_calls.is_none() {
            // No content and no tool calls: synthesize a refusal block for
            // filtered responses, otherwise an empty text block so the
            // message item is never content-less.
            if choice.finish_reason.as_deref() == Some("content_filter") {
                content_blocks.push(OutputContentBlock::Refusal {
                    refusal: "content_filter".into(),
                });
            } else {
                content_blocks.push(OutputContentBlock::Text {
                    text: String::new(),
                    annotations: vec![],
                });
            }
        }

        // Per-item status: only filtered / resource-exhausted responses are
        // marked incomplete. Truncation ("length") still yields a usable
        // message and is reported via `incomplete_details` instead.
        let status = match choice.finish_reason.as_deref() {
            Some("content_filter") | Some("insufficient_system_resource") => "incomplete",
            _ => "completed",
        };

        // Skip the message item only when the model produced nothing but
        // tool calls (content absent, tool_calls present). This is the only
        // case in which `content_blocks` is empty and neither fallback above
        // ran.
        if choice.message.content.is_some() || choice.message.tool_calls.is_none() {
            output_items.push(OutputItem::Message(OutputMessage {
                id: new_id("msg"),
                role: "assistant",
                status,
                content: content_blocks,
            }));
        }

        if let Some(ref tool_calls) = choice.message.tool_calls {
            for tc in tool_calls {
                output_items.push(OutputItem::FunctionCall(OutputFunctionCall {
                    id: new_id("fc"),
                    call_id: tc.id.clone(),
                    name: tc.function.name.clone(),
                    arguments: tc.function.arguments.clone(),
                    status: "completed",
                }));
            }
        }
    }

    let error = chat_resp.error.map(|e| {
        serde_json::json!({
            "code": e.code.unwrap_or_default(),
            "message": e.message,
        })
    });

    let usage = chat_resp.usage.map(|u| {
        let reasoning_tokens = u
            .completion_tokens_details
            .as_ref()
            .and_then(|d| d.get("reasoning_tokens"))
            .and_then(|v| v.as_u64())
            .unwrap_or(0) as u32;
        // Prefer the OpenAI-style `prompt_tokens_details.cached_tokens`;
        // fall back to DeepSeek's `prompt_cache_hit_tokens` counter.
        // (Hit tokens alone are the cached portion — the previous hit+miss
        // sum equaled the *total* prompt tokens, not the cached ones.)
        let cached_tokens = u
            .prompt_tokens_details
            .as_ref()
            .and_then(|d| d.get("cached_tokens"))
            .and_then(|v| v.as_u64())
            .unwrap_or_else(|| u.prompt_cache_hit_tokens.unwrap_or(0) as u64)
            as u32;
        ResponseUsage {
            input_tokens: u.prompt_tokens,
            output_tokens: u.completion_tokens,
            total_tokens: u.total_tokens,
            input_tokens_details: InputTokensDetails { cached_tokens },
            output_tokens_details: OutputTokensDetails { reasoning_tokens },
        }
    });

    let response = ResponsesResponse {
        // Reuse an upstream id that already follows the Responses naming
        // convention; otherwise mint a fresh one.
        id: if chat_resp.id.starts_with("resp_") {
            chat_resp.id
        } else {
            new_id("resp")
        },
        object: "response",
        created_at: chat_resp.created as f64,
        status: if error.is_some() {
            "failed".into()
        } else {
            "completed".into()
        },
        model: original_model,
        output: output_items,
        usage,
        incomplete_details,
        error,
    };
    tracing::debug!(
        "Converted response: {} output items, status={}",
        response.output.len(),
        response.status,
    );
    response
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a single-choice chat completion with fixed metadata
    /// (`created = 1715550000`, model `deepseek-v4-pro`, no usage, no error).
    /// Tests that need usage/extra choices mutate the returned value.
    fn chat_with(id: &str, message: ChatResponseMessage, finish: &str) -> ChatCompletionResponse {
        ChatCompletionResponse {
            id: id.into(),
            object: "chat.completion".into(),
            created: 1715550000,
            model: "deepseek-v4-pro".into(),
            choices: vec![ChatChoice {
                index: 0,
                message,
                finish_reason: Some(finish.into()),
            }],
            usage: None,
            error: None,
        }
    }

    /// Assistant message with plain text content only.
    fn text_message(text: &str) -> ChatResponseMessage {
        ChatResponseMessage {
            role: Some("assistant".into()),
            content: Some(text.into()),
            reasoning_content: None,
            tool_calls: None,
        }
    }

    /// Assistant message with no content and no tool calls.
    fn empty_message() -> ChatResponseMessage {
        ChatResponseMessage {
            role: Some("assistant".into()),
            content: None,
            reasoning_content: None,
            tool_calls: None,
        }
    }

    /// Assistant message carrying the given tool calls plus optional text.
    fn tool_call_message(content: Option<&str>, calls: Vec<ChatToolCall>) -> ChatResponseMessage {
        ChatResponseMessage {
            role: Some("assistant".into()),
            content: content.map(String::from),
            reasoning_content: None,
            tool_calls: Some(calls),
        }
    }

    #[test]
    fn test_simple_text_response() {
        let mut chat = chat_with(
            "chatcmpl-123",
            text_message("Hello! How can I help you?"),
            "stop",
        );
        chat.usage = Some(ChatUsage {
            prompt_tokens: 10,
            completion_tokens: 5,
            total_tokens: 15,
            completion_tokens_details: None,
            prompt_tokens_details: None,
            prompt_cache_hit_tokens: None,
            prompt_cache_miss_tokens: None,
        });
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.object, "response");
        assert_eq!(resp.model, "deepseek-v4-pro");
        assert_eq!(resp.status, "completed");
        assert_eq!(resp.created_at, 1715550000.0);
        assert_eq!(resp.output.len(), 1);
        match &resp.output[0] {
            OutputItem::Message(msg) => {
                assert_eq!(msg.role, "assistant");
                assert_eq!(msg.status, "completed");
                assert_eq!(msg.content.len(), 1);
                match &msg.content[0] {
                    OutputContentBlock::Text { text, annotations } => {
                        assert_eq!(text, "Hello! How can I help you?");
                        assert!(annotations.is_empty());
                    }
                    _ => panic!("Expected text content block"),
                }
            }
            _ => panic!("Expected message output item"),
        }
        let usage = resp.usage.unwrap();
        assert_eq!(usage.input_tokens, 10);
        assert_eq!(usage.output_tokens, 5);
        assert_eq!(usage.total_tokens, 15);
    }

    #[test]
    fn test_response_with_tool_calls() {
        let mut chat = chat_with(
            "chatcmpl-456",
            tool_call_message(
                Some("Let me check the weather."),
                vec![ChatToolCall {
                    id: "call_abc".into(),
                    call_type: "function".into(),
                    function: ChatFunctionCall {
                        name: "get_weather".into(),
                        arguments: r#"{"city":"NYC"}"#.into(),
                    },
                }],
            ),
            "tool_calls",
        );
        chat.usage = Some(ChatUsage {
            prompt_tokens: 20,
            completion_tokens: 10,
            total_tokens: 30,
            completion_tokens_details: None,
            prompt_tokens_details: None,
            prompt_cache_hit_tokens: None,
            prompt_cache_miss_tokens: None,
        });
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.output.len(), 2);
        match &resp.output[0] {
            OutputItem::Message(msg) => match &msg.content[0] {
                OutputContentBlock::Text { text, .. } => {
                    assert_eq!(text, "Let me check the weather.");
                }
                _ => panic!("Expected text"),
            },
            _ => panic!("Expected message"),
        }
        match &resp.output[1] {
            OutputItem::FunctionCall(fc) => {
                assert_eq!(fc.call_id, "call_abc");
                assert_eq!(fc.name, "get_weather");
                assert_eq!(fc.arguments, r#"{"city":"NYC"}"#);
            }
            _ => panic!("Expected function_call"),
        }
    }

    #[test]
    fn test_usage_mapping() {
        let mut chat = chat_with("chatcmpl-789", text_message("Hi"), "stop");
        chat.usage = Some(ChatUsage {
            prompt_tokens: 100,
            completion_tokens: 50,
            total_tokens: 150,
            completion_tokens_details: Some(serde_json::json!({"reasoning_tokens": 20})),
            prompt_tokens_details: None,
            prompt_cache_hit_tokens: None,
            prompt_cache_miss_tokens: None,
        });
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        let usage = resp.usage.unwrap();
        assert_eq!(usage.input_tokens, 100);
        assert_eq!(usage.output_tokens, 50);
        assert_eq!(usage.total_tokens, 150);
        assert_eq!(usage.output_tokens_details.reasoning_tokens, 20);
        assert_eq!(usage.input_tokens_details.cached_tokens, 0);
    }

    #[test]
    fn test_multiple_choices() {
        // Only the first choice should be converted; the rest are dropped.
        let mut chat = chat_with("chatcmpl-multi", text_message("First choice"), "stop");
        chat.choices.push(ChatChoice {
            index: 1,
            message: text_message("Second choice"),
            finish_reason: Some("stop".into()),
        });
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.output.len(), 1);
        match &resp.output[0] {
            OutputItem::Message(msg) => match &msg.content[0] {
                OutputContentBlock::Text { text, .. } => {
                    assert_eq!(text, "First choice");
                }
                _ => panic!("Expected text"),
            },
            _ => panic!("Expected message"),
        }
    }

    #[test]
    fn test_length_finish_reason() {
        // Truncation does not fail the response at the top level.
        let chat = chat_with("chatcmpl-len", text_message("Truncated..."), "length");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.status, "completed");
    }

    #[test]
    fn test_content_filter_finish_reason() {
        let chat = chat_with("chatcmpl-cf", empty_message(), "content_filter");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        match &resp.output[0] {
            OutputItem::Message(msg) => {
                assert_eq!(msg.status, "incomplete");
            }
            _ => panic!("Expected message"),
        }
    }

    #[test]
    fn test_error_response() {
        let chat = ChatCompletionResponse {
            id: "".into(),
            object: "error".into(),
            created: 0,
            model: "".into(),
            choices: vec![],
            usage: None,
            error: Some(ChatError {
                message: "Invalid API key".into(),
                code: Some("invalid_api_key".into()),
            }),
        };
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.status, "failed");
        assert!(resp.error.is_some());
        assert_eq!(resp.output.len(), 0);
    }

    #[test]
    fn test_response_with_reasoning_tokens() {
        let mut chat = chat_with("chatcmpl-reason", text_message("Answer"), "stop");
        chat.usage = Some(ChatUsage {
            prompt_tokens: 50,
            completion_tokens: 100,
            total_tokens: 150,
            completion_tokens_details: Some(serde_json::json!({"reasoning_tokens": 80})),
            prompt_tokens_details: None,
            prompt_cache_hit_tokens: None,
            prompt_cache_miss_tokens: None,
        });
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        let usage = resp.usage.unwrap();
        assert_eq!(usage.output_tokens_details.reasoning_tokens, 80);
        assert_eq!(usage.output_tokens, 100);
    }

    #[test]
    fn test_response_id_is_prefixed() {
        // A chat id without the "resp_" prefix is replaced by a fresh one.
        let chat = chat_with("some-chat-id-12345", text_message("Hi"), "stop");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert!(resp.id.starts_with("resp_"));
    }

    #[test]
    fn test_empty_choices_produces_empty_output() {
        let mut chat = chat_with("chatcmpl-empty", empty_message(), "stop");
        chat.choices.clear();
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.status, "completed");
        assert_eq!(resp.output.len(), 0);
    }

    #[test]
    fn test_null_content_with_tool_calls() {
        // Tool-call-only responses must not emit an empty message item.
        let chat = chat_with(
            "chatcmpl-tools",
            tool_call_message(
                None,
                vec![ChatToolCall {
                    id: "call_xyz".into(),
                    call_type: "function".into(),
                    function: ChatFunctionCall {
                        name: "search".into(),
                        arguments: r#"{"q":"test"}"#.into(),
                    },
                }],
            ),
            "tool_calls",
        );
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert_eq!(resp.output.len(), 1);
        match &resp.output[0] {
            OutputItem::FunctionCall(fc) => {
                assert_eq!(fc.name, "search");
            }
            other => panic!("Expected FunctionCall, got {:?}", other),
        }
    }

    #[test]
    fn test_content_filter_produces_refusal() {
        let chat = chat_with("chatcmpl-cf2", empty_message(), "content_filter");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        match &resp.output[0] {
            OutputItem::Message(msg) => {
                assert_eq!(msg.status, "incomplete");
                match &msg.content[0] {
                    OutputContentBlock::Refusal { refusal } => {
                        assert_eq!(refusal, "content_filter");
                    }
                    other => panic!("Expected Refusal, got {:?}", other),
                }
            }
            other => panic!("Expected Message, got {:?}", other),
        }
    }

    #[test]
    fn test_incomplete_details_for_length() {
        let chat = chat_with("chatcmpl-len2", text_message("truncated..."), "length");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        let details = resp.incomplete_details.unwrap();
        assert_eq!(details["reason"], "max_output_tokens");
    }

    #[test]
    fn test_incomplete_details_for_content_filter() {
        let chat = chat_with("chatcmpl-cf3", text_message("filtered..."), "content_filter");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        let details = resp.incomplete_details.unwrap();
        assert_eq!(details["reason"], "content_filter");
    }

    #[test]
    fn test_no_incomplete_details_for_stop() {
        let chat = chat_with("chatcmpl-stop", text_message("done"), "stop");
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        assert!(resp.incomplete_details.is_none());
    }

    #[test]
    fn test_prompt_cache_tokens_mapped() {
        let mut chat = chat_with("chatcmpl-cache", text_message("cached response"), "stop");
        chat.usage = Some(ChatUsage {
            prompt_tokens: 100,
            completion_tokens: 30,
            total_tokens: 130,
            completion_tokens_details: None,
            prompt_tokens_details: Some(serde_json::json!({
                "cached_tokens": 100
            })),
            prompt_cache_hit_tokens: None,
            prompt_cache_miss_tokens: None,
        });
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        let usage = resp.usage.unwrap();
        assert_eq!(usage.input_tokens_details.cached_tokens, 100);
        assert_eq!(usage.input_tokens, 100);
        assert_eq!(usage.output_tokens, 30);
    }

    #[test]
    fn test_insufficient_system_resource() {
        let chat = chat_with(
            "chatcmpl-isr",
            text_message("Partial..."),
            "insufficient_system_resource",
        );
        let resp = chat_to_responses(chat, "deepseek-v4-pro".into());
        match &resp.output[0] {
            OutputItem::Message(msg) => {
                assert_eq!(msg.status, "incomplete");
            }
            _ => panic!("Expected message"),
        }
        assert!(resp.incomplete_details.is_none());
    }
}