use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
/// Request payload for an OpenAI-style `POST /chat/completions` call.
///
/// Every optional field uses `skip_serializing_if` so unset options are
/// omitted from the JSON body rather than sent as `null`.
#[derive(Debug, Clone, Serialize, Default)]
pub struct ChatCompletionRequest {
    /// Conversation so far, oldest message first.
    pub messages: Vec<RequestMessage>,
    /// Target model identifier (e.g. `"gpt-4o"`).
    pub model: String,
    /// Sampling temperature; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    /// Nucleus-sampling probability mass.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,
    /// Upper bound on generated tokens.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<usize>,
    /// Stop sequences that end generation early.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Vec<String>>,
    /// When `Some(true)`, requests a streamed (chunked) response.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>,
    /// e.g. `{"type": "json_object"}` to force JSON output.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<ResponseFormat>,
    /// Tool (function) definitions the model may call.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<RequestTool>>,
    /// Tool-choice directive; kept as raw JSON because the API accepts either
    /// a string ("auto"/"none") or an object naming a specific function.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<JsonValue>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
}
/// A tool made available to the model in a chat request.
#[derive(Debug, Clone, Serialize)]
pub struct RequestTool {
    /// Tool kind; serialized under the JSON key `"type"` (e.g. `"function"`).
    #[serde(rename = "type")]
    pub tool_type: String,
    pub function: RequestFunction,
}
/// Function declaration (name, description, JSON-Schema parameters) for a
/// request tool.
#[derive(Debug, Clone, Serialize)]
pub struct RequestFunction {
    pub name: String,
    pub description: String,
    /// JSON Schema describing the function's arguments.
    pub parameters: JsonValue,
    /// Opt-in strict schema validation; omitted from the payload when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub strict: Option<bool>,
}
/// Message content: either a bare string or a list of typed parts.
///
/// `untagged` means a `Text` value serializes as a plain JSON string while
/// `Parts` serializes as a JSON array — matching the API's dual format.
#[derive(Debug, Clone, Serialize, PartialEq)]
#[serde(untagged)]
pub enum RequestContent {
    Text(String),
    Parts(Vec<ContentPart>),
}
/// One element of a multi-part message; the `"type"` tag selects the variant.
#[derive(Debug, Clone, Serialize, PartialEq)]
#[serde(tag = "type")]
pub enum ContentPart {
    #[serde(rename = "text")]
    Text { text: String },
    #[serde(rename = "image_url")]
    ImageUrl { image_url: ImageUrlContent },
}
/// Image reference for an `image_url` content part.
#[derive(Debug, Clone, Serialize, PartialEq)]
pub struct ImageUrlContent {
    pub url: String,
    /// Optional detail-level hint; omitted when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub detail: Option<String>,
}
/// One chat message in the request (`role` such as `"user"` or `"tool"`).
#[derive(Debug, Clone, Serialize)]
pub struct RequestMessage {
    pub role: String,
    /// Body text or parts; omitted when `None` (e.g. assistant turns that
    /// carry only tool calls).
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<RequestContent>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Tool calls echoed back when replaying an assistant turn.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ResponseToolCall>>,
    /// For `role == "tool"` messages: id of the call being answered.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_call_id: Option<String>,
    /// Prompt-caching hint. NOTE(review): looks Anthropic-style — confirm
    /// which providers honor it.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cache_control: Option<RequestCacheControl>,
}
/// Cache-control marker serialized as `{"type": ...}`.
#[derive(Debug, Clone, Serialize)]
pub struct RequestCacheControl {
    #[serde(rename = "type")]
    pub cache_type: String,
}
/// Response-format selector serialized as `{"type": ...}`
/// (e.g. `"json_object"`).
#[derive(Debug, Clone, Serialize)]
pub struct ResponseFormat {
    #[serde(rename = "type")]
    pub format_type: String,
}
/// Top-level non-streaming chat completion response.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct ChatCompletionResponse {
    pub id: String,
    /// `"chat.completion"` when present; defaulted because some providers
    /// omit it.
    #[serde(default)]
    pub object: Option<String>,
    #[serde(default)]
    pub created: Option<u64>,
    pub model: String,
    pub choices: Vec<Choice>,
    /// Token accounting; optional because some providers omit it.
    pub usage: Option<Usage>,
    /// Any unrecognized top-level fields (e.g. `system_fingerprint`) are
    /// collected here via `flatten`.
    #[serde(flatten)]
    pub extra: Option<serde_json::Value>,
}
/// One candidate completion within a response.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct Choice {
    /// Defaulted because some backends (e.g. Anthropic-compatible ones)
    /// omit the index.
    #[serde(default)]
    pub index: Option<usize>,
    pub message: ResponseMessage,
    pub finish_reason: Option<String>,
    /// Unrecognized per-choice fields (e.g. `content_filter_results`).
    #[serde(flatten)]
    pub extra: Option<serde_json::Value>,
}
/// Assistant message returned in a choice.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct ResponseMessage {
    pub role: String,
    /// `None` when the turn consists solely of tool calls.
    pub content: Option<String>,
    #[serde(default)]
    pub tool_calls: Option<Vec<ResponseToolCall>>,
    #[serde(flatten)]
    pub extra: Option<serde_json::Value>,
}
/// A tool call emitted by the model. Derives both `Serialize` and
/// `Deserialize` so it can be replayed verbatim in follow-up requests.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseToolCall {
    pub id: String,
    #[serde(rename = "type")]
    pub call_type: String,
    pub function: ResponseFunctionCall,
}
/// Function invocation details; `arguments` is a JSON-encoded string, not a
/// parsed object.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResponseFunctionCall {
    pub name: String,
    pub arguments: String,
}
/// Breakdown of prompt tokens (currently only cache hits).
#[derive(Debug, Clone, Deserialize, Default)]
pub struct PromptTokensDetails {
    #[serde(default)]
    pub cached_tokens: Option<usize>,
}
/// Token usage accounting attached to responses and final stream chunks.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct Usage {
    pub prompt_tokens: usize,
    pub completion_tokens: usize,
    pub total_tokens: usize,
    #[serde(default)]
    pub prompt_tokens_details: Option<PromptTokensDetails>,
    /// Unrecognized provider-specific usage fields.
    #[serde(flatten)]
    pub extra: Option<serde_json::Value>,
}
/// One server-sent chunk of a streamed chat completion.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct ChatCompletionChunk {
    pub id: String,
    #[serde(default)]
    pub object: Option<String>,
    #[serde(default)]
    pub created: Option<u64>,
    #[serde(default)]
    pub model: Option<String>,
    pub choices: Vec<ChunkChoice>,
    /// Usually only present on the final chunk.
    #[serde(default)]
    pub usage: Option<Usage>,
}
/// Choice within a chunk; `delta` carries the incremental update.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct ChunkChoice {
    #[serde(default)]
    pub index: Option<usize>,
    pub delta: Delta,
    pub finish_reason: Option<String>,
}
/// Incremental message fragment. Typically `role` appears only on the first
/// chunk and the final chunk has an empty delta — confirm per provider.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct Delta {
    pub role: Option<String>,
    pub content: Option<String>,
    #[serde(default)]
    pub tool_calls: Option<Vec<DeltaToolCall>>,
}
/// Fragment of a streamed tool call.
#[derive(Debug, Clone, Deserialize)]
pub struct DeltaToolCall {
    /// Position of the tool call this fragment belongs to; presumably used
    /// to reassemble concurrent calls — verify against the accumulator.
    pub index: usize,
    #[serde(default)]
    pub id: Option<String>,
    #[serde(rename = "type", default)]
    pub tool_type: Option<String>,
    #[serde(default)]
    pub function: Option<DeltaFunction>,
}
/// Partial function-call data; `arguments` fragments concatenate across
/// chunks into a JSON string.
#[derive(Debug, Clone, Deserialize)]
pub struct DeltaFunction {
    #[serde(default)]
    pub name: Option<String>,
    #[serde(default)]
    pub arguments: Option<String>,
}
/// Response from a model-listing endpoint.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct ModelsResponse {
    pub data: Vec<Model>,
    #[serde(default)]
    pub object: Option<String>,
}
/// Token/input limits reported for a model; all fields optional so partial
/// catalogs still deserialize.
#[derive(Debug, Clone, Default, Deserialize)]
#[allow(dead_code)]
pub struct ModelLimits {
    #[serde(default)]
    pub max_context_window_tokens: Option<usize>,
    #[serde(default)]
    pub max_output_tokens: Option<usize>,
    #[serde(default)]
    pub max_prompt_tokens: Option<usize>,
    /// Max batch size for embedding inputs.
    #[serde(default)]
    pub max_inputs: Option<usize>,
}
/// Feature flags reported for a model.
#[derive(Debug, Clone, Default, Deserialize)]
#[allow(dead_code)]
pub struct ModelSupports {
    #[serde(default)]
    pub tool_calls: Option<bool>,
    #[serde(default)]
    pub parallel_tool_calls: Option<bool>,
    /// For embedding models: whether custom output dimensions are supported.
    #[serde(default)]
    pub dimensions: Option<bool>,
}
/// Capability block nested inside a model entry. `limits` and `supports`
/// fall back to their `Default` (all-`None`) values when absent.
#[derive(Debug, Clone, Default, Deserialize)]
#[allow(dead_code)]
pub struct ModelCapabilities {
    #[serde(default)]
    pub family: Option<String>,
    #[serde(default)]
    pub limits: ModelLimits,
    #[serde(default)]
    pub object: Option<String>,
    #[serde(default)]
    pub supports: ModelSupports,
    #[serde(default)]
    pub tokenizer: Option<String>,
    /// e.g. `"chat"` or `"embeddings"`; deserialized from the key `"type"`.
    #[serde(default, rename = "type")]
    pub model_type: Option<String>,
}
/// Usage-policy information attached to some model entries.
#[derive(Debug, Clone, Default, Deserialize)]
#[allow(dead_code)]
pub struct ModelPolicy {
    #[serde(default)]
    pub state: Option<String>,
    /// Terms-of-use text or URL.
    #[serde(default)]
    pub terms: Option<String>,
}
/// One model-catalog entry. Everything but `id` is optional so that both
/// plain OpenAI entries (`created`/`owned_by`) and richer capability-style
/// entries (`vendor`/`capabilities`/`policy`) deserialize.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct Model {
    pub id: String,
    #[serde(default)]
    pub object: Option<String>,
    #[serde(default)]
    pub name: Option<String>,
    #[serde(default)]
    pub vendor: Option<String>,
    #[serde(default)]
    pub version: Option<String>,
    #[serde(default)]
    pub capabilities: Option<ModelCapabilities>,
    #[serde(default)]
    pub model_picker_enabled: Option<bool>,
    #[serde(default)]
    pub preview: Option<bool>,
    #[serde(default)]
    pub policy: Option<ModelPolicy>,
    #[serde(default)]
    pub created: Option<u64>,
    #[serde(default)]
    pub owned_by: Option<String>,
}
impl Model {
    /// Maximum context-window size in tokens, when the catalog reported one.
    pub fn max_context_tokens(&self) -> Option<usize> {
        match &self.capabilities {
            Some(caps) => caps.limits.max_context_window_tokens,
            None => None,
        }
    }
    /// Maximum number of output tokens, when the catalog reported one.
    pub fn max_output_tokens(&self) -> Option<usize> {
        match &self.capabilities {
            Some(caps) => caps.limits.max_output_tokens,
            None => None,
        }
    }
    /// Whether the model advertises tool-call support; `false` whenever
    /// capability data is missing or the flag is absent.
    pub fn supports_tools(&self) -> bool {
        matches!(
            self.capabilities.as_ref().and_then(|c| c.supports.tool_calls),
            Some(true)
        )
    }
}
/// Input for an embedding request: a single string or a batch.
///
/// `untagged` serializes `Single` as a JSON string and `Multiple` as an
/// array, matching the API's accepted forms.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
pub enum EmbeddingInput {
    Single(String),
    Multiple(Vec<String>),
}
impl From<String> for EmbeddingInput {
fn from(s: String) -> Self {
EmbeddingInput::Single(s)
}
}
impl From<&str> for EmbeddingInput {
fn from(s: &str) -> Self {
EmbeddingInput::Single(s.to_string())
}
}
impl From<Vec<String>> for EmbeddingInput {
fn from(v: Vec<String>) -> Self {
EmbeddingInput::Multiple(v)
}
}
/// Request payload for an OpenAI-style `POST /embeddings` call.
#[derive(Debug, Clone, Serialize)]
pub struct EmbeddingRequest {
    pub input: EmbeddingInput,
    /// Embedding model identifier (e.g. `"text-embedding-3-small"`).
    pub model: String,
}
impl EmbeddingRequest {
    /// Build a request, converting both arguments into their owned forms.
    pub fn new(input: impl Into<EmbeddingInput>, model: impl Into<String>) -> Self {
        let input = input.into();
        let model = model.into();
        Self { input, model }
    }
}
/// One embedding row in an embeddings response.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct Embedding {
    pub object: String,
    pub embedding: Vec<f32>,
    /// Position of this row relative to the request's input order.
    pub index: usize,
}
/// Token accounting for an embeddings call (no completion tokens).
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct EmbeddingUsage {
    pub prompt_tokens: usize,
    pub total_tokens: usize,
}
/// Top-level embeddings response.
#[derive(Debug, Clone, Deserialize)]
#[allow(dead_code)]
pub struct EmbeddingResponse {
    pub object: String,
    pub data: Vec<Embedding>,
    pub model: String,
    pub usage: EmbeddingUsage,
}
impl EmbeddingResponse {
    /// Borrow the first embedding vector, if any rows came back.
    pub fn first_embedding(&self) -> Option<&Vec<f32>> {
        match self.data.first() {
            Some(row) => Some(&row.embedding),
            None => None,
        }
    }
    /// Borrow every embedding vector, preserving response order.
    pub fn embeddings(&self) -> Vec<&Vec<f32>> {
        let mut vectors = Vec::with_capacity(self.data.len());
        for row in &self.data {
            vectors.push(&row.embedding);
        }
        vectors
    }
    /// Dimensionality of the first embedding, if one is present.
    pub fn dimension(&self) -> Option<usize> {
        self.first_embedding().map(|v| v.len())
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // ---- EmbeddingInput conversions ----

    #[test]
    fn test_embedding_input_from_string() {
        let input: EmbeddingInput = "hello world".into();
        match input {
            EmbeddingInput::Single(s) => assert_eq!(s, "hello world"),
            _ => panic!("Expected Single variant"),
        }
    }

    #[test]
    fn test_embedding_input_from_owned_string() {
        let input: EmbeddingInput = String::from("hello world").into();
        match input {
            EmbeddingInput::Single(s) => assert_eq!(s, "hello world"),
            _ => panic!("Expected Single variant"),
        }
    }

    #[test]
    fn test_embedding_input_from_vec() {
        let texts = vec!["hello".to_string(), "world".to_string()];
        let input: EmbeddingInput = texts.into();
        match input {
            EmbeddingInput::Multiple(v) => {
                assert_eq!(v.len(), 2);
                assert_eq!(v[0], "hello");
                assert_eq!(v[1], "world");
            }
            _ => panic!("Expected Multiple variant"),
        }
    }

    // ---- Embedding request serialization / response deserialization ----

    #[test]
    fn test_embedding_request_serialization() {
        let request = EmbeddingRequest::new("test input", "text-embedding-3-small");
        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("\"input\":\"test input\""));
        assert!(json.contains("\"model\":\"text-embedding-3-small\""));
    }

    #[test]
    fn test_embedding_request_multiple_inputs() {
        let input = EmbeddingInput::Multiple(vec!["one".to_string(), "two".to_string()]);
        let request = EmbeddingRequest::new(input, "model");
        let json = serde_json::to_string(&request).unwrap();
        // Untagged Multiple must serialize as a plain JSON array.
        assert!(json.contains("[\"one\",\"two\"]"));
    }

    #[test]
    fn test_embedding_response_deserialization() {
        let json = r#"{
"object": "list",
"data": [
{
"object": "embedding",
"embedding": [0.1, 0.2, 0.3],
"index": 0
}
],
"model": "text-embedding-3-small",
"usage": {
"prompt_tokens": 5,
"total_tokens": 5
}
}"#;
        let response: EmbeddingResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.object, "list");
        assert_eq!(response.model, "text-embedding-3-small");
        assert_eq!(response.data.len(), 1);
        assert_eq!(response.data[0].embedding, vec![0.1, 0.2, 0.3]);
        assert_eq!(response.usage.prompt_tokens, 5);
    }

    #[test]
    fn test_embedding_response_helpers() {
        let json = r#"{
"object": "list",
"data": [
{"object": "embedding", "embedding": [1.0, 2.0], "index": 0},
{"object": "embedding", "embedding": [3.0, 4.0], "index": 1}
],
"model": "test",
"usage": {"prompt_tokens": 10, "total_tokens": 10}
}"#;
        let response: EmbeddingResponse = serde_json::from_str(json).unwrap();
        let first = response.first_embedding().unwrap();
        assert_eq!(*first, vec![1.0, 2.0]);
        assert_eq!(response.dimension(), Some(2));
        let all = response.embeddings();
        assert_eq!(all.len(), 2);
    }

    // ---- Model catalog deserialization ----

    #[test]
    fn test_model_deserialization_full() {
        let json = r#"{
"id": "gpt-4o",
"object": "model",
"name": "GPT-4o",
"vendor": "azure-openai",
"version": "2024-08-06",
"capabilities": {
"family": "gpt-4o",
"limits": {
"max_context_window_tokens": 128000,
"max_output_tokens": 16384
},
"supports": {
"tool_calls": true,
"parallel_tool_calls": true
},
"tokenizer": "cl100k_base",
"type": "chat"
},
"model_picker_enabled": true,
"preview": false
}"#;
        let model: Model = serde_json::from_str(json).unwrap();
        assert_eq!(model.id, "gpt-4o");
        assert_eq!(model.name, Some("GPT-4o".to_string()));
        assert_eq!(model.vendor, Some("azure-openai".to_string()));
        assert!(model.supports_tools());
        assert_eq!(model.max_context_tokens(), Some(128000));
        assert_eq!(model.max_output_tokens(), Some(16384));
    }

    #[test]
    fn test_model_deserialization_minimal() {
        // Plain OpenAI-style entry: no capabilities block at all.
        let json = r#"{
"id": "gpt-4",
"object": "model",
"created": 1687882410,
"owned_by": "openai"
}"#;
        let model: Model = serde_json::from_str(json).unwrap();
        assert_eq!(model.id, "gpt-4");
        assert_eq!(model.created, Some(1687882410));
        assert_eq!(model.owned_by, Some("openai".to_string()));
        assert!(!model.supports_tools());
    }

    #[test]
    fn test_model_limits() {
        let limits = ModelLimits {
            max_context_window_tokens: Some(128000),
            max_output_tokens: Some(4096),
            max_prompt_tokens: None,
            max_inputs: None,
        };
        assert_eq!(limits.max_context_window_tokens, Some(128000));
        assert_eq!(limits.max_output_tokens, Some(4096));
    }

    #[test]
    fn test_models_response_deserialization() {
        let json = r#"{
"data": [
{"id": "gpt-4o", "object": "model"},
{"id": "gpt-4o-mini", "object": "model"}
]
}"#;
        let response: ModelsResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.data.len(), 2);
        assert_eq!(response.data[0].id, "gpt-4o");
        assert_eq!(response.data[1].id, "gpt-4o-mini");
    }

    // ---- Chat completion responses ----

    #[test]
    fn test_chat_completion_response_deserialization() {
        let json = r#"{
"id": "chatcmpl-ABC123",
"object": "chat.completion",
"created": 1699876543,
"model": "gpt-4o-mini",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": "Hello! How can I help you today?"
},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 12,
"completion_tokens": 8,
"total_tokens": 20
}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.id, "chatcmpl-ABC123");
        assert_eq!(response.object, Some("chat.completion".to_string()));
        assert_eq!(response.created, Some(1699876543));
        assert_eq!(response.model, "gpt-4o-mini");
        assert_eq!(response.choices.len(), 1);
        let choice = &response.choices[0];
        assert_eq!(choice.index, Some(0));
        assert_eq!(choice.message.role, "assistant");
        assert_eq!(
            choice.message.content,
            Some("Hello! How can I help you today?".to_string())
        );
        assert_eq!(choice.finish_reason, Some("stop".to_string()));
        let usage = response.usage.as_ref().unwrap();
        assert_eq!(usage.prompt_tokens, 12);
        assert_eq!(usage.completion_tokens, 8);
        assert_eq!(usage.total_tokens, 20);
    }

    #[test]
    fn test_chat_completion_response_with_tool_calls() {
        let json = r#"{
"id": "chatcmpl-tool123",
"object": "chat.completion",
"created": 1699876543,
"model": "gpt-4o",
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\": \"San Francisco\"}"
}
}
]
},
"finish_reason": "tool_calls"
}
],
"usage": {
"prompt_tokens": 50,
"completion_tokens": 25,
"total_tokens": 75
}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.id, "chatcmpl-tool123");
        let choice = &response.choices[0];
        assert_eq!(choice.finish_reason, Some("tool_calls".to_string()));
        assert!(choice.message.content.is_none());
        let tool_calls = choice.message.tool_calls.as_ref().unwrap();
        assert_eq!(tool_calls.len(), 1);
        assert_eq!(tool_calls[0].id, "call_abc123");
        assert_eq!(tool_calls[0].call_type, "function");
        assert_eq!(tool_calls[0].function.name, "get_weather");
        assert!(tool_calls[0].function.arguments.contains("San Francisco"));
    }

    #[test]
    fn test_chat_completion_response_multiple_choices() {
        let json = r#"{
"id": "chatcmpl-multi",
"model": "gpt-4o",
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": "Option A"},
"finish_reason": "stop"
},
{
"index": 1,
"message": {"role": "assistant", "content": "Option B"},
"finish_reason": "stop"
}
],
"usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.choices.len(), 2);
        assert_eq!(
            response.choices[0].message.content,
            Some("Option A".to_string())
        );
        assert_eq!(
            response.choices[1].message.content,
            Some("Option B".to_string())
        );
    }

    #[test]
    fn test_chat_completion_response_with_extra_fields() {
        // Unknown fields (content_filter_results, system_fingerprint) must
        // be tolerated via the flattened `extra` catch-alls.
        let json = r#"{
"id": "chatcmpl-extra",
"model": "gpt-4o",
"choices": [
{
"index": 0,
"message": {"role": "assistant", "content": "Hello"},
"finish_reason": "stop",
"content_filter_results": {
"hate": {"filtered": false}
}
}
],
"usage": {"prompt_tokens": 5, "completion_tokens": 1, "total_tokens": 6},
"system_fingerprint": "fp_abc123"
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.id, "chatcmpl-extra");
        assert_eq!(
            response.choices[0].message.content,
            Some("Hello".to_string())
        );
    }

    // ---- Streaming chunks ----

    #[test]
    fn test_streaming_chunk_first_chunk() {
        let json = r#"{
"id": "chatcmpl-stream123",
"object": "chat.completion.chunk",
"created": 1699876543,
"model": "gpt-4o",
"choices": [
{
"index": 0,
"delta": {
"role": "assistant",
"content": ""
},
"finish_reason": null
}
]
}"#;
        let chunk: ChatCompletionChunk = serde_json::from_str(json).unwrap();
        assert_eq!(chunk.id, "chatcmpl-stream123");
        assert_eq!(chunk.object, Some("chat.completion.chunk".to_string()));
        assert_eq!(chunk.model, Some("gpt-4o".to_string()));
        let choice = &chunk.choices[0];
        assert_eq!(choice.delta.role, Some("assistant".to_string()));
        assert_eq!(choice.delta.content, Some("".to_string()));
        assert!(choice.finish_reason.is_none());
    }

    #[test]
    fn test_streaming_chunk_content_delta() {
        let json = r#"{
"id": "chatcmpl-stream123",
"object": "chat.completion.chunk",
"created": 1699876543,
"model": "gpt-4o",
"choices": [
{
"index": 0,
"delta": {
"content": "Hello"
},
"finish_reason": null
}
]
}"#;
        let chunk: ChatCompletionChunk = serde_json::from_str(json).unwrap();
        let choice = &chunk.choices[0];
        assert!(choice.delta.role.is_none());
        assert_eq!(choice.delta.content, Some("Hello".to_string()));
    }

    #[test]
    fn test_streaming_chunk_final_chunk() {
        let json = r#"{
"id": "chatcmpl-stream123",
"object": "chat.completion.chunk",
"created": 1699876543,
"model": "gpt-4o",
"choices": [
{
"index": 0,
"delta": {},
"finish_reason": "stop"
}
],
"usage": {
"prompt_tokens": 20,
"completion_tokens": 50,
"total_tokens": 70
}
}"#;
        let chunk: ChatCompletionChunk = serde_json::from_str(json).unwrap();
        let choice = &chunk.choices[0];
        assert!(choice.delta.content.is_none());
        assert_eq!(choice.finish_reason, Some("stop".to_string()));
        let usage = chunk.usage.as_ref().unwrap();
        assert_eq!(usage.total_tokens, 70);
    }

    #[test]
    fn test_streaming_chunk_empty_delta() {
        let json = r#"{
"id": "chatcmpl-stream123",
"model": "gpt-4o",
"choices": [
{
"index": 0,
"delta": {},
"finish_reason": null
}
]
}"#;
        let chunk: ChatCompletionChunk = serde_json::from_str(json).unwrap();
        let choice = &chunk.choices[0];
        assert!(choice.delta.role.is_none());
        assert!(choice.delta.content.is_none());
    }

    // ---- Chat completion requests ----

    #[test]
    fn test_chat_completion_request_minimal() {
        let request = ChatCompletionRequest {
            messages: vec![RequestMessage {
                role: "user".to_string(),
                content: Some(RequestContent::Text("Hello".to_string())),
                name: None,
                tool_calls: None,
                tool_call_id: None,
                cache_control: None,
            }],
            model: "gpt-4o".to_string(),
            ..Default::default()
        };
        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("\"role\":\"user\""));
        assert!(json.contains("\"Hello\""));
        assert!(json.contains("\"model\":\"gpt-4o\""));
        // Unset options must be omitted entirely, not serialized as null.
        assert!(!json.contains("temperature"));
        assert!(!json.contains("max_tokens"));
    }

    #[test]
    fn test_chat_completion_request_with_options() {
        let request = ChatCompletionRequest {
            messages: vec![RequestMessage {
                role: "user".to_string(),
                content: Some(RequestContent::Text("Hello".to_string())),
                name: None,
                tool_calls: None,
                tool_call_id: None,
                cache_control: None,
            }],
            model: "gpt-4o".to_string(),
            temperature: Some(0.7),
            max_tokens: Some(1000),
            stream: Some(true),
            ..Default::default()
        };
        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("\"temperature\":0.7"));
        assert!(json.contains("\"max_tokens\":1000"));
        assert!(json.contains("\"stream\":true"));
    }

    #[test]
    fn test_chat_completion_request_with_tools() {
        let request = ChatCompletionRequest {
            messages: vec![RequestMessage {
                role: "user".to_string(),
                content: Some(RequestContent::Text("What's the weather?".to_string())),
                name: None,
                tool_calls: None,
                tool_call_id: None,
                cache_control: None,
            }],
            model: "gpt-4o".to_string(),
            tools: Some(vec![RequestTool {
                tool_type: "function".to_string(),
                function: RequestFunction {
                    name: "get_weather".to_string(),
                    description: "Get weather info".to_string(),
                    parameters: serde_json::json!({
                        "type": "object",
                        "properties": {
                            "location": {"type": "string"}
                        },
                        "required": ["location"]
                    }),
                    strict: Some(true),
                },
            }]),
            ..Default::default()
        };
        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("\"tools\""));
        assert!(json.contains("\"get_weather\""));
        assert!(json.contains("\"strict\":true"));
    }

    #[test]
    fn test_response_format_serialization() {
        let format = ResponseFormat {
            format_type: "json_object".to_string(),
        };
        let json = serde_json::to_string(&format).unwrap();
        assert!(json.contains("\"type\":\"json_object\""));
    }

    #[test]
    fn test_tool_call_roundtrip() {
        let tool_call = ResponseToolCall {
            id: "call_123".to_string(),
            call_type: "function".to_string(),
            function: ResponseFunctionCall {
                name: "test_func".to_string(),
                arguments: "{\"arg\": \"value\"}".to_string(),
            },
        };
        let json = serde_json::to_string(&tool_call).unwrap();
        assert!(json.contains("\"id\":\"call_123\""));
        assert!(json.contains("\"type\":\"function\""));
        let parsed: ResponseToolCall = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed.id, "call_123");
        assert_eq!(parsed.function.name, "test_func");
    }

    #[test]
    fn test_model_with_embedding_capabilities() {
        let json = r#"{
"id": "text-embedding-3-small",
"object": "model",
"name": "Text Embedding 3 Small",
"capabilities": {
"family": "text-embedding-3",
"limits": {
"max_inputs": 2048
},
"supports": {
"dimensions": true
},
"type": "embeddings"
}
}"#;
        let model: Model = serde_json::from_str(json).unwrap();
        assert_eq!(model.id, "text-embedding-3-small");
        let caps = model.capabilities.as_ref().unwrap();
        assert_eq!(caps.model_type, Some("embeddings".to_string()));
        assert_eq!(caps.limits.max_inputs, Some(2048));
        assert_eq!(caps.supports.dimensions, Some(true));
    }

    #[test]
    fn test_model_with_policy() {
        let json = r#"{
"id": "claude-3.5-sonnet",
"object": "model",
"policy": {
"state": "active",
"terms": "https://example.com/terms"
}
}"#;
        let model: Model = serde_json::from_str(json).unwrap();
        let policy = model.policy.as_ref().unwrap();
        assert_eq!(policy.state, Some("active".to_string()));
        assert!(policy.terms.as_ref().unwrap().contains("terms"));
    }

    #[test]
    fn test_chat_request_json_format() {
        let request = ChatCompletionRequest {
            messages: vec![RequestMessage {
                role: "user".to_string(),
                content: Some(RequestContent::Text("Test".to_string())),
                name: None,
                tool_calls: None,
                tool_call_id: None,
                cache_control: None,
            }],
            model: "gpt-4o".to_string(),
            ..Default::default()
        };
        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("\"messages\""));
        assert!(json.contains("\"model\":\"gpt-4o\""));
        assert!(!json.contains("\"temperature\":null"));
        assert!(!json.contains("\"max_tokens\":null"));
        assert!(!json.contains("\"tools\":null"));
        assert!(!json.contains("\"stop\":null"));
    }

    #[test]
    fn test_chat_request_with_tools_format() {
        let request = ChatCompletionRequest {
            messages: vec![RequestMessage {
                role: "user".to_string(),
                content: Some(RequestContent::Text("Search for files".to_string())),
                name: None,
                tool_calls: None,
                tool_call_id: None,
                cache_control: None,
            }],
            model: "gpt-4o".to_string(),
            tools: Some(vec![RequestTool {
                tool_type: "function".to_string(),
                function: RequestFunction {
                    name: "file_search".to_string(),
                    description: "Search for files in workspace".to_string(),
                    parameters: serde_json::json!({
                        "type": "object",
                        "properties": {
                            "query": { "type": "string" }
                        }
                    }),
                    strict: None,
                },
            }]),
            ..Default::default()
        };
        let json = serde_json::to_string(&request).unwrap();
        assert!(json.contains("\"type\":\"function\""));
        assert!(json.contains("\"name\":\"file_search\""));
        assert!(json.contains("\"description\":\"Search for files in workspace\""));
        assert!(json.contains("\"parameters\""));
        assert!(!json.contains("\"strict\":null"));
    }

    // ---- Edge cases ----

    #[test]
    fn test_response_null_content_no_tools() {
        let json = r#"{
"id": "chatcmpl-edge1",
"model": "gpt-4o",
"choices": [{
"index": 0,
"message": {"role": "assistant", "content": null},
"finish_reason": "stop"
}],
"usage": {"prompt_tokens": 5, "completion_tokens": 0, "total_tokens": 5}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.id, "chatcmpl-edge1");
        assert_eq!(response.choices.len(), 1);
        assert!(response.choices[0].message.content.is_none());
        assert!(response.choices[0].message.tool_calls.is_none());
        let usage = response.usage.as_ref().unwrap();
        assert_eq!(usage.completion_tokens, 0);
    }

    #[test]
    fn test_response_empty_choices() {
        let json = r#"{
"id": "chatcmpl-edge2",
"model": "gpt-4o",
"choices": [],
"usage": {"prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.id, "chatcmpl-edge2");
        // A well-formed response may legitimately carry zero choices.
        assert!(response.choices.is_empty());
    }

    #[test]
    fn test_response_minimal_fields() {
        let json = r#"{
"id": "chatcmpl-minimal",
"model": "gpt-4o-mini",
"choices": [{
"index": 0,
"message": {"role": "assistant", "content": "Hello"},
"finish_reason": "stop"
}]
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.id, "chatcmpl-minimal");
        assert_eq!(response.model, "gpt-4o-mini");
        assert_eq!(response.choices.len(), 1);
        assert!(response.usage.is_none());
        assert!(response.object.is_none());
        assert!(response.created.is_none());
    }

    #[test]
    fn test_response_missing_index_anthropic() {
        // Anthropic-compatible backends may omit the choice index entirely.
        let json = r#"{
"id": "msg_claude_01",
"model": "claude-haiku-4.5",
"choices": [{
"message": {
"role": "assistant",
"content": "Hello! I can help you with that."
},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 8,
"total_tokens": 18
}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.choices.len(), 1);
        assert_eq!(response.choices[0].index, None);
        assert_eq!(
            response.choices[0].message.content,
            Some("Hello! I can help you with that.".to_string())
        );
        assert_eq!(response.choices[0].finish_reason, Some("stop".to_string()));
    }

    #[test]
    fn test_response_anthropic_split_choices() {
        // Text and tool calls may arrive as two separate choices.
        let json = r#"{
"id": "msg_haiku_split",
"model": "claude-haiku-4.5",
"choices": [
{
"finish_reason": "tool_calls",
"message": {
"content": "I'll examine the file to understand its structure",
"role": "assistant"
}
},
{
"finish_reason": "tool_calls",
"message": {
"role": "assistant",
"tool_calls": [{
"function": {
"arguments": "{\"path\":\"demo/game.js\"}",
"name": "read_file"
},
"id": "toolu_01ABC123",
"type": "function"
}]
}
}
],
"created": 1768984171,
"usage": {
"prompt_tokens": 100,
"completion_tokens": 50,
"total_tokens": 150
}
}"#;
        let response: ChatCompletionResponse = serde_json::from_str(json).unwrap();
        assert_eq!(response.choices.len(), 2);
        assert!(response.choices[0].message.content.is_some());
        assert!(response.choices[0].message.tool_calls.is_none());
        assert!(response.choices[1].message.content.is_none());
        assert!(response.choices[1].message.tool_calls.is_some());
        assert_eq!(
            response.choices[1]
                .message
                .tool_calls
                .as_ref()
                .unwrap()
                .len(),
            1
        );
    }

    // ---- Tool-result messages ----

    #[test]
    fn test_tool_result_message_format() {
        let message = RequestMessage {
            role: "tool".to_string(),
            content: Some(RequestContent::Text("File found: src/main.rs".to_string())),
            name: None,
            tool_calls: None,
            tool_call_id: Some("call_abc123".to_string()),
            cache_control: None,
        };
        let json = serde_json::to_string(&message).unwrap();
        assert!(json.contains("\"role\":\"tool\""));
        assert!(json.contains("\"tool_call_id\":\"call_abc123\""));
        assert!(json.contains("File found: src/main.rs"));
        assert!(!json.contains("\"name\":null"));
        assert!(!json.contains("\"tool_calls\":null"));
    }

    #[test]
    fn test_tool_result_id_preserved() {
        let original_id = "call_xYz789AbC";
        let message = RequestMessage {
            role: "tool".to_string(),
            content: Some(RequestContent::Text("Result data".to_string())),
            name: None,
            tool_calls: None,
            tool_call_id: Some(original_id.to_string()),
            cache_control: None,
        };
        let json = serde_json::to_string(&message).unwrap();
        assert!(
            json.contains(&format!("\"tool_call_id\":\"{}\"", original_id)),
            "tool_call_id must be preserved exactly in serialization"
        );
        assert_eq!(
            json.matches(original_id).count(),
            1,
            "tool_call_id should appear exactly once"
        );
    }

    #[test]
    fn test_tool_result_with_unicode() {
        let unicode_content = "搜索结果: 找到 3 个文件 🎯";
        let message = RequestMessage {
            role: "tool".to_string(),
            content: Some(RequestContent::Text(unicode_content.to_string())),
            name: None,
            tool_calls: None,
            tool_call_id: Some("call_search".to_string()),
            cache_control: None,
        };
        let json = serde_json::to_string(&message).unwrap();
        // The output must itself be valid JSON.
        let _ = serde_json::from_str::<serde_json::Value>(&json).unwrap();
        assert!(
            json.contains(unicode_content),
            "Unicode content must be preserved in JSON"
        );
    }
}