// embacle_server/openai_types.rs
1// ABOUTME: OpenAI-compatible request/response envelope types for the REST API
2// ABOUTME: Maps between OpenAI chat completion format and embacle ChatRequest/ChatResponse
3//
4// SPDX-License-Identifier: Apache-2.0
5// Copyright (c) 2026 dravr.ai
6
7use serde::{Deserialize, Serialize};
8
9// ============================================================================
10// Request Types
11// ============================================================================
12
/// OpenAI-compatible chat completion request
///
/// Accepts either a single model string or an array of model strings
/// for multiplex mode. Each model string may contain a provider prefix
/// (e.g., "copilot:gpt-4o") parsed by the provider resolver.
///
/// Only `model` and `messages` are required; all other fields default to
/// `None` (or `false` for `stream`) when absent from the request body.
#[derive(Debug, Deserialize)]
pub struct ChatCompletionRequest {
    /// Model identifier(s) — single string or array for multiplex
    pub model: ModelField,
    /// Conversation messages
    pub messages: Vec<ChatCompletionMessage>,
    /// Whether to stream the response (defaults to `false` when omitted)
    #[serde(default)]
    pub stream: bool,
    /// Temperature for response randomness (0.0 - 2.0)
    #[serde(default)]
    pub temperature: Option<f32>,
    /// Maximum tokens to generate
    #[serde(default)]
    pub max_tokens: Option<u32>,
    /// Enable strict capability checking (reject unsupported parameters)
    #[serde(default)]
    pub strict_capabilities: Option<bool>,
    /// Tool definitions for function calling
    #[serde(default)]
    pub tools: Option<Vec<ToolDefinition>>,
    /// Controls which tools the model may call
    #[serde(default)]
    pub tool_choice: Option<ToolChoice>,
    /// Controls the response format (text, `json_object`, or `json_schema`)
    #[serde(default)]
    pub response_format: Option<ResponseFormatRequest>,
    /// Nucleus sampling parameter (0.0 - 1.0)
    #[serde(default)]
    pub top_p: Option<f32>,
    /// Stop sequence(s) that halt generation — single string or array
    #[serde(default)]
    pub stop: Option<StopField>,
}
52
/// Stop field that accepts either a single string or an array of strings
///
/// Per `OpenAI` spec, the `stop` parameter can be a string or an array of
/// up to 4 strings.
///
/// Deserialized with `#[serde(untagged)]`: a JSON string becomes `Single`
/// and a JSON array becomes `Multiple`. The 4-sequence limit is not
/// enforced at deserialization time — see `into_vec` / `to_bounded_vec`,
/// which truncate.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum StopField {
    /// Single stop sequence
    Single(String),
    /// Multiple stop sequences
    Multiple(Vec<String>),
}
65
66/// OpenAI-specified maximum number of stop sequences
67const MAX_STOP_SEQUENCES: usize = 4;
68
69impl StopField {
70    /// Number of stop sequences in this field
71    pub const fn len(&self) -> usize {
72        match self {
73            Self::Single(_) => 1,
74            Self::Multiple(v) => v.len(),
75        }
76    }
77
78    /// Returns true if no stop sequences are present
79    pub const fn is_empty(&self) -> bool {
80        matches!(self, Self::Multiple(v) if v.is_empty())
81    }
82
83    /// Convert to a Vec of strings regardless of the variant,
84    /// truncating to the `OpenAI`-specified maximum of 4 sequences
85    pub fn into_vec(self) -> Vec<String> {
86        match self {
87            Self::Single(s) => vec![s],
88            Self::Multiple(v) => v.into_iter().take(MAX_STOP_SEQUENCES).collect(),
89        }
90    }
91
92    /// Clone only the bounded subset (up to 4 sequences) without copying
93    /// the entire input — safe for use with user-controlled data
94    pub fn to_bounded_vec(&self) -> Vec<String> {
95        match self {
96            Self::Single(s) => vec![s.clone()],
97            Self::Multiple(v) => v.iter().take(MAX_STOP_SEQUENCES).cloned().collect(),
98        }
99    }
100}
101
/// OpenAI-compatible response format request
///
/// Internally tagged on the `"type"` field, matching the `OpenAI` wire
/// format (`{"type": "json_object"}`, etc.).
#[derive(Debug, Clone, Deserialize)]
#[serde(tag = "type")]
pub enum ResponseFormatRequest {
    /// Default text response
    #[serde(rename = "text")]
    Text,
    /// Force JSON object output
    #[serde(rename = "json_object")]
    JsonObject,
    /// Force JSON output conforming to a specific schema
    #[serde(rename = "json_schema")]
    JsonSchema {
        /// The JSON schema specification
        json_schema: JsonSchemaSpec,
    },
}
119
/// JSON schema specification within a response format request
///
/// The schema body is stored as an opaque `serde_json::Value`; no schema
/// validation is performed by this type.
#[derive(Debug, Clone, Deserialize)]
pub struct JsonSchemaSpec {
    /// Schema name for identification
    pub name: String,
    /// The JSON Schema definition
    pub schema: serde_json::Value,
}
128
/// A model field that can be either a single string or an array of strings
///
/// Deserialized with `#[serde(untagged)]`: a JSON string maps to `Single`,
/// a JSON array of strings maps to `Multiple`.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum ModelField {
    /// Single model string (standard `OpenAI`)
    Single(String),
    /// Array of model strings (multiplex extension)
    Multiple(Vec<String>),
}
138
/// Message content that can be either a plain string or an array of content parts
///
/// Per the `OpenAI` API spec, the `content` field of a message can be either a simple
/// string or an array of typed content parts (text, `image_url`, etc.).
///
/// Deserialized with `#[serde(untagged)]`, so plain-string content keeps
/// deserializing unchanged (backward compatibility with older clients).
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum MessageContent {
    /// Plain text content
    Text(String),
    /// Array of typed content parts (text, `image_url`, etc.)
    Parts(Vec<ContentPart>),
}
151
152impl MessageContent {
153    /// Extract the text content, concatenating text parts if multipart
154    pub fn as_text(&self) -> String {
155        match self {
156            Self::Text(s) => s.clone(),
157            Self::Parts(parts) => parts
158                .iter()
159                .filter_map(|p| match p {
160                    ContentPart::Text { text } => Some(text.as_str()),
161                    ContentPart::ImageUrl { .. } => None,
162                })
163                .collect::<Vec<_>>()
164                .join(""),
165        }
166    }
167}
168
/// A single content part within a multipart message
///
/// Internally tagged on the `"type"` field, matching the `OpenAI` wire
/// format (`{"type": "text", ...}` / `{"type": "image_url", ...}`).
#[derive(Debug, Clone, Deserialize)]
#[serde(tag = "type")]
pub enum ContentPart {
    /// Text content part
    #[serde(rename = "text")]
    Text {
        /// The text content
        text: String,
    },
    /// Image URL content part (including data URIs)
    #[serde(rename = "image_url")]
    ImageUrl {
        /// Image URL details
        image_url: ImageUrlDetail,
    },
}
186
/// Image URL details within a content part
///
/// Only `url` is modeled; any extra fields sent by clients (e.g. `detail`)
/// are dropped by serde's default unknown-field handling.
#[derive(Debug, Clone, Deserialize)]
pub struct ImageUrlDetail {
    /// The image URL (can be a data URI like `data:image/png;base64,...`)
    pub url: String,
}
193
/// OpenAI-compatible message in a chat completion request
///
/// `role` is kept as a free-form string rather than an enum; any
/// validation happens downstream. `content` is optional because assistant
/// messages that carry only `tool_calls` arrive with `content: null`.
#[derive(Debug, Clone, Deserialize)]
pub struct ChatCompletionMessage {
    /// Role: "system", "user", "assistant", or "tool"
    pub role: String,
    /// Message content (None for tool-call-only assistant messages)
    pub content: Option<MessageContent>,
    /// Tool calls requested by the assistant
    #[serde(default)]
    pub tool_calls: Option<Vec<ToolCall>>,
    /// ID of the tool call this message responds to (role="tool")
    #[serde(default)]
    pub tool_call_id: Option<String>,
    /// Function name for tool result messages
    #[serde(default)]
    pub name: Option<String>,
}
211
212// ============================================================================
213// Tool Calling Types
214// ============================================================================
215
/// A tool definition in the `OpenAI` format
///
/// `tool_type` is kept as a string rather than an enum; "function" is the
/// only value in the current `OpenAI` spec.
#[derive(Debug, Clone, Deserialize)]
pub struct ToolDefinition {
    /// Tool type (always "function" currently)
    #[serde(rename = "type")]
    pub tool_type: String,
    /// Function definition
    pub function: FunctionObject,
}
225
/// A function definition within a tool
///
/// `parameters` is stored as an opaque `serde_json::Value`; no JSON Schema
/// validation is performed by this type.
#[derive(Debug, Clone, Deserialize)]
pub struct FunctionObject {
    /// Name of the function
    pub name: String,
    /// Description of what the function does
    #[serde(default)]
    pub description: Option<String>,
    /// JSON Schema for the function parameters
    #[serde(default)]
    pub parameters: Option<serde_json::Value>,
}
238
/// Controls which tools the model may call
///
/// Deserialized with `#[serde(untagged)]`: serde tries the plain-string
/// `Mode` form first, then the object `Specific` form.
#[derive(Debug, Clone, Deserialize)]
#[serde(untagged)]
pub enum ToolChoice {
    /// String variant: "none", "auto", or "required"
    Mode(String),
    /// Specific function variant: {"type": "function", "function": {"name": "..."}}
    Specific(ToolChoiceSpecific),
}
248
/// A specific tool choice forcing a particular function
///
/// Wire shape: `{"type": "function", "function": {"name": "..."}}`.
#[derive(Debug, Clone, Deserialize)]
pub struct ToolChoiceSpecific {
    /// Tool type (always "function")
    #[serde(rename = "type")]
    pub tool_type: String,
    /// Function to force
    pub function: ToolChoiceFunction,
}
258
/// Function name within a specific tool choice
#[derive(Debug, Clone, Deserialize)]
pub struct ToolChoiceFunction {
    /// Name of the function to call — must match a name in the request's
    /// `tools` array for the choice to be meaningful
    pub name: String,
}
265
/// A tool call issued by the assistant
///
/// Appears both in responses (serialized) and in request history
/// (deserialized), hence both derives. `index` defaults to 0 when absent
/// from incoming JSON.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCall {
    /// Position index of this tool call in the array (required by `OpenAI` spec)
    #[serde(default)]
    pub index: usize,
    /// Unique identifier for this tool call
    pub id: String,
    /// Tool type (always "function")
    #[serde(rename = "type")]
    pub tool_type: String,
    /// Function call details
    pub function: ToolCallFunction,
}
280
/// Function call details within a tool call
///
/// `arguments` is the raw JSON-encoded string as produced by the model;
/// it is not parsed or validated by this type.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCallFunction {
    /// Name of the function to call
    pub name: String,
    /// JSON-encoded arguments
    pub arguments: String,
}
289
290// ============================================================================
291// Response Types (non-streaming)
292// ============================================================================
293
/// OpenAI-compatible chat completion response
///
/// `usage` and `warnings` are omitted from the serialized JSON entirely
/// when `None` (`skip_serializing_if`), matching `OpenAI`'s output shape.
#[derive(Debug, Serialize)]
pub struct ChatCompletionResponse {
    /// Unique response identifier
    pub id: String,
    /// Object type (always "chat.completion")
    pub object: &'static str,
    /// Unix timestamp of creation
    pub created: u64,
    /// Model used for generation
    pub model: String,
    /// Response choices (always one for embacle)
    pub choices: Vec<Choice>,
    /// Token usage statistics
    #[serde(skip_serializing_if = "Option::is_none")]
    pub usage: Option<Usage>,
    /// Warnings about unsupported request parameters
    #[serde(skip_serializing_if = "Option::is_none")]
    pub warnings: Option<Vec<String>>,
}
314
/// A single choice in a chat completion response
#[derive(Debug, Serialize)]
pub struct Choice {
    /// Choice index (always 0)
    pub index: u32,
    /// Generated message
    pub message: ResponseMessage,
    /// Reason the generation stopped (e.g. "stop", "tool_calls"); serialized
    /// as JSON `null` when `None`
    pub finish_reason: Option<String>,
}
325
/// Message in a chat completion response
///
/// `content` and `tool_calls` are each dropped from the JSON when `None`,
/// so a tool-call response carries no `content` key at all.
#[derive(Debug, Serialize)]
pub struct ResponseMessage {
    /// Role (always "assistant")
    pub role: &'static str,
    /// Generated content (None when `tool_calls` are present)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Tool calls requested by the assistant
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
}
338
/// Token usage statistics
///
/// Field renames map the short Rust names to the `OpenAI` wire names
/// (`prompt_tokens`, `completion_tokens`, `total_tokens`).
#[derive(Debug, Serialize)]
pub struct Usage {
    /// Tokens in the prompt
    #[serde(rename = "prompt_tokens")]
    pub prompt: u32,
    /// Tokens in the completion
    #[serde(rename = "completion_tokens")]
    pub completion: u32,
    /// Total tokens
    #[serde(rename = "total_tokens")]
    pub total: u32,
}
352
353// ============================================================================
354// Streaming Response Types
355// ============================================================================
356
/// OpenAI-compatible streaming chunk
///
/// Emitted as one SSE event per chunk; `id`, `created`, and `model` repeat
/// the same values across all chunks of one response.
#[derive(Debug, Serialize)]
pub struct ChatCompletionChunk {
    /// Unique response identifier (same across all chunks)
    pub id: String,
    /// Object type (always "chat.completion.chunk")
    pub object: &'static str,
    /// Unix timestamp of creation
    pub created: u64,
    /// Model used for generation
    pub model: String,
    /// Streaming choices
    pub choices: Vec<ChunkChoice>,
}
371
/// A single choice in a streaming chunk
#[derive(Debug, Serialize)]
pub struct ChunkChoice {
    /// Choice index (always 0)
    pub index: u32,
    /// Content delta
    pub delta: Delta,
    /// Reason the generation stopped (only on final chunk); serialized as
    /// JSON `null` on intermediate chunks
    pub finish_reason: Option<String>,
}
382
/// Delta content in a streaming chunk
///
/// All fields are dropped from the JSON when `None`, so a final chunk may
/// serialize as an empty `{}` delta.
#[derive(Debug, Serialize)]
pub struct Delta {
    /// Role (only present on first chunk)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub role: Option<&'static str>,
    /// Content token (empty string on role-only or final chunk)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Tool calls (reserved for future streaming tool call support)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_calls: Option<Vec<ToolCall>>,
}
396
397// ============================================================================
398// Multiplex Response (non-standard extension)
399// ============================================================================
400
/// Response for multiplex requests (multiple providers)
///
/// Non-standard extension returned when the request's `model` field is an
/// array; one `MultiplexProviderResult` per requested model.
#[derive(Debug, Serialize)]
pub struct MultiplexResponse {
    /// Unique response identifier
    pub id: String,
    /// Object type (always "chat.completion.multiplex")
    pub object: &'static str,
    /// Unix timestamp of creation
    pub created: u64,
    /// Per-provider results
    pub results: Vec<MultiplexProviderResult>,
    /// Human-readable summary
    pub summary: String,
}
415
/// Result from a single provider in a multiplex request
///
/// By convention exactly one of `content` / `error` is set (success vs
/// failure); whichever is `None` is omitted from the JSON.
#[derive(Debug, Serialize)]
pub struct MultiplexProviderResult {
    /// Provider identifier
    pub provider: String,
    /// Model used
    #[serde(skip_serializing_if = "Option::is_none")]
    pub model: Option<String>,
    /// Response content (None on failure)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub content: Option<String>,
    /// Error message (None on success)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    /// Wall-clock time in milliseconds
    pub duration_ms: u64,
}
433
434// ============================================================================
435// Models Endpoint
436// ============================================================================
437
/// Response for GET /v1/models
#[derive(Debug, Serialize)]
pub struct ModelsResponse {
    /// Object type (always "list")
    pub object: &'static str,
    /// Available models
    pub data: Vec<ModelObject>,
}
446
/// A single model entry in the models list
///
/// Note: unlike upstream `OpenAI`, the `created` field is not included.
#[derive(Debug, Serialize)]
pub struct ModelObject {
    /// Model identifier (e.g., "copilot:gpt-4o")
    pub id: String,
    /// Object type (always "model")
    pub object: &'static str,
    /// Owner/provider name
    pub owned_by: String,
}
457
458// ============================================================================
459// Health Endpoint
460// ============================================================================
461
/// Response for GET /health
#[derive(Debug, Serialize)]
pub struct HealthResponse {
    /// Overall status
    pub status: &'static str,
    /// Per-provider readiness, keyed by provider name
    pub providers: std::collections::HashMap<String, String>,
}
470
471// ============================================================================
472// Error Response
473// ============================================================================
474
/// OpenAI-compatible error response
///
/// Wire shape: `{"error": {"message": ..., "type": ..., ...}}`.
#[derive(Debug, Serialize)]
pub struct ErrorResponse {
    /// Error details
    pub error: ErrorDetail,
}
481
/// Error detail within an `OpenAI` error response
///
/// `error_type` is renamed to `"type"` on the wire because `type` is a
/// Rust keyword.
#[derive(Debug, Serialize)]
pub struct ErrorDetail {
    /// Error message
    pub message: String,
    /// Error type
    #[serde(rename = "type")]
    pub error_type: String,
    /// Parameter that caused the error (if applicable)
    #[serde(skip_serializing_if = "Option::is_none")]
    pub param: Option<String>,
    /// Error code
    #[serde(skip_serializing_if = "Option::is_none")]
    pub code: Option<String>,
}
497
498impl ErrorResponse {
499    /// Build an error response with the given type and message
500    pub fn new(error_type: impl Into<String>, message: impl Into<String>) -> Self {
501        Self {
502            error: ErrorDetail {
503                message: message.into(),
504                error_type: error_type.into(),
505                param: None,
506                code: None,
507            },
508        }
509    }
510}
511
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn deserialize_single_model() {
        let json = r#"{"model":"copilot:gpt-4o","messages":[{"role":"user","content":"hi"}]}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.model {
            ModelField::Single(m) => assert_eq!(m, "copilot:gpt-4o"),
            ModelField::Multiple(_) => panic!("expected single"),
        }
        assert!(!req.stream);
    }

    #[test]
    fn deserialize_multiple_models() {
        let json = r#"{"model":["copilot:gpt-4o","claude:opus"],"messages":[{"role":"user","content":"hi"}]}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.model {
            ModelField::Multiple(models) => {
                assert_eq!(models.len(), 2);
                assert_eq!(models[0], "copilot:gpt-4o");
                assert_eq!(models[1], "claude:opus");
            }
            ModelField::Single(_) => panic!("expected multiple"),
        }
    }

    #[test]
    fn deserialize_with_stream_flag() {
        let json =
            r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"stream":true}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert!(req.stream);
    }

    #[test]
    fn deserialize_message_with_null_content() {
        let json = r#"{"model":"copilot","messages":[{"role":"assistant","content":null,"tool_calls":[{"id":"call_1","type":"function","function":{"name":"search","arguments":"{}"}}]}]}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert!(req.messages[0].content.is_none());
        assert!(req.messages[0].tool_calls.is_some());
    }

    #[test]
    fn deserialize_message_without_content_field() {
        // The fixture genuinely omits "content" (serde fills None for a
        // missing Option field), matching what this test's name promises.
        let json = r#"{"model":"copilot","messages":[{"role":"tool","tool_call_id":"call_1","name":"search"}]}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert_eq!(req.messages[0].role, "tool");
        assert!(req.messages[0].content.is_none());
        assert_eq!(req.messages[0].tool_call_id.as_deref(), Some("call_1"));
        assert_eq!(req.messages[0].name.as_deref(), Some("search"));
    }

    #[test]
    fn deserialize_multipart_content() {
        let json = r#"{
            "model": "copilot",
            "messages": [{
                "role": "user",
                "content": [
                    {"type": "text", "text": "What is in this image?"},
                    {"type": "image_url", "image_url": {"url": "data:image/png;base64,aGVsbG8="}}
                ]
            }]
        }"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        let content = req.messages[0].content.as_ref().expect("content present");
        match content {
            MessageContent::Parts(parts) => {
                assert_eq!(parts.len(), 2);
                assert!(
                    matches!(&parts[0], ContentPart::Text { text } if text == "What is in this image?")
                );
                assert!(
                    matches!(&parts[1], ContentPart::ImageUrl { image_url } if image_url.url.contains("base64"))
                );
            }
            MessageContent::Text(_) => panic!("expected Parts variant"),
        }
    }

    #[test]
    fn message_content_as_text_plain_string() {
        let content = MessageContent::Text("hello".to_owned());
        assert_eq!(content.as_text(), "hello");
    }

    #[test]
    fn message_content_as_text_multipart() {
        let content = MessageContent::Parts(vec![
            ContentPart::Text {
                text: "describe ".to_owned(),
            },
            ContentPart::ImageUrl {
                image_url: ImageUrlDetail {
                    url: "data:image/png;base64,abc".to_owned(),
                },
            },
            ContentPart::Text {
                text: "this image".to_owned(),
            },
        ]);
        assert_eq!(content.as_text(), "describe this image");
    }

    #[test]
    fn deserialize_plain_string_content_backward_compat() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}]}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.messages[0].content.as_ref().expect("content present") {
            MessageContent::Text(s) => assert_eq!(s, "hi"),
            MessageContent::Parts(_) => panic!("expected Text variant"),
        }
    }

    #[test]
    fn deserialize_tool_definitions() {
        let json = r#"{
            "model": "copilot",
            "messages": [{"role": "user", "content": "hi"}],
            "tools": [{
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "description": "Get weather for a city",
                    "parameters": {"type": "object", "properties": {"city": {"type": "string"}}, "required": ["city"]}
                }
            }]
        }"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        let tools = req.tools.expect("tools present");
        assert_eq!(tools.len(), 1);
        assert_eq!(tools[0].tool_type, "function");
        assert_eq!(tools[0].function.name, "get_weather");
        assert!(tools[0].function.parameters.is_some());
    }

    #[test]
    fn deserialize_tool_choice_auto() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"tool_choice":"auto"}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.tool_choice.expect("tool_choice present") {
            ToolChoice::Mode(m) => assert_eq!(m, "auto"),
            ToolChoice::Specific(_) => panic!("expected mode"),
        }
    }

    #[test]
    fn deserialize_tool_choice_specific() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"tool_choice":{"type":"function","function":{"name":"get_weather"}}}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.tool_choice.expect("tool_choice present") {
            ToolChoice::Specific(s) => assert_eq!(s.function.name, "get_weather"),
            ToolChoice::Mode(_) => panic!("expected specific"),
        }
    }

    #[test]
    fn serialize_completion_response() {
        let resp = ChatCompletionResponse {
            id: "chatcmpl-test".to_owned(),
            object: "chat.completion",
            created: 1_700_000_000,
            model: "copilot:gpt-4o".to_owned(),
            choices: vec![Choice {
                index: 0,
                message: ResponseMessage {
                    role: "assistant",
                    content: Some("Hello!".to_owned()),
                    tool_calls: None,
                },
                finish_reason: Some("stop".to_owned()),
            }],
            usage: None,
            warnings: None,
        };
        let json = serde_json::to_string(&resp).expect("serialize");
        assert!(json.contains("chat.completion"));
        assert!(json.contains("Hello!"));
        assert!(!json.contains("tool_calls"));
    }

    #[test]
    fn serialize_response_with_tool_calls() {
        let resp = ChatCompletionResponse {
            id: "chatcmpl-test".to_owned(),
            object: "chat.completion",
            created: 1_700_000_000,
            model: "copilot:gpt-4o".to_owned(),
            choices: vec![Choice {
                index: 0,
                message: ResponseMessage {
                    role: "assistant",
                    content: None,
                    tool_calls: Some(vec![ToolCall {
                        index: 0,
                        id: "call_abc123".to_owned(),
                        tool_type: "function".to_owned(),
                        function: ToolCallFunction {
                            name: "get_weather".to_owned(),
                            arguments: r#"{"city":"Paris"}"#.to_owned(),
                        },
                    }]),
                },
                finish_reason: Some("tool_calls".to_owned()),
            }],
            usage: None,
            warnings: None,
        };
        let json = serde_json::to_string(&resp).expect("serialize");
        assert!(json.contains("tool_calls"));
        assert!(json.contains("call_abc123"));
        assert!(json.contains("get_weather"));
        assert!(!json.contains(r#""content""#));
    }

    #[test]
    fn serialize_error_response() {
        let resp = ErrorResponse::new("invalid_request_error", "Unknown model");
        let json = serde_json::to_string(&resp).expect("serialize");
        assert!(json.contains("invalid_request_error"));
        assert!(json.contains("Unknown model"));
    }

    #[test]
    fn serialize_chunk_response() {
        let chunk = ChatCompletionChunk {
            id: "chatcmpl-test".to_owned(),
            object: "chat.completion.chunk",
            created: 1_700_000_000,
            model: "copilot".to_owned(),
            choices: vec![ChunkChoice {
                index: 0,
                delta: Delta {
                    role: None,
                    content: Some("token".to_owned()),
                    tool_calls: None,
                },
                finish_reason: None,
            }],
        };
        let json = serde_json::to_string(&chunk).expect("serialize");
        assert!(json.contains("chat.completion.chunk"));
        assert!(json.contains("token"));
        assert!(!json.contains("tool_calls"));
    }

    #[test]
    fn deserialize_tool_choice_none() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"tool_choice":"none"}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.tool_choice.expect("tool_choice present") {
            ToolChoice::Mode(m) => assert_eq!(m, "none"),
            ToolChoice::Specific(_) => panic!("expected mode"),
        }
    }

    #[test]
    fn deserialize_tool_choice_required() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"tool_choice":"required"}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.tool_choice.expect("tool_choice present") {
            ToolChoice::Mode(m) => assert_eq!(m, "required"),
            ToolChoice::Specific(_) => panic!("expected mode"),
        }
    }

    #[test]
    fn deserialize_response_format_text() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"response_format":{"type":"text"}}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert!(matches!(
            req.response_format,
            Some(ResponseFormatRequest::Text)
        ));
    }

    #[test]
    fn deserialize_response_format_json_object() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"response_format":{"type":"json_object"}}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert!(matches!(
            req.response_format,
            Some(ResponseFormatRequest::JsonObject)
        ));
    }

    #[test]
    fn deserialize_response_format_json_schema() {
        let json = r#"{
            "model": "copilot",
            "messages": [{"role": "user", "content": "hi"}],
            "response_format": {
                "type": "json_schema",
                "json_schema": {
                    "name": "weather",
                    "schema": {"type": "object", "properties": {"temp": {"type": "number"}}}
                }
            }
        }"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        match req.response_format {
            Some(ResponseFormatRequest::JsonSchema { json_schema }) => {
                assert_eq!(json_schema.name, "weather");
                assert!(json_schema.schema["properties"]["temp"].is_object());
            }
            other => panic!("expected JsonSchema, got: {other:?}"),
        }
    }

    #[test]
    fn deserialize_top_p() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"top_p":0.9}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert_eq!(req.top_p, Some(0.9));
    }

    #[test]
    fn deserialize_stop_single() {
        let json =
            r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"stop":"END"}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        let stop = req.stop.expect("stop present");
        assert_eq!(stop.into_vec(), vec!["END"]);
    }

    #[test]
    fn deserialize_stop_array() {
        let json = r#"{"model":"copilot","messages":[{"role":"user","content":"hi"}],"stop":["END","STOP"]}"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        let stop = req.stop.expect("stop present");
        assert_eq!(stop.into_vec(), vec!["END", "STOP"]);
    }

    #[test]
    fn stop_field_len() {
        let single = StopField::Single("END".to_owned());
        assert_eq!(single.len(), 1);
        let multiple = StopField::Multiple(vec!["A".to_owned(), "B".to_owned(), "C".to_owned()]);
        assert_eq!(multiple.len(), 3);
    }

    #[test]
    fn stop_field_is_empty() {
        // A Single always counts as one sequence, even an empty string.
        assert!(!StopField::Single(String::new()).is_empty());
        assert!(StopField::Multiple(Vec::new()).is_empty());
        assert!(!StopField::Multiple(vec!["END".to_owned()]).is_empty());
    }

    #[test]
    fn stop_field_into_vec_truncates_at_four() {
        let oversized = StopField::Multiple((0..10).map(|i| format!("stop_{i}")).collect());
        let result = oversized.into_vec();
        assert_eq!(result.len(), 4);
        assert_eq!(result[0], "stop_0");
        assert_eq!(result[3], "stop_3");
    }

    #[test]
    fn stop_field_into_vec_single_not_truncated() {
        let single = StopField::Single("END".to_owned());
        assert_eq!(single.into_vec(), vec!["END"]);
    }

    #[test]
    fn stop_field_to_bounded_vec_truncates_and_preserves_original() {
        let oversized = StopField::Multiple((0..10).map(|i| format!("stop_{i}")).collect());
        let bounded = oversized.to_bounded_vec();
        assert_eq!(bounded.len(), 4);
        assert_eq!(bounded[0], "stop_0");
        assert_eq!(bounded[3], "stop_3");
        // Borrowing conversion leaves the original field untouched.
        assert_eq!(oversized.len(), 10);
    }

    #[test]
    fn deserialize_all_optional_fields() {
        let json = r#"{
            "model": "copilot",
            "messages": [{"role": "user", "content": "hi"}],
            "temperature": 0.7,
            "max_tokens": 100,
            "top_p": 0.95,
            "stop": ["END"],
            "stream": true
        }"#;
        let req: ChatCompletionRequest = serde_json::from_str(json).expect("deserialize");
        assert_eq!(req.temperature, Some(0.7));
        assert_eq!(req.max_tokens, Some(100));
        assert_eq!(req.top_p, Some(0.95));
        assert!(req.stop.is_some());
        assert!(req.stream);
    }

    #[test]
    fn serialize_models_response() {
        let resp = ModelsResponse {
            object: "list",
            data: vec![ModelObject {
                id: "copilot:gpt-4o".to_owned(),
                object: "model",
                owned_by: "copilot".to_owned(),
            }],
        };
        let json = serde_json::to_string(&resp).expect("serialize");
        assert!(json.contains("copilot:gpt-4o"));
    }
}
892        };
893        let json = serde_json::to_string(&resp).expect("serialize");
894        assert!(json.contains("copilot:gpt-4o"));
895    }
896}