// ds_api/raw/request/chat_completion.rs
1use serde::{Deserialize, Serialize};
2use serde_json::{Map, Value};
3
4use super::{
5 message::Message, model::Model, response_format::ResponseFormat, stop::Stop,
6 stream_options::StreamOptions, thinking::Thinking, tool::Tool, tool_choice::ToolChoice,
7};
8
/// Request payload for a chat completion call.
///
/// `#[serde(default)]` lets any field absent from incoming JSON fall back to
/// its `Default` value, and every optional field is omitted from the
/// serialized JSON when `None`.
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(default)]
pub struct ChatCompletionRequest {
    /// List of messages in the conversation.
    pub messages: Vec<Message>,

    /// The model ID to use. Use `deepseek-chat` for faster responses or `deepseek-reasoner` for deeper reasoning capabilities.
    pub model: Model,

    /// Controls switching between reasoning (thinking) and non-reasoning modes.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub thinking: Option<Thinking>,

    /// Possible values: >= -2 and <= 2
    /// Default value: 0
    /// A number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text,
    /// reducing the chance of repeated content.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub frequency_penalty: Option<f32>,

    /// Maximum number of tokens to generate for the completion in a single request.
    /// The combined length of input and output tokens is limited by the model's context window.
    /// See documentation for ranges and defaults.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub max_tokens: Option<u32>,

    /// Possible values: >= -2 and <= 2
    /// Default value: 0
    /// A number between -2.0 and 2.0. Positive values penalize new tokens if they already appear in the text,
    /// encouraging the model to introduce new topics.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub presence_penalty: Option<f32>,

    /// An object specifying the format the model must output.
    /// Set to `{ "type": "json_object" }` to enable JSON mode which enforces valid JSON output.
    /// Note: When using JSON mode you must also instruct the model via system or user messages to output JSON.
    /// Otherwise the model may emit whitespace until token limits are reached which can appear to hang.
    /// Also, if `finish_reason == "length"`, the output may be truncated due to `max_tokens` or context limits.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub response_format: Option<ResponseFormat>,

    /// A string or up to 16 strings. Generation will stop when one of these tokens is encountered.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stop: Option<Stop>,

    /// If true, the response will be streamed as SSE (server-sent events). The stream ends with `data: [DONE]`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,

    /// Options related to streaming output. Only valid when `stream` is true.
    /// `include_usage`: boolean
    /// If true, an extra chunk with `usage` (aggregate token counts) will be sent before the final `data: [DONE]`.
    /// Other chunks also include `usage` but with a null value.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream_options: Option<StreamOptions>,

    /// Possible values: <= 2
    /// Default value: 1
    /// Sampling temperature between 0 and 2. Higher values (e.g. 0.8) produce more random output;
    /// lower values (e.g. 0.2) make output more focused and deterministic.
    /// Typically change either `temperature` or `top_p`, not both.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,

    /// Possible values: <= 1
    /// Default value: 1
    /// An alternative to temperature that considers only the top `p` probability mass.
    /// For example, `top_p = 0.1` means only tokens comprising the top 10% probability mass are considered.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_p: Option<f32>,

    /// List of tools the model may call. Currently only `function` is supported.
    /// Provide a list of functions that accept JSON input. Up to 128 functions are supported.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tools: Option<Vec<Tool>>,

    /// Controls how the model may call tools:
    /// - `none`: the model will not call tools and will produce a normal message.
    /// - `auto`: the model can choose to produce a message or call one or more tools.
    /// - `required`: the model must call one or more tools.
    ///
    /// Specifying a particular tool via `{"type":"function","function":{"name":"my_function"}}` forces the model to call that tool.
    ///
    /// Default is `none` when no tools exist; when tools exist the default is `auto`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoice>,

    /// Return log-probabilities for the output tokens. If true, logprobs for
    /// each output token are returned. Nullable on the wire; `None` omits the
    /// field entirely.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub logprobs: Option<bool>,

    /// Possible values: <= 20
    /// An integer N between 0 and 20 that returns the top-N token log-probabilities for each output position.
    /// When specifying this parameter, `logprobs` must be true.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub top_logprobs: Option<u32>,

    /// Extra arbitrary JSON body fields. When set, these key/value pairs are merged
    /// into the top-level request JSON (via `#[serde(flatten)]`). Use this to pass
    /// provider-specific or custom fields not yet modeled by the library.
    #[serde(flatten, skip_serializing_if = "Option::is_none")]
    pub extra_body: Option<Map<String, Value>>,
}
113
114impl ChatCompletionRequest {
115 /// Add a single extra top-level field to the request body (in-place).
116 ///
117 /// This merges the given `key` / `value` pair into the request's
118 /// `extra_body` map, creating that map if necessary. Values placed into
119 /// `extra_body` are serialized into the top-level JSON of the request
120 /// due to `#[serde(flatten)]`, so they appear as peers to fields such as
121 /// `messages` and `model`.
122 ///
123 /// Notes:
124 /// - Do not add keys that intentionally collide with existing top-level
125 /// fields (for example `messages` or `model`) unless you explicitly want
126 /// to override them — such collisions are not recommended.
127 /// - Use this in-place helper when you have a mutable `ChatCompletionRequest`
128 /// instance and want to add a field without consuming the value.
129 ///
130 /// Example:
131 /// ```
132 /// # use ds_api::raw::request::ChatCompletionRequest;
133 /// # use serde_json::json;
134 /// let mut req = ChatCompletionRequest::default();
135 /// req.add_extra_field("provider_opt", json!("x"));
136 /// ```
137 pub fn add_extra_field(&mut self, key: impl Into<String>, value: Value) {
138 if let Some(ref mut m) = self.extra_body {
139 m.insert(key.into(), value);
140 } else {
141 let mut m = Map::new();
142 m.insert(key.into(), value);
143 self.extra_body = Some(m);
144 }
145 }
146
147 /// Builder-style helper to add a single extra field and return the owned
148 /// request for chaining.
149 ///
150 /// This is a convenience that consumes (takes ownership of) `self`, adds
151 /// the given key/value pair to `extra_body`, and returns the modified
152 /// `ChatCompletionRequest` so you can continue chaining builder calls.
153 ///
154 /// Example:
155 /// ```
156 /// # use ds_api::raw::request::ChatCompletionRequest;
157 /// # use serde_json::json;
158 /// let req = ChatCompletionRequest::default()
159 /// .with_extra_field("provider_opt", json!("x"));
160 /// ```
161 pub fn with_extra_field(mut self, key: impl Into<String>, value: Value) -> Self {
162 self.add_extra_field(key, value);
163 self
164 }
165}
166
#[cfg(test)]
mod tests {
    use super::*;
    use crate::raw::request::message::Role;

    /// Round-trips a populated request through JSON and checks the fields survive.
    #[test]
    fn test_chat_completion_request_serialization() {
        let user_msg = Message {
            role: Role::User,
            content: Some("Hello, world!".to_string()),
            name: None,
            tool_call_id: None,
            tool_calls: None,
            reasoning_content: None,
            prefix: None,
        };

        // Only the fields under test are set; everything else takes its default.
        let request = ChatCompletionRequest {
            messages: vec![user_msg],
            model: Model::DeepseekChat,
            frequency_penalty: Some(0.5),
            max_tokens: Some(100),
            stream: Some(false),
            temperature: Some(0.7),
            ..Default::default()
        };

        let encoded = serde_json::to_string(&request).unwrap();
        let decoded: ChatCompletionRequest = serde_json::from_str(&encoded).unwrap();

        assert_eq!(decoded.messages.len(), 1);
        assert_eq!(decoded.messages[0].content.as_deref(), Some("Hello, world!"));
        assert!(matches!(decoded.model, Model::DeepseekChat));
        assert_eq!(decoded.frequency_penalty, Some(0.5));
        assert_eq!(decoded.max_tokens, Some(100));
        assert_eq!(decoded.stream, Some(false));
        assert_eq!(decoded.temperature, Some(0.7));
    }

    /// A default request has no messages, the default model, and every
    /// optional knob unset.
    #[test]
    fn test_default_chat_completion_request() {
        let req = ChatCompletionRequest::default();

        assert!(req.messages.is_empty());
        assert!(matches!(req.model, Model::DeepseekChat));
        assert!(req.thinking.is_none());
        assert!(req.frequency_penalty.is_none());
        assert!(req.max_tokens.is_none());
        assert!(req.presence_penalty.is_none());
        assert!(req.response_format.is_none());
        assert!(req.stop.is_none());
        assert!(req.stream.is_none());
        assert!(req.stream_options.is_none());
        assert!(req.temperature.is_none());
        assert!(req.top_p.is_none());
        assert!(req.tools.is_none());
        assert!(req.tool_choice.is_none());
        assert!(req.logprobs.is_none());
        assert!(req.top_logprobs.is_none());
        assert!(req.extra_body.is_none());
    }

    /// Entries in `extra_body` must be flattened into the top level of the
    /// serialized JSON, next to the modeled fields.
    #[test]
    fn test_extra_body_serialize_merge() {
        use crate::raw::model::Model;
        use serde_json::json;

        // Populate extras through the builder helpers instead of a raw Map.
        let req = ChatCompletionRequest {
            messages: vec![],
            model: Model::DeepseekChat,
            ..Default::default()
        }
        .with_extra_field("x_custom", json!("v1"))
        .with_extra_field("x_flag", json!(true));

        // The custom keys must appear at the top level of the JSON object.
        let encoded = serde_json::to_value(&req).expect("serialize");
        assert_eq!(encoded["x_custom"], json!("v1"));
        assert_eq!(encoded["x_flag"], json!(true));
    }
}
266}