openllm/api/
chat_completion.rs

use crate::{IntoRequest, ToSchema};
use derive_builder::Builder;
use reqwest_middleware::{ClientWithMiddleware, RequestBuilder};
use serde::{Deserialize, Serialize};
use strum::{Display, EnumIter, EnumMessage, EnumString, VariantNames};
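
/// Request body for the `POST /chat/completions` endpoint.
///
/// A minimal usage sketch with the builder that `derive_builder` generates
/// (values are illustrative):
///
/// ```ignore
/// let req = ChatCompletionRequestBuilder::default()
///     .messages(vec![ChatCompletionMessage::new_user("Hello!", "")])
///     .temperature(0.2)
///     .build()
///     .unwrap();
/// ```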
#[derive(Debug, Clone, Serialize, Builder)]
pub struct ChatCompletionRequest {
    /// A list of messages comprising the conversation so far.
    #[builder(setter(into))]
    messages: Vec<ChatCompletionMessage>,
    /// ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.
    #[builder(default)]
    model: ChatCompleteModel,
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    frequency_penalty: Option<f32>,

    // Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
    // #[builder(default, setter(strip_option))]
    // #[serde(skip_serializing_if = "Option::is_none")]
    // logit_bias: Option<f32>,
    /// The maximum number of tokens to generate in the chat completion.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    max_tokens: Option<usize>,
    /// How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep n as 1 to minimize costs.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    n: Option<usize>,
    /// Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    presence_penalty: Option<f32>,
    /// An object specifying the format that the model must output. Setting to { "type": "json_object" } enables JSON mode, which guarantees the message the model generates is valid JSON.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    response_format: Option<ChatResponseFormatObject>,
    /// This feature is in Beta. If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same seed and parameters should return the same result. Determinism is not guaranteed, and you should refer to the system_fingerprint response parameter to monitor changes in the backend.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    seed: Option<usize>,
    /// Up to 4 sequences where the API will stop generating further tokens.
    // TODO: make this an enum
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    stop: Option<String>,
    /// If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub stream: Option<bool>,
    /// What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    temperature: Option<f32>,
    /// An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    top_p: Option<f32>,
    /// A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for.
    #[builder(default, setter(into))]
    #[serde(skip_serializing_if = "Vec::is_empty")]
    tools: Vec<Tool>,
    /// Controls which (if any) function is called by the model. none means the model will not call a function and instead generates a message. auto means the model can pick between generating a message or calling a function. Specifying a particular function via {"type": "function", "function": {"name": "my_function"}} forces the model to call that function. none is the default when no functions are present. auto is the default if functions are present.
    #[builder(default, setter(strip_option))]
    #[serde(skip_serializing_if = "Option::is_none")]
    tool_choice: Option<ToolChoice>,
    /// A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
    #[builder(default, setter(strip_option, into))]
    #[serde(skip_serializing_if = "Option::is_none")]
    user: Option<String>,
}

#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, EnumString, Display, VariantNames)]
#[serde(rename_all = "snake_case")]
pub enum ToolChoice {
    #[default]
    None,
    Auto,
    // TODO: we need something like this: #[serde(tag = "type", content = "function")]
    Function {
        name: String,
    },
}

#[derive(Debug, Clone, Serialize)]
pub struct Tool {
    /// The type of the tool. Currently, only function is supported.
    r#type: ToolType,
    /// The function definition. Currently, functions are the only supported tool.
    function: FunctionInfo,
}

#[derive(Debug, Clone, Serialize)]
pub struct FunctionInfo {
    /// A description of what the function does, used by the model to choose when and how to call the function.
    description: String,
    /// The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
    name: String,
    /// The parameters the function accepts, described as a JSON Schema object.
    parameters: serde_json::Value,
}

#[derive(Debug, Clone, Serialize)]
pub struct ChatResponseFormatObject {
    r#type: ChatResponseFormat,
}

#[derive(
    Debug, Clone, Copy, Default, PartialEq, Eq, Serialize, EnumString, Display, VariantNames,
)]
#[serde(rename_all = "snake_case")]
pub enum ChatResponseFormat {
    Text,
    #[default]
    #[serde(rename = "json_object")]
    Json,
}

#[derive(Debug, Clone, Serialize, Display, VariantNames, EnumMessage)]
#[serde(rename_all = "snake_case", tag = "role")]
pub enum ChatCompletionMessage {
    /// A message from the system.
    System(SystemMessage),
    /// A message from a human.
    User(UserMessage),
    /// A message from the assistant.
    Assistant(AssistantMessage),
    /// A message from a tool.
    Tool(ToolMessage),
}

#[derive(
    Debug,
    Clone,
    Default,
    PartialEq,
    Eq,
    Serialize,
    Deserialize,
    EnumString,
    EnumIter,
    Display,
    VariantNames,
    EnumMessage,
)]
pub enum ChatCompleteModel {
    /// The default model. Currently, this is the gpt-3.5-turbo-1106 model.
    #[default]
    #[serde(rename = "gpt-3.5-turbo-1106")]
    #[strum(serialize = "gpt-3.5-turbo")]
    Gpt3Turbo,
    /// GPT-3.5 turbo model with instruct capability.
    #[serde(rename = "gpt-3.5-turbo-instruct")]
    #[strum(serialize = "gpt-3.5-turbo-instruct")]
    Gpt3TurboInstruct,
    /// The latest GPT-4 model. Currently, this is the gpt-4-1106-preview model.
    #[serde(rename = "gpt-4-1106-preview")]
    #[strum(serialize = "gpt-4-turbo")]
    Gpt4Turbo,
    /// The latest GPT-4 model with vision capability. Currently, this is the gpt-4-1106-vision-preview model.
    #[serde(rename = "gpt-4-1106-vision-preview")]
    #[strum(serialize = "gpt-4-turbo-vision")]
    Gpt4TurboVision,
    /// DeepSeek's chat model.
    #[serde(rename = "deepseek-chat")]
    #[strum(serialize = "deepseek-chat")]
    DeepSeekChat,
    /// DeepSeek's reasoning model.
    #[serde(rename = "deepseek-reasoner")]
    #[strum(serialize = "deepseek-reasoner")]
    DeepSeekReasoner,
    /// Any other model, identified by its raw model ID string.
    #[serde(untagged)]
    Other(String),
}

#[derive(Debug, Clone, Serialize)]
pub struct SystemMessage {
    /// The contents of the system message.
    content: String,
    /// An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    #[serde(skip_serializing_if = "Option::is_none")]
    name: Option<String>,
}

#[derive(Debug, Clone, Serialize)]
pub struct UserMessage {
    /// The contents of the user message.
    content: String,
    /// An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    #[serde(skip_serializing_if = "Option::is_none")]
    name: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AssistantMessage {
    /// The contents of the assistant message.
    #[serde(default)]
    pub content: Option<String>,
    /// An optional name for the participant. Provides the model information to differentiate between participants of the same role.
    #[serde(skip_serializing_if = "Option::is_none", default)]
    pub name: Option<String>,
    /// The tool calls generated by the model, such as function calls.
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub tool_calls: Vec<ToolCall>,
}

#[derive(Debug, Clone, Serialize)]
pub struct ToolMessage {
    /// The contents of the tool message.
    content: String,
    /// Tool call that this message is responding to.
    tool_call_id: String,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ToolCall {
    /// The ID of the tool call.
    pub id: String,
    /// The type of the tool. Currently, only function is supported.
    pub r#type: ToolType,
    /// The function that the model called.
    pub function: FunctionCall,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FunctionCall {
    /// The name of the function to call.
    pub name: String,
    /// The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
    pub arguments: String,
}

#[derive(
    Debug,
    Clone,
    Copy,
    PartialEq,
    Eq,
    Default,
    Serialize,
    Deserialize,
    EnumString,
    Display,
    VariantNames,
)]
#[serde(rename_all = "snake_case")]
pub enum ToolType {
    #[default]
    Function,
}

#[derive(Debug, Clone, Deserialize)]
pub struct ChatCompletionResponse {
    /// A unique identifier for the chat completion.
    pub id: String,
    /// A list of chat completion choices. Can be more than one if n is greater than 1.
    pub choices: Vec<ChatCompletionChoice>,
    /// The Unix timestamp (in seconds) of when the chat completion was created.
    pub created: usize,
    /// The model used for the chat completion.
    pub model: ChatCompleteModel,
    /// This fingerprint represents the backend configuration that the model runs with. Can be used in conjunction with the seed request parameter to understand when backend changes have been made that might impact determinism.
    pub system_fingerprint: Option<String>,
    /// The object type, which is always chat.completion.
    pub object: String,
    /// Usage statistics for the completion request.
    pub usage: ChatCompleteUsage,
}

#[derive(Debug, Clone, Deserialize)]
pub struct ChatCompletionChoice {
    /// The reason the model stopped generating tokens. This will be stop if the model hit a natural stop point or a provided stop sequence, length if the maximum number of tokens specified in the request was reached, content_filter if content was omitted due to a flag from our content filters, tool_calls if the model called a tool, or function_call (deprecated) if the model called a function.
    pub finish_reason: FinishReason,
    /// The index of the choice in the list of choices.
    pub index: usize,
    /// A chat completion message generated by the model.
    pub message: AssistantMessage,
}

#[derive(Debug, Clone, Deserialize)]
pub struct ChatCompleteUsage {
    /// Number of tokens in the generated completion.
    pub completion_tokens: usize,
    /// Number of tokens in the prompt.
    pub prompt_tokens: usize,
    /// Total number of tokens used in the request (prompt + completion).
    pub total_tokens: usize,
}

/// The incremental message payload in a streaming response chunk.
#[derive(Deserialize, Clone, Debug)]
pub struct Delta {
    /// Newly generated content tokens, if any.
    pub content: Option<String>,
    /// Newly generated reasoning tokens (e.g. from deepseek-reasoner), if any.
    pub reasoning_content: Option<String>,
    /// The role of the author of this message, sent on the first chunk.
    pub role: Option<String>,
}

/// A streamed chat completion choice.
#[derive(Deserialize, Clone, Debug)]
pub struct ChatStreamChoice {
    /// The incremental message delta for this choice.
    pub delta: Delta,
    /// The reason the model stopped generating tokens, present on the final chunk.
    pub finish_reason: Option<String>,
    /// The index of the choice in the list of choices.
    pub index: usize,
    /// Log probability information for the choice, if requested.
    pub logprobs: Option<String>,
}

/// One server-sent-event chunk of a streaming chat completion.
#[derive(Deserialize, Clone, Debug)]
pub struct ChatStreamResponse {
    /// The streamed choices for this chunk.
    pub choices: Vec<ChatStreamChoice>,
    /// The Unix timestamp (in seconds) of when the chat completion was created.
    pub created: usize,
    /// A unique identifier for the chat completion; the same for every chunk.
    pub id: String,
    /// The model used for the chat completion.
    pub model: String,
    /// The object type, which is always chat.completion.chunk.
    pub object: String,
    /// The backend configuration fingerprint, if provided.
    pub system_fingerprint: Option<String>,
}

#[derive(
    Debug, Clone, Copy, Default, PartialEq, Eq, Deserialize, EnumString, Display, VariantNames,
)]
#[serde(rename_all = "snake_case")]
pub enum FinishReason {
    #[default]
    Stop,
    Length,
    ContentFilter,
    ToolCalls,
}

impl IntoRequest for ChatCompletionRequest {
    fn into_request(self, base_url: &str, client: ClientWithMiddleware) -> RequestBuilder {
        let url = format!("{}/chat/completions", base_url);
        client.post(url).json(&self)
    }
}
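
// A minimal send sketch (the base URL is illustrative; assumes an async
// context and a plain client without extra middleware):
//
//     let client = reqwest_middleware::ClientBuilder::new(reqwest::Client::new()).build();
//     let res: ChatCompletionResponse = req
//         .into_request("https://api.openai.com/v1", client)
//         .send()
//         .await?
//         .json()
//         .await?;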

impl ChatCompletionRequest {
    pub fn new(model: ChatCompleteModel, messages: impl Into<Vec<ChatCompletionMessage>>) -> Self {
        ChatCompletionRequestBuilder::default()
            .model(model)
            .messages(messages)
            .build()
            .unwrap()
    }

    pub fn new_with_tools(
        model: ChatCompleteModel,
        messages: impl Into<Vec<ChatCompletionMessage>>,
        tools: impl Into<Vec<Tool>>,
    ) -> Self {
        ChatCompletionRequestBuilder::default()
            .model(model)
            .messages(messages)
            .tools(tools)
            .build()
            .unwrap()
    }
}

impl ChatCompletionMessage {
    pub fn new_system(content: impl Into<String>, name: &str) -> ChatCompletionMessage {
        ChatCompletionMessage::System(SystemMessage {
            content: content.into(),
            name: Self::get_name(name),
        })
    }

    pub fn new_user(content: impl Into<String>, name: &str) -> ChatCompletionMessage {
        ChatCompletionMessage::User(UserMessage {
            content: content.into(),
            name: Self::get_name(name),
        })
    }

    fn get_name(name: &str) -> Option<String> {
        if name.is_empty() {
            None
        } else {
            Some(name.into())
        }
    }
}

impl Tool {
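    /// Build a function tool from a type's JSON Schema.
    ///
    /// A minimal sketch (`GetWeatherArgs` here stands for any argument type
    /// implementing `ToSchema`):
    ///
    /// ```ignore
    /// let tool = Tool::new_function::<GetWeatherArgs>(
    ///     "get_weather_forecast",
    ///     "Get the weather forecast for a city.",
    /// );
    /// ```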
    pub fn new_function<T: ToSchema>(
        name: impl Into<String>,
        description: impl Into<String>,
    ) -> Self {
        let parameters = T::to_schema();
        Self {
            r#type: ToolType::Function,
            function: FunctionInfo {
                name: name.into(),
                description: description.into(),
                parameters,
            },
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{SDK, ToSchema};
    use anyhow::Result;
    use schemars::JsonSchema;

    #[allow(dead_code)]
    #[derive(Debug, Clone, Deserialize, JsonSchema)]
    struct GetWeatherArgs {
        /// The city to get the weather for.
        pub city: String,
        /// The temperature unit to use.
        pub unit: TemperatureUnit,
    }

    #[allow(dead_code)]
    #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Deserialize, JsonSchema)]
    enum TemperatureUnit {
        /// Celsius
        #[default]
        Celsius,
        /// Fahrenheit
        Fahrenheit,
    }

    #[derive(Debug, Clone)]
    struct GetWeatherResponse {
        temperature: f32,
        unit: TemperatureUnit,
    }

    #[allow(dead_code)]
    #[derive(Debug, Deserialize, JsonSchema)]
    struct ExplainMoodArgs {
        /// The mood to explain.
        pub name: String,
    }

    fn get_weather_forecast(args: GetWeatherArgs) -> GetWeatherResponse {
        match args.unit {
            TemperatureUnit::Celsius => GetWeatherResponse {
                temperature: 22.2,
                unit: TemperatureUnit::Celsius,
            },
            TemperatureUnit::Fahrenheit => GetWeatherResponse {
                temperature: 72.0,
                unit: TemperatureUnit::Fahrenheit,
            },
        }
    }

    #[test]
    #[ignore]
    fn chat_completion_request_tool_choice_function_serialize_should_work() {
        let req = ChatCompletionRequestBuilder::default()
            .tool_choice(ToolChoice::Function {
                name: "my_function".to_string(),
            })
            .messages(vec![])
            .build()
            .unwrap();
        let json = serde_json::to_value(req).unwrap();
        assert_eq!(
            json,
            serde_json::json!({
              "tool_choice": {
                "type": "function",
                "function": {
                  "name": "my_function"
                }
              },
              "messages": []
            })
        );
    }
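
    // A sketch of the wire shape produced by the role-tagged message enum:
    // the variant name becomes the "role" tag and empty names are dropped.
    #[test]
    fn chat_completion_message_serialize_should_work() {
        let system = ChatCompletionMessage::new_system("You are helpful.", "");
        let user = ChatCompletionMessage::new_user("Hello!", "user1");
        assert_eq!(
            serde_json::to_value(system).unwrap(),
            serde_json::json!({ "role": "system", "content": "You are helpful." })
        );
        assert_eq!(
            serde_json::to_value(user).unwrap(),
            serde_json::json!({ "role": "user", "content": "Hello!", "name": "user1" })
        );
    }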

    #[test]
    fn chat_completion_request_serialize_should_work() {
        let mut req = get_simple_completion_request();
        req.tool_choice = Some(ToolChoice::Auto);
        let json = serde_json::to_value(req).unwrap();
        assert_eq!(
            json,
            serde_json::json!({
              "tool_choice": "auto",
              "model": "gpt-3.5-turbo-1106",
              "messages": [{
                "role": "system",
                "content": "I can answer any question you ask me."
              }, {
                "role": "user",
                "content": "What is human life expectancy in the world?",
                "name": "user1"
              }]
            })
        );
    }
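
    // A sketch of the response_format wire shape; "json_object" is the value
    // the OpenAI API documents for JSON mode.
    #[test]
    fn chat_response_format_serialize_should_work() {
        let format = ChatResponseFormatObject {
            r#type: ChatResponseFormat::Json,
        };
        assert_eq!(
            serde_json::to_value(format).unwrap(),
            serde_json::json!({ "type": "json_object" })
        );
        let format = ChatResponseFormatObject {
            r#type: ChatResponseFormat::Text,
        };
        assert_eq!(
            serde_json::to_value(format).unwrap(),
            serde_json::json!({ "type": "text" })
        );
    }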

    #[test]
    fn chat_completion_request_with_tools_serialize_should_work() {
        let req = get_tool_completion_request();
        let json = serde_json::to_value(req).unwrap();
        assert_eq!(
            json,
            serde_json::json!({
              "model": "gpt-3.5-turbo-1106",
              "messages": [{
                "role": "system",
                "content": "I can choose the right function for you."
              }, {
                "role": "user",
                "content": "What is the weather like in Boston?",
                "name": "user1"
              }],
              "tools": [
                {
                  "type": "function",
                  "function": {
                    "description": "Get the weather forecast for a city.",
                    "name": "get_weather_forecast",
                    "parameters": GetWeatherArgs::to_schema()
                  }
                },
                {
                  "type": "function",
                  "function": {
                    "description": "Explain the meaning of the given mood.",
                    "name": "explain_mood",
                    "parameters": ExplainMoodArgs::to_schema()
                  }
                }
              ]
            })
        );
    }
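
    // A sketch of model-name round-tripping: known models map to their pinned
    // API names, and unknown names fall back to the untagged Other variant.
    #[test]
    fn chat_complete_model_serde_should_work() {
        assert_eq!(
            serde_json::to_value(ChatCompleteModel::Gpt4Turbo).unwrap(),
            serde_json::json!("gpt-4-1106-preview")
        );
        let model: ChatCompleteModel =
            serde_json::from_value(serde_json::json!("gpt-3.5-turbo-1106")).unwrap();
        assert_eq!(model, ChatCompleteModel::Gpt3Turbo);
        let model: ChatCompleteModel =
            serde_json::from_value(serde_json::json!("my-local-model")).unwrap();
        assert_eq!(model, ChatCompleteModel::Other("my-local-model".to_string()));
    }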

    #[tokio::test]
    #[ignore]
    async fn simple_chat_completion_should_work() -> Result<()> {
        let req = get_simple_completion_request();
        let res = SDK.chat_completion(req).await?;
        assert_eq!(res.model, ChatCompleteModel::Gpt3Turbo);
        assert_eq!(res.object, "chat.completion");
        assert_eq!(res.choices.len(), 1);
        let choice = &res.choices[0];
        assert_eq!(choice.finish_reason, FinishReason::Stop);
        assert_eq!(choice.index, 0);
        assert_eq!(choice.message.tool_calls.len(), 0);
        Ok(())
    }
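
    // A sketch of finish_reason decoding from the snake_case strings the API
    // returns.
    #[test]
    fn finish_reason_deserialize_should_work() {
        let reason: FinishReason = serde_json::from_value(serde_json::json!("stop")).unwrap();
        assert_eq!(reason, FinishReason::Stop);
        let reason: FinishReason =
            serde_json::from_value(serde_json::json!("tool_calls")).unwrap();
        assert_eq!(reason, FinishReason::ToolCalls);
    }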

    #[tokio::test]
    #[ignore]
    async fn chat_completion_with_tools_should_work() -> Result<()> {
        let req = get_tool_completion_request();
        let res = SDK.chat_completion(req).await?;
        assert_eq!(res.model, ChatCompleteModel::Gpt3Turbo);
        assert_eq!(res.object, "chat.completion");
        assert_eq!(res.choices.len(), 1);
        let choice = &res.choices[0];
        assert_eq!(choice.finish_reason, FinishReason::ToolCalls);
        assert_eq!(choice.index, 0);
        assert_eq!(choice.message.content, None);
        assert_eq!(choice.message.tool_calls.len(), 1);
        let tool_call = &choice.message.tool_calls[0];
        assert_eq!(tool_call.function.name, "get_weather_forecast");
        let ret = get_weather_forecast(serde_json::from_str(&tool_call.function.arguments)?);
        assert_eq!(ret.unit, TemperatureUnit::Celsius);
        assert_eq!(ret.temperature, 22.2);
        Ok(())
    }
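
    // A sketch of decoding one data-only SSE chunk body into
    // ChatStreamResponse; the payload values are illustrative.
    #[test]
    fn chat_stream_response_deserialize_should_work() {
        let chunk = serde_json::json!({
            "id": "chatcmpl-123",
            "object": "chat.completion.chunk",
            "created": 1700000000,
            "model": "gpt-3.5-turbo-1106",
            "system_fingerprint": null,
            "choices": [{
                "index": 0,
                "delta": { "role": "assistant", "content": "Hello" },
                "finish_reason": null,
                "logprobs": null
            }]
        });
        let res: ChatStreamResponse = serde_json::from_value(chunk).unwrap();
        assert_eq!(res.choices.len(), 1);
        assert_eq!(res.choices[0].delta.content.as_deref(), Some("Hello"));
        assert_eq!(res.choices[0].delta.reasoning_content, None);
    }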

    fn get_simple_completion_request() -> ChatCompletionRequest {
        let messages = vec![
            ChatCompletionMessage::new_system("I can answer any question you ask me.", ""),
            ChatCompletionMessage::new_user("What is human life expectancy in the world?", "user1"),
        ];
        ChatCompletionRequest::new(ChatCompleteModel::Gpt3Turbo, messages)
    }

    fn get_tool_completion_request() -> ChatCompletionRequest {
        let messages = vec![
            ChatCompletionMessage::new_system("I can choose the right function for you.", ""),
            ChatCompletionMessage::new_user("What is the weather like in Boston?", "user1"),
        ];
        let tools = vec![
            Tool::new_function::<GetWeatherArgs>(
                "get_weather_forecast",
                "Get the weather forecast for a city.",
            ),
            Tool::new_function::<ExplainMoodArgs>(
                "explain_mood",
                "Explain the meaning of the given mood.",
            ),
        ];
        ChatCompletionRequest::new_with_tools(ChatCompleteModel::Gpt3Turbo, messages, tools)
    }
}