rig/completion/request.rs
1//! This module provides functionality for working with completion models.
2//! It provides traits, structs, and enums for generating completion requests,
3//! handling completion responses, and defining completion models.
4//!
5//! The main traits defined in this module are:
6//! - [Prompt]: Defines a high-level LLM one-shot prompt interface.
7//! - [Chat]: Defines a high-level LLM chat interface with chat history.
8//! - [Completion]: Defines a low-level LLM completion interface for generating completion requests.
9//! - [CompletionModel]: Defines a completion model that can be used to generate completion
10//! responses from requests.
11//!
12//! The [Prompt] and [Chat] traits are high level traits that users are expected to use
13//! to interact with LLM models. Moreover, it is good practice to implement one of these
14//! traits for composite agents that use multiple LLM models to generate responses.
15//!
//! The [Completion] trait defines a lower level interface that is useful when the user wants
17//! to further customize the request before sending it to the completion model provider.
18//!
19//! The [CompletionModel] trait is meant to act as the interface between providers and
20//! the library. It defines the methods that need to be implemented by the user to define
21//! a custom base completion model (i.e.: a private or third party LLM provider).
22//!
23//! The module also provides various structs and enums for representing generic completion requests,
24//! responses, and errors.
25//!
26//! Example Usage:
27//! ```rust
28//! use rig::providers::openai::{Client, self};
29//! use rig::completion::*;
30//!
31//! // Initialize the OpenAI client and a completion model
32//! let openai = Client::new("your-openai-api-key");
33//!
34//! let gpt_4 = openai.completion_model(openai::GPT_4);
35//!
36//! // Create the completion request
37//! let request = gpt_4.completion_request("Who are you?")
38//! .preamble("\
39//! You are Marvin, an extremely smart but depressed robot who is \
40//! nonetheless helpful towards humanity.\
41//! ")
42//! .temperature(0.5)
43//! .build();
44//!
45//! // Send the completion request and get the completion response
46//! let response = gpt_4.completion(request)
47//! .await
48//! .expect("Failed to get completion response");
49//!
50//! // Handle the completion response
//! match response.choice {
52//! ModelChoice::Message(message) => {
53//! // Handle the completion response as a message
54//! println!("Received message: {}", message);
55//! }
56//! ModelChoice::ToolCall(tool_name, tool_params) => {
57//! // Handle the completion response as a tool call
58//! println!("Received tool call: {} {:?}", tool_name, tool_params);
59//! }
60//! }
61//! ```
62//!
63//! For more information on how to use the completion functionality, refer to the documentation of
64//! the individual traits, structs, and enums defined in this module.
65
66use super::message::{AssistantContent, ContentFormat, DocumentMediaType};
67use crate::client::completion::CompletionModelHandle;
68use crate::streaming::StreamingCompletionResponse;
69use crate::{OneOrMany, streaming};
70use crate::{
71 json_utils,
72 message::{Message, UserContent},
73 tool::ToolSetError,
74};
75use futures::future::BoxFuture;
76use serde::de::DeserializeOwned;
77use serde::{Deserialize, Serialize};
78use std::collections::HashMap;
79use std::ops::{Add, AddAssign};
80use std::sync::Arc;
81use thiserror::Error;
82
// ================================================================
// Errors
// ================================================================
/// Errors that can occur while building, sending, or parsing a completion request.
#[derive(Debug, Error)]
pub enum CompletionError {
    /// Http error (e.g.: connection error, timeout, etc.)
    #[error("HttpError: {0}")]
    HttpError(#[from] reqwest::Error),

    /// Json error (e.g.: serialization, deserialization)
    #[error("JsonError: {0}")]
    JsonError(#[from] serde_json::Error),

    /// Url error (e.g.: invalid URL)
    #[error("UrlError: {0}")]
    UrlError(#[from] url::ParseError),

    /// Error building the completion request
    #[error("RequestError: {0}")]
    RequestError(#[from] Box<dyn std::error::Error + Send + Sync + 'static>),

    /// Error parsing the completion response
    #[error("ResponseError: {0}")]
    ResponseError(String),

    /// Error returned by the completion model provider
    #[error("ProviderError: {0}")]
    ProviderError(String),
}
110
/// Errors that can occur while resolving a high-level prompt
/// (the completion itself, or the tool calls it triggers).
#[derive(Debug, Error)]
pub enum PromptError {
    /// Something went wrong with the completion
    #[error("CompletionError: {0}")]
    CompletionError(#[from] CompletionError),

    /// There was an error while using a tool
    #[error("ToolCallError: {0}")]
    ToolError(#[from] ToolSetError),

    /// The LLM tried to call too many tools during a multi-turn conversation.
    /// To fix this, you may either need to lower the amount of tools your model has access to (and then create other agents to share the tool load)
    /// or increase the amount of turns given in `.multi_turn()`.
    #[error("MaxDepthError: (reached limit: {max_depth})")]
    MaxDepthError {
        /// The turn limit that was reached.
        max_depth: usize,
        /// The chat history accumulated up to the point the limit was hit.
        chat_history: Vec<Message>,
        /// The prompt that started the conversation.
        prompt: Message,
    },
}
132
/// A text document that can be attached to a completion request as context.
/// Rendered via [`std::fmt::Display`] into a `<file id: …>` block.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Document {
    // Identifier of the document; rendered into the `<file id: …>` tag.
    pub id: String,
    // The document's text content.
    pub text: String,
    // Arbitrary extra key/value metadata; `#[serde(flatten)]` merges these
    // into the same JSON object as `id` and `text` when (de)serializing.
    #[serde(flatten)]
    pub additional_props: HashMap<String, String>,
}
140
141impl std::fmt::Display for Document {
142 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
143 write!(
144 f,
145 concat!("<file id: {}>\n", "{}\n", "</file>\n"),
146 self.id,
147 if self.additional_props.is_empty() {
148 self.text.clone()
149 } else {
150 let mut sorted_props = self.additional_props.iter().collect::<Vec<_>>();
151 sorted_props.sort_by(|a, b| a.0.cmp(b.0));
152 let metadata = sorted_props
153 .iter()
154 .map(|(k, v)| format!("{k}: {v:?}"))
155 .collect::<Vec<_>>()
156 .join(" ");
157 format!("<metadata {} />\n{}", metadata, self.text)
158 }
159 )
160 }
161}
162
/// Definition of a tool that can be offered to the completion model.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct ToolDefinition {
    // Name the model uses to reference the tool.
    pub name: String,
    // Description of what the tool does, shown to the model.
    pub description: String,
    // Free-form JSON describing the tool's expected arguments
    // (presumably a JSON schema — confirm against provider expectations).
    pub parameters: serde_json::Value,
}
169
170// ================================================================
171// Implementations
172// ================================================================
/// Trait defining a high-level LLM simple prompt interface (i.e.: prompt in, response out).
pub trait Prompt: Send + Sync {
    /// Send a simple prompt to the underlying completion model.
    ///
    /// If the completion model's response is a message, then it is returned as a string.
    ///
    /// If the completion model's response is a tool call, then the tool is called and
    /// the result is returned as a string.
    ///
    /// If the tool does not exist, or the tool call fails, then an error is returned.
    ///
    /// The returned value implements [`std::future::IntoFuture`], so it can be
    /// `.await`ed directly at the call site.
    fn prompt(
        &self,
        prompt: impl Into<Message> + Send,
    ) -> impl std::future::IntoFuture<Output = Result<String, PromptError>, IntoFuture: Send>;
}
188
/// Trait defining a high-level LLM chat interface (i.e.: prompt and chat history in, response out).
pub trait Chat: Send + Sync {
    /// Send a prompt with optional chat history to the underlying completion model.
    ///
    /// If the completion model's response is a message, then it is returned as a string.
    ///
    /// If the completion model's response is a tool call, then the tool is called and the result
    /// is returned as a string.
    ///
    /// If the tool does not exist, or the tool call fails, then an error is returned.
    ///
    /// The returned value implements [`std::future::IntoFuture`], so it can be
    /// `.await`ed directly at the call site.
    fn chat(
        &self,
        prompt: impl Into<Message> + Send,
        chat_history: Vec<Message>,
    ) -> impl std::future::IntoFuture<Output = Result<String, PromptError>, IntoFuture: Send>;
}
205
/// Trait defining a low-level LLM completion interface.
///
/// `M` is the concrete [`CompletionModel`] the returned builder will target.
pub trait Completion<M: CompletionModel> {
    /// Generates a completion request builder for the given `prompt` and `chat_history`.
    /// This function is meant to be called by the user to further customize the
    /// request at prompt time before sending it.
    ///
    /// ❗IMPORTANT: The type that implements this trait might have already
    /// populated fields in the builder (the exact fields depend on the type).
    /// For fields that have already been set by the model, calling the corresponding
    /// method on the builder will overwrite the value set by the model.
    ///
    /// For example, the request builder returned by [`Agent::completion`](crate::agent::Agent::completion) will already
    /// contain the `preamble` provided when creating the agent.
    fn completion(
        &self,
        prompt: impl Into<Message> + Send,
        chat_history: Vec<Message>,
    ) -> impl std::future::Future<Output = Result<CompletionRequestBuilder<M>, CompletionError>> + Send;
}
225
/// General completion response struct that contains the high-level completion choice
/// and the raw response. The completion choice contains one or more assistant content.
///
/// `T` is the provider-specific raw response type (set to `()` when the
/// response has been type-erased, see [`CompletionModelDyn`]).
#[derive(Debug)]
pub struct CompletionResponse<T> {
    /// The completion choice (represented by one or more assistant message content)
    /// returned by the completion model provider
    pub choice: OneOrMany<AssistantContent>,
    /// Tokens used during prompting and responding
    pub usage: Usage,
    /// The raw response returned by the completion model provider
    pub raw_response: T,
}
238
/// A trait for grabbing the token usage of a completion response.
///
/// Primarily designed for streamed completion responses in streamed multi-turn, as otherwise it would be impossible to do.
pub trait GetTokenUsage {
    /// Returns the token usage for this response, or `None` when no usage
    /// metrics are available.
    fn token_usage(&self) -> Option<crate::completion::Usage>;
}
245
/// The unit type carries no provider payload, so it can never report usage.
impl GetTokenUsage for () {
    fn token_usage(&self) -> Option<crate::completion::Usage> {
        None
    }
}
251
/// Struct representing the token usage for a completion request.
/// If tokens used are `0`, then the provider failed to supply token usage metrics.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Serialize, Deserialize)]
pub struct Usage {
    /// The number of input ("prompt") tokens used in a given request.
    pub input_tokens: u64,
    /// The number of output ("completion") tokens used in a given request.
    pub output_tokens: u64,
    /// The total token count.
    /// We store this separately as some providers may only report one number,
    /// in which case it may not equal `input_tokens + output_tokens`.
    pub total_tokens: u64,
}
263
264impl Usage {
265 /// Creates a new instance of `Usage`.
266 pub fn new() -> Self {
267 Self {
268 input_tokens: 0,
269 output_tokens: 0,
270 total_tokens: 0,
271 }
272 }
273}
274
impl Default for Usage {
    /// Defaults to an all-zero usage report (same as [`Usage::new`]).
    fn default() -> Self {
        Self::new()
    }
}
280
281impl Add for Usage {
282 type Output = Self;
283
284 fn add(self, other: Self) -> Self::Output {
285 Self {
286 input_tokens: self.input_tokens + other.input_tokens,
287 output_tokens: self.output_tokens + other.output_tokens,
288 total_tokens: self.total_tokens + other.total_tokens,
289 }
290 }
291}
292
293impl AddAssign for Usage {
294 fn add_assign(&mut self, other: Self) {
295 self.input_tokens += other.input_tokens;
296 self.output_tokens += other.output_tokens;
297 self.total_tokens += other.total_tokens;
298 }
299}
300
/// Trait defining a completion model that can be used to generate completion responses.
/// This trait is meant to be implemented by the user to define a custom completion model,
/// either from a third party provider (e.g.: OpenAI) or a local model.
pub trait CompletionModel: Clone + Send + Sync {
    /// The raw response type returned by the underlying completion model.
    type Response: Send + Sync + Serialize + DeserializeOwned;
    /// The raw response type returned by the underlying completion model when streaming.
    type StreamingResponse: Clone
        + Unpin
        + Send
        + Sync
        + Serialize
        + DeserializeOwned
        + GetTokenUsage;

    /// Generates a completion response for the given completion request.
    fn completion(
        &self,
        request: CompletionRequest,
    ) -> impl std::future::Future<
        Output = Result<CompletionResponse<Self::Response>, CompletionError>,
    > + Send;

    /// Generates a streaming completion response for the given completion request.
    fn stream(
        &self,
        request: CompletionRequest,
    ) -> impl std::future::Future<
        Output = Result<StreamingCompletionResponse<Self::StreamingResponse>, CompletionError>,
    > + Send;

    /// Generates a completion request builder for the given `prompt`.
    fn completion_request(&self, prompt: impl Into<Message>) -> CompletionRequestBuilder<Self> {
        CompletionRequestBuilder::new(self.clone(), prompt)
    }
}
/// Object-safe counterpart of [`CompletionModel`].
///
/// The model's associated response types are erased (raw responses become `()`)
/// so that differently-typed models can be used behind a single handle.
pub trait CompletionModelDyn: Send + Sync {
    /// Generates a completion response, discarding the provider's raw response.
    fn completion(
        &self,
        request: CompletionRequest,
    ) -> BoxFuture<'_, Result<CompletionResponse<()>, CompletionError>>;

    /// Generates a streaming completion response with the raw response type erased.
    fn stream(
        &self,
        request: CompletionRequest,
    ) -> BoxFuture<'_, Result<StreamingCompletionResponse<()>, CompletionError>>;

    /// Generates a completion request builder for the given `prompt`.
    fn completion_request(
        &self,
        prompt: Message,
    ) -> CompletionRequestBuilder<CompletionModelHandle<'_>>;
}
352
/// Blanket implementation: every [`CompletionModel`] is usable as a
/// type-erased [`CompletionModelDyn`].
impl<T, R> CompletionModelDyn for T
where
    T: CompletionModel<StreamingResponse = R>,
    R: Clone + Unpin + GetTokenUsage + 'static,
{
    fn completion(
        &self,
        request: CompletionRequest,
    ) -> BoxFuture<'_, Result<CompletionResponse<()>, CompletionError>> {
        Box::pin(async move {
            self.completion(request)
                .await
                .map(|resp| CompletionResponse {
                    choice: resp.choice,
                    usage: resp.usage,
                    // Type-erase the provider's raw response.
                    raw_response: (),
                })
        })
    }

    fn stream(
        &self,
        request: CompletionRequest,
    ) -> BoxFuture<'_, Result<StreamingCompletionResponse<()>, CompletionError>> {
        Box::pin(async move {
            let resp = self.stream(request).await?;
            let inner = resp.inner;

            // Wrap the typed stream in a dynamic adapter that erases `R`.
            // NOTE(review): only `resp.inner` is carried over here — any token
            // usage surfaced via `GetTokenUsage` on the typed response looks
            // like it is dropped; confirm `StreamingResultDyn` re-surfaces it.
            let stream = Box::pin(streaming::StreamingResultDyn {
                inner: Box::pin(inner),
            });

            Ok(StreamingCompletionResponse::stream(stream))
        })
    }

    /// Generates a completion request builder for the given `prompt`.
    fn completion_request(
        &self,
        prompt: Message,
    ) -> CompletionRequestBuilder<CompletionModelHandle<'_>> {
        CompletionRequestBuilder::new(
            // Hand the builder a cloneable, type-erased handle to this model.
            CompletionModelHandle {
                inner: Arc::new(self.clone()),
            },
            prompt,
        )
    }
}
402
/// Struct representing a general completion request that can be sent to a completion model provider.
///
/// Usually constructed through a [`CompletionRequestBuilder`] rather than directly.
#[derive(Debug, Clone)]
pub struct CompletionRequest {
    /// The preamble to be sent to the completion model provider
    pub preamble: Option<String>,
    /// The chat history to be sent to the completion model provider.
    /// The very last message will always be the prompt (hence why there is *always* one)
    pub chat_history: OneOrMany<Message>,
    /// The documents to be sent to the completion model provider
    pub documents: Vec<Document>,
    /// The tools to be sent to the completion model provider
    pub tools: Vec<ToolDefinition>,
    /// The temperature to be sent to the completion model provider
    pub temperature: Option<f64>,
    /// The max tokens to be sent to the completion model provider
    pub max_tokens: Option<u64>,
    /// Additional provider-specific parameters to be sent to the completion model provider
    pub additional_params: Option<serde_json::Value>,
}
422
423impl CompletionRequest {
424 /// Returns documents normalized into a message (if any).
425 /// Most providers do not accept documents directly as input, so it needs to convert into a
426 /// `Message` so that it can be incorporated into `chat_history` as a
427 pub fn normalized_documents(&self) -> Option<Message> {
428 if self.documents.is_empty() {
429 return None;
430 }
431
432 // Most providers will convert documents into a text unless it can handle document messages.
433 // We use `UserContent::document` for those who handle it directly!
434 let messages = self
435 .documents
436 .iter()
437 .map(|doc| {
438 UserContent::document(
439 doc.to_string(),
440 // In the future, we can customize `Document` to pass these extra types through.
441 // Most providers ditch these but they might want to use them.
442 Some(ContentFormat::String),
443 Some(DocumentMediaType::TXT),
444 )
445 })
446 .collect::<Vec<_>>();
447
448 Some(Message::User {
449 content: OneOrMany::many(messages).expect("There will be atleast one document"),
450 })
451 }
452}
453
454/// Builder struct for constructing a completion request.
455///
456/// Example usage:
457/// ```rust
458/// use rig::{
459/// providers::openai::{Client, self},
460/// completion::CompletionRequestBuilder,
461/// };
462///
463/// let openai = Client::new("your-openai-api-key");
464/// let model = openai.completion_model(openai::GPT_4O).build();
465///
466/// // Create the completion request and execute it separately
467/// let request = CompletionRequestBuilder::new(model, "Who are you?".to_string())
468/// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string())
469/// .temperature(0.5)
470/// .build();
471///
472/// let response = model.completion(request)
473/// .await
474/// .expect("Failed to get completion response");
475/// ```
476///
477/// Alternatively, you can execute the completion request directly from the builder:
478/// ```rust
479/// use rig::{
480/// providers::openai::{Client, self},
481/// completion::CompletionRequestBuilder,
482/// };
483///
484/// let openai = Client::new("your-openai-api-key");
485/// let model = openai.completion_model(openai::GPT_4O).build();
486///
487/// // Create the completion request and execute it directly
488/// let response = CompletionRequestBuilder::new(model, "Who are you?".to_string())
489/// .preamble("You are Marvin from the Hitchhiker's Guide to the Galaxy.".to_string())
490/// .temperature(0.5)
491/// .send()
492/// .await
493/// .expect("Failed to get completion response");
494/// ```
495///
496/// Note: It is usually unnecessary to create a completion request builder directly.
497/// Instead, use the [CompletionModel::completion_request] method.
pub struct CompletionRequestBuilder<M: CompletionModel> {
    // The model the request targets; also used by `send`/`stream`.
    model: M,
    // The prompt; appended as the final chat-history message on `build`.
    prompt: Message,
    // Optional system preamble.
    preamble: Option<String>,
    // Prior conversation messages, in order.
    chat_history: Vec<Message>,
    // Documents to attach as context.
    documents: Vec<Document>,
    // Tools the model may call.
    tools: Vec<ToolDefinition>,
    // Optional sampling temperature.
    temperature: Option<f64>,
    // Optional max output tokens (required by some providers, e.g. Anthropic).
    max_tokens: Option<u64>,
    // Provider-specific parameters forwarded with the request.
    additional_params: Option<serde_json::Value>,
}
509
510impl<M: CompletionModel> CompletionRequestBuilder<M> {
511 pub fn new(model: M, prompt: impl Into<Message>) -> Self {
512 Self {
513 model,
514 prompt: prompt.into(),
515 preamble: None,
516 chat_history: Vec::new(),
517 documents: Vec::new(),
518 tools: Vec::new(),
519 temperature: None,
520 max_tokens: None,
521 additional_params: None,
522 }
523 }
524
525 /// Sets the preamble for the completion request.
526 pub fn preamble(mut self, preamble: String) -> Self {
527 self.preamble = Some(preamble);
528 self
529 }
530
531 /// Adds a message to the chat history for the completion request.
532 pub fn message(mut self, message: Message) -> Self {
533 self.chat_history.push(message);
534 self
535 }
536
537 /// Adds a list of messages to the chat history for the completion request.
538 pub fn messages(self, messages: Vec<Message>) -> Self {
539 messages
540 .into_iter()
541 .fold(self, |builder, msg| builder.message(msg))
542 }
543
544 /// Adds a document to the completion request.
545 pub fn document(mut self, document: Document) -> Self {
546 self.documents.push(document);
547 self
548 }
549
550 /// Adds a list of documents to the completion request.
551 pub fn documents(self, documents: Vec<Document>) -> Self {
552 documents
553 .into_iter()
554 .fold(self, |builder, doc| builder.document(doc))
555 }
556
557 /// Adds a tool to the completion request.
558 pub fn tool(mut self, tool: ToolDefinition) -> Self {
559 self.tools.push(tool);
560 self
561 }
562
563 /// Adds a list of tools to the completion request.
564 pub fn tools(self, tools: Vec<ToolDefinition>) -> Self {
565 tools
566 .into_iter()
567 .fold(self, |builder, tool| builder.tool(tool))
568 }
569
570 /// Adds additional parameters to the completion request.
571 /// This can be used to set additional provider-specific parameters. For example,
572 /// Cohere's completion models accept a `connectors` parameter that can be used to
573 /// specify the data connectors used by Cohere when executing the completion
574 /// (see `examples/cohere_connectors.rs`).
575 pub fn additional_params(mut self, additional_params: serde_json::Value) -> Self {
576 match self.additional_params {
577 Some(params) => {
578 self.additional_params = Some(json_utils::merge(params, additional_params));
579 }
580 None => {
581 self.additional_params = Some(additional_params);
582 }
583 }
584 self
585 }
586
587 /// Sets the additional parameters for the completion request.
588 /// This can be used to set additional provider-specific parameters. For example,
589 /// Cohere's completion models accept a `connectors` parameter that can be used to
590 /// specify the data connectors used by Cohere when executing the completion
591 /// (see `examples/cohere_connectors.rs`).
592 pub fn additional_params_opt(mut self, additional_params: Option<serde_json::Value>) -> Self {
593 self.additional_params = additional_params;
594 self
595 }
596
597 /// Sets the temperature for the completion request.
598 pub fn temperature(mut self, temperature: f64) -> Self {
599 self.temperature = Some(temperature);
600 self
601 }
602
603 /// Sets the temperature for the completion request.
604 pub fn temperature_opt(mut self, temperature: Option<f64>) -> Self {
605 self.temperature = temperature;
606 self
607 }
608
609 /// Sets the max tokens for the completion request.
610 /// Note: This is required if using Anthropic
611 pub fn max_tokens(mut self, max_tokens: u64) -> Self {
612 self.max_tokens = Some(max_tokens);
613 self
614 }
615
616 /// Sets the max tokens for the completion request.
617 /// Note: This is required if using Anthropic
618 pub fn max_tokens_opt(mut self, max_tokens: Option<u64>) -> Self {
619 self.max_tokens = max_tokens;
620 self
621 }
622
623 /// Builds the completion request.
624 pub fn build(self) -> CompletionRequest {
625 let chat_history = OneOrMany::many([self.chat_history, vec![self.prompt]].concat())
626 .expect("There will always be atleast the prompt");
627
628 CompletionRequest {
629 preamble: self.preamble,
630 chat_history,
631 documents: self.documents,
632 tools: self.tools,
633 temperature: self.temperature,
634 max_tokens: self.max_tokens,
635 additional_params: self.additional_params,
636 }
637 }
638
639 /// Sends the completion request to the completion model provider and returns the completion response.
640 pub async fn send(self) -> Result<CompletionResponse<M::Response>, CompletionError> {
641 let model = self.model.clone();
642 model.completion(self.build()).await
643 }
644
645 /// Stream the completion request
646 pub async fn stream<'a>(
647 self,
648 ) -> Result<StreamingCompletionResponse<M::StreamingResponse>, CompletionError>
649 where
650 <M as CompletionModel>::StreamingResponse: 'a,
651 Self: 'a,
652 {
653 let model = self.model.clone();
654 model.stream(self.build()).await
655 }
656}
657
658#[cfg(test)]
659mod tests {
660
661 use super::*;
662
663 #[test]
664 fn test_document_display_without_metadata() {
665 let doc = Document {
666 id: "123".to_string(),
667 text: "This is a test document.".to_string(),
668 additional_props: HashMap::new(),
669 };
670
671 let expected = "<file id: 123>\nThis is a test document.\n</file>\n";
672 assert_eq!(format!("{doc}"), expected);
673 }
674
675 #[test]
676 fn test_document_display_with_metadata() {
677 let mut additional_props = HashMap::new();
678 additional_props.insert("author".to_string(), "John Doe".to_string());
679 additional_props.insert("length".to_string(), "42".to_string());
680
681 let doc = Document {
682 id: "123".to_string(),
683 text: "This is a test document.".to_string(),
684 additional_props,
685 };
686
687 let expected = concat!(
688 "<file id: 123>\n",
689 "<metadata author: \"John Doe\" length: \"42\" />\n",
690 "This is a test document.\n",
691 "</file>\n"
692 );
693 assert_eq!(format!("{doc}"), expected);
694 }
695
696 #[test]
697 fn test_normalize_documents_with_documents() {
698 let doc1 = Document {
699 id: "doc1".to_string(),
700 text: "Document 1 text.".to_string(),
701 additional_props: HashMap::new(),
702 };
703
704 let doc2 = Document {
705 id: "doc2".to_string(),
706 text: "Document 2 text.".to_string(),
707 additional_props: HashMap::new(),
708 };
709
710 let request = CompletionRequest {
711 preamble: None,
712 chat_history: OneOrMany::one("What is the capital of France?".into()),
713 documents: vec![doc1, doc2],
714 tools: Vec::new(),
715 temperature: None,
716 max_tokens: None,
717 additional_params: None,
718 };
719
720 let expected = Message::User {
721 content: OneOrMany::many(vec![
722 UserContent::document(
723 "<file id: doc1>\nDocument 1 text.\n</file>\n".to_string(),
724 Some(ContentFormat::String),
725 Some(DocumentMediaType::TXT),
726 ),
727 UserContent::document(
728 "<file id: doc2>\nDocument 2 text.\n</file>\n".to_string(),
729 Some(ContentFormat::String),
730 Some(DocumentMediaType::TXT),
731 ),
732 ])
733 .expect("There will be at least one document"),
734 };
735
736 assert_eq!(request.normalized_documents(), Some(expected));
737 }
738
    /// `normalized_documents` returns `None` when the request has no documents.
    #[test]
    fn test_normalize_documents_without_documents() {
        let request = CompletionRequest {
            preamble: None,
            chat_history: OneOrMany::one("What is the capital of France?".into()),
            documents: Vec::new(),
            tools: Vec::new(),
            temperature: None,
            max_tokens: None,
            additional_params: None,
        };

        assert_eq!(request.normalized_documents(), None);
}
753}