openai_tools/responses/mod.rs
//! OpenAI Responses API Module
//!
//! This module provides functionality for interacting with the OpenAI Responses API,
//! which is designed for creating AI assistants that can handle various types of input
//! including text, images, and structured data. The Responses API offers a more flexible
//! and powerful way to build conversational AI applications compared to the traditional
//! Chat Completions API.
//!
//! # Key Features
//!
//! - **Multi-modal Input**: Support for text, images, and other content types
//! - **Structured Output**: JSON schema-based response formatting
//! - **Tool Integration**: Function calling capabilities with custom tools
//! - **Flexible Instructions**: System-level instructions for AI behavior
//! - **Rich Content Handling**: Support for complex message structures
//!
//! # Quick Start
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Initialize the responses client
//!     let mut responses = Responses::new();
//!
//!     // Configure basic parameters
//!     responses
//!         .model_id("gpt-5-mini")
//!         .instructions("You are a helpful assistant.");
//!
//!     // Simple text input
//!     responses.str_message("Hello! How are you today?");
//!
//!     // Send the request
//!     let response = responses.complete().await?;
//!
//!     println!("AI Response: {}", response.output_text().unwrap());
//!     Ok(())
//! }
//! ```
//!
//! # Advanced Usage Examples
//!
//! ## Using Message-based Conversations
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::message::Message;
//! use openai_tools::common::role::Role;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!
//!     responses
//!         .model_id("gpt-5-mini")
//!         .instructions("You are a knowledgeable assistant.");
//!
//!     // Create a conversation with multiple messages
//!     let messages = vec![
//!         Message::from_string(Role::User, "What is artificial intelligence?"),
//!         Message::from_string(Role::Assistant, "AI is a field of computer science..."),
//!         Message::from_string(Role::User, "Can you give me a simple example?"),
//!     ];
//!
//!     responses.messages(messages);
//!
//!     let response = responses.complete().await?;
//!     println!("Response: {}", response.output_text().unwrap());
//!     Ok(())
//! }
//! ```
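//!
//! A conversation can also be continued across separate requests by passing the
//! previous response's ID back via `previous_response_id`, the same pattern used
//! by this module's multi-turn test. A minimal sketch, assuming the first request
//! succeeds and returns an ID:
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::message::Message;
//! use openai_tools::common::role::Role;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // First turn
//!     let mut first = Responses::new();
//!     first.model_id("gpt-5-mini");
//!     first.messages(vec![Message::from_string(Role::User, "Hello!")]);
//!     let first_response = first.complete().await?;
//!
//!     // Second turn: link to the first response so the model keeps the context
//!     let mut second = Responses::new();
//!     second.model_id("gpt-5-mini");
//!     second.messages(vec![Message::from_string(Role::User, "What did I just say?")]);
//!     second.previous_response_id(first_response.id.unwrap());
//!
//!     let follow_up = second.complete().await?;
//!     println!("Follow-up: {}", follow_up.output_text().unwrap());
//!     Ok(())
//! }
//! ```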
//!
//! ## Multi-modal Input with Images
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::message::{Message, Content};
//! use openai_tools::common::role::Role;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!
//!     responses
//!         .model_id("gpt-5-mini")
//!         .instructions("You are an image analysis assistant.");
//!
//!     // Create a message with both text and image content
//!     let message = Message::from_message_array(
//!         Role::User,
//!         vec![
//!             Content::from_text("What do you see in this image?"),
//!             Content::from_image_file("path/to/image.jpg"),
//!         ],
//!     );
//!
//!     responses.messages(vec![message]);
//!
//!     let response = responses.complete().await?;
//!     println!("Image analysis: {}", response.output_text().unwrap());
//!     Ok(())
//! }
//! ```
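//!
//! Remote images can be attached by URL as well, using `Content::from_image_url`
//! (the same call exercised by this module's image-input test); the URL below is
//! a placeholder:
//!
//! ```rust,no_run
//! use openai_tools::common::message::{Message, Content};
//! use openai_tools::common::role::Role;
//!
//! // Same message shape as above, but the image is fetched from a URL.
//! let message = Message::from_message_array(
//!     Role::User,
//!     vec![
//!         Content::from_text("Describe this image."),
//!         Content::from_image_url("https://example.com/image.png"),
//!     ],
//! );
//! ```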
//!
//! ## Structured Output with JSON Schema
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::message::Message;
//! use openai_tools::common::role::Role;
//! use openai_tools::common::structured_output::Schema;
//! use serde::{Deserialize, Serialize};
//!
//! #[derive(Debug, Serialize, Deserialize)]
//! struct ProductInfo {
//!     name: String,
//!     price: f64,
//!     category: String,
//!     in_stock: bool,
//! }
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!
//!     responses.model_id("gpt-5-mini");
//!
//!     let messages = vec![
//!         Message::from_string(Role::User,
//!             "Extract product information: 'MacBook Pro 16-inch, $2499, Electronics, Available'")
//!     ];
//!     responses.messages(messages);
//!
//!     // Define JSON schema for structured output
//!     let mut schema = Schema::responses_json_schema("product_info");
//!     schema.add_property("name", "string", "Product name");
//!     schema.add_property("price", "number", "Product price");
//!     schema.add_property("category", "string", "Product category");
//!     schema.add_property("in_stock", "boolean", "Availability status");
//!
//!     responses.structured_output(schema);
//!
//!     let response = responses.complete().await?;
//!
//!     // Parse structured response
//!     let product: ProductInfo = serde_json::from_str(&response.output_text().unwrap())?;
//!
//!     println!("Product: {} - ${} ({})", product.name, product.price, product.category);
//!     Ok(())
//! }
//! ```
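//!
//! Array-valued fields can be declared with `Schema::add_array`, which takes the
//! `(name, type)` pairs for each item's fields (the same call used by this
//! module's JSON-schema test):
//!
//! ```rust,no_run
//! use openai_tools::common::structured_output::Schema;
//!
//! let mut schema = Schema::responses_json_schema("capital_info");
//! schema.add_property("capital", "string", "The capital city of France");
//! // Each item of `countries` is an object with `name` and `population` strings.
//! schema.add_array("countries", vec![("name", "string"), ("population", "string")]);
//! ```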
//!
//! ## Function Calling with Tools
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::message::Message;
//! use openai_tools::common::role::Role;
//! use openai_tools::common::tool::Tool;
//! use openai_tools::common::parameters::ParameterProperty;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!
//!     responses
//!         .model_id("gpt-5-mini")
//!         .instructions("You are a helpful calculator assistant.");
//!
//!     // Define a calculator tool
//!     let calculator_tool = Tool::function(
//!         "calculator",
//!         "Perform basic arithmetic operations",
//!         vec![
//!             ("operation", ParameterProperty::from_string("add, subtract, multiply, or divide")),
//!             ("a", ParameterProperty::from_number("First number")),
//!             ("b", ParameterProperty::from_number("Second number")),
//!         ],
//!         false,
//!     );
//!
//!     let messages = vec![
//!         Message::from_string(Role::User, "Calculate 15 * 7 using the calculator tool")
//!     ];
//!
//!     responses
//!         .messages(messages)
//!         .tools(vec![calculator_tool]);
//!
//!     let response = responses.complete().await?;
//!
//!     // Check if the model made a function call
//!     println!("Response: {}", response.output_text().unwrap());
//!
//!     Ok(())
//! }
//! ```
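//!
//! When the model decides to call a tool, the response carries `function_call`
//! output items rather than plain text. A minimal sketch of inspecting them,
//! following the output-item fields used by this module's tool test
//! (`type_name`, `name`, `call_id`); tool registration is elided here for
//! brevity, so treat this as illustrative:
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!     responses.model_id("gpt-5-mini");
//!     responses.str_message("Calculate 15 * 7 using the calculator tool");
//!     // (tool registration omitted; see the full example above)
//!
//!     let response = responses.complete().await?;
//!
//!     // Each output item with type "function_call" names the requested tool
//!     // and carries a call ID for correlating the eventual tool result.
//!     if let Some(outputs) = response.output.as_ref() {
//!         for output in outputs {
//!             if output.type_name.as_deref() == Some("function_call") {
//!                 println!(
//!                     "Tool requested: {} (call_id: {})",
//!                     output.name.as_ref().unwrap(),
//!                     output.call_id.as_ref().unwrap(),
//!                 );
//!             }
//!         }
//!     }
//!     Ok(())
//! }
//! ```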
//!
//! # API Differences from Chat Completions
//!
//! The Responses API differs from the Chat Completions API in several key ways:
//!
//! - **Input Format**: More flexible input handling with support for various content types
//! - **Output Structure**: Different response format optimized for assistant-style interactions
//! - **Instructions**: Dedicated field for system-level instructions
//! - **Multi-modal**: Native support for images and other media types
//! - **Tool Integration**: Enhanced function calling capabilities
//!
//! # Environment Setup
//!
//! Ensure your OpenAI API key is configured:
//!
//! ```bash
//! export OPENAI_API_KEY="your-api-key-here"
//! ```
//!
//! Or in a `.env` file:
//!
//! ```text
//! OPENAI_API_KEY=your-api-key-here
//! ```
//!
//! # Error Handling
//!
//! All operations return `Result` types for proper error handling:
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//! use openai_tools::common::errors::OpenAIToolError;
//!
//! #[tokio::main]
//! async fn main() {
//!     let mut responses = Responses::new();
//!     responses.str_message("Hello!");
//!
//!     match responses.model_id("gpt-5-mini").complete().await {
//!         Ok(response) => {
//!             println!("Success: {}", response.output_text().unwrap());
//!         }
//!         Err(OpenAIToolError::RequestError(e)) => {
//!             eprintln!("Network error: {}", e);
//!         }
//!         Err(OpenAIToolError::SerdeJsonError(e)) => {
//!             eprintln!("JSON parsing error: {}", e);
//!         }
//!         Err(e) => {
//!             eprintln!("Other error: {}", e);
//!         }
//!     }
//! }
//! ```
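//!
//! Transient failures such as network errors can be handled with a small bounded
//! retry loop, mirroring the retry pattern used by this module's tests. A minimal
//! sketch:
//!
//! ```rust,no_run
//! use openai_tools::responses::request::Responses;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut responses = Responses::new();
//!     responses.model_id("gpt-5-mini");
//!     responses.str_message("Hello!");
//!
//!     // Try up to three times before propagating the error.
//!     let mut attempts = 3;
//!     let response = loop {
//!         match responses.complete().await {
//!             Ok(res) => break res,
//!             Err(e) if attempts > 1 => {
//!                 eprintln!("Request failed: {e}; retrying...");
//!                 attempts -= 1;
//!             }
//!             Err(e) => return Err(e.into()),
//!         }
//!     };
//!     println!("{}", response.output_text().unwrap());
//!     Ok(())
//! }
//! ```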
pub mod request;
pub mod response;

#[cfg(test)]
mod tests {
    use crate::common::{
        message::{Content, Message},
        parameters::ParameterProperty,
        role::Role,
        structured_output::Schema,
        tool::Tool,
    };
    use crate::responses::request::{Include, ReasoningEffort, ReasoningSummary, Responses, Truncation};
    use serde::Deserialize;
    use std::sync::Once;
    use tracing_subscriber::EnvFilter;

    static INIT: Once = Once::new();
    fn init_tracing() {
        INIT.call_once(|| {
            // Use the `RUST_LOG` environment variable if set; otherwise default to "info".
            let filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
            // Use try_init() so this is a no-op when a subscriber is already installed.
            let _ = tracing_subscriber::fmt()
                .with_env_filter(filter)
                .with_test_writer() // for `cargo test` / nextest
                .try_init();
        });
    }

    #[test_log::test(tokio::test)]
    async fn test_init_with_endpoint() {
        init_tracing();
        let mut responses = Responses::from_endpoint("https://api.openai.com/v1/responses");
        responses.model_id("gpt-5-mini");
        responses.instructions("test instructions");
        responses.str_message("Hello world!");

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body: {}", body_json);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Find the message output in the response
                    let message_output = res.output_text().unwrap();
                    tracing::info!("Message output: {}", message_output);
                    assert!(!message_output.is_empty());
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_responses_with_plain_text() {
        init_tracing();
        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini");
        responses.instructions("test instructions");
        responses.str_message("Hello world!");

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body: {}", body_json);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Find the message output in the response
                    let message_output = res.output_text().unwrap();
                    tracing::info!("Message output: {}", message_output);
                    assert!(!message_output.is_empty());
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_responses_with_messages() {
        init_tracing();
        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini");
        responses.instructions("test instructions");
        let messages = vec![Message::from_string(Role::User, "Hello world!")];
        responses.messages(messages);

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body: {}", body_json);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Find the message output in the response
                    let message_output = res.output_text().unwrap();
                    tracing::info!("Message output: {}", message_output);
                    assert!(!message_output.is_empty());
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_responses_with_multi_turn_conversations() {
        init_tracing();

        // First interaction
        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini");
        let messages = vec![Message::from_string(Role::User, "Hello!")];
        responses.messages(messages);

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body: {}", body_json);

        let conversation_id: String;
        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Find the message output in the response
                    let message_output = res.output_text().unwrap();
                    tracing::info!("Message output: {}", message_output);
                    assert!(!message_output.is_empty());

                    // Save the response ID to link the next turn to this one
                    conversation_id = res.id.as_ref().unwrap().clone();

                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }

        // Second interaction in the same conversation
        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini");
        let messages = vec![Message::from_string(Role::User, "What's the weather like today?")];
        responses.messages(messages);
        responses.previous_response_id(conversation_id);

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body for second turn: {}", body_json);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Find the message output in the response
                    let message_output = res.output_text().unwrap();
                    tracing::info!("Message output: {}", message_output);
                    assert!(!message_output.is_empty());
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_responses_with_tools() {
        init_tracing();
        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini");
        responses.instructions("test instructions");
        let messages = vec![Message::from_string(Role::User, "Calculate 2 + 2 using a calculator tool.")];
        responses.messages(messages);

        let tool = Tool::function(
            "calculator",
            "A simple calculator tool",
            vec![
                ("a", ParameterProperty::from_number("The first number")),
                ("b", ParameterProperty::from_number("The second number")),
            ],
            false,
        );
        responses.tools(vec![tool]);

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body: {}", body_json);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Find the function_call output in the response
                    let function_call_output =
                        res.output.as_ref().unwrap().iter().find(|output| output.type_name.as_ref().unwrap() == "function_call").unwrap();
                    assert_eq!(function_call_output.type_name.as_ref().unwrap(), "function_call");
                    assert_eq!(function_call_output.name.as_ref().unwrap(), "calculator");
                    assert!(!function_call_output.call_id.as_ref().unwrap().is_empty());
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_responses_with_json_schema() {
        #[derive(Debug, Deserialize)]
        struct Country {
            pub name: String,
            pub population: String,
        }
        #[derive(Debug, Deserialize)]
        struct TestResponse {
            pub capital: String,
            pub countries: Vec<Country>,
        }

        init_tracing();
        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini");

        let messages = vec![Message::from_string(Role::User, "What is the capital of France? Also, list some countries with their population.")];
        responses.messages(messages);

        let mut schema = Schema::responses_json_schema("capital");
        schema.add_property("capital", "string", "The capital city of France");
        schema.add_array("countries", vec![("name", "string"), ("population", "string")]);
        responses.structured_output(schema);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    tracing::info!("Response: {}", serde_json::to_string_pretty(&res).unwrap());

                    // Parse the structured message output
                    let parsed = serde_json::from_str::<TestResponse>(&res.output_text().unwrap()).unwrap();
                    assert_eq!(parsed.capital, "Paris");
                    assert!(!parsed.countries.is_empty());
                    for country in parsed.countries.iter() {
                        assert!(!country.name.is_empty());
                        assert!(!country.population.is_empty());
                    }
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_responses_with_image_input() {
        init_tracing();

        let mut responses = Responses::new();
        responses.model_id("gpt-5-mini").messages(vec![Message::from_message_array(
            Role::User,
            vec![
                Content::from_text("What do you see in this image?"),
                Content::from_image_url(
                    "https://images.ctfassets.net/kftzwdyauwt9/1cFVP33AOU26mMJmCGDo1S/0029938b700b84cd7caed52124ed508d/OAI_BrandPage_11.png",
                ),
            ],
        )]);

        let mut counter = 3;
        loop {
            match responses.complete().await {
                Ok(res) => {
                    for output in res.output.unwrap().iter() {
                        if output.type_name.as_ref().unwrap() == "message" {
                            tracing::info!("Image analysis: {}", output.content.as_ref().unwrap()[0].text.as_ref().unwrap());
                        }
                    }
                    break;
                }
                Err(e) => {
                    tracing::error!("Error: {} (retrying... {})", e, counter);
                    counter -= 1;
                    if counter == 0 {
                        panic!("Failed to complete responses after 3 attempts");
                    }
                }
            }
        }
    }

    #[test_log::test(tokio::test)]
    async fn test_error_handling_missing_messages() {
        init_tracing();

        let mut responses = Responses::new();

        // Set basic required parameters without messages
        responses.model_id("gpt-4o-mini");
        let response = responses.complete().await;
        tracing::info!("Response result: {:?}", response);
        assert!(response.is_err(), "Expected error due to missing messages");
    }

    #[test_log::test(tokio::test)]
    async fn test_error_handling_empty_messages() {
        init_tracing();

        let mut responses = Responses::new();

        // Set basic required parameters, then provide an empty message list
        responses.model_id("gpt-4o-mini");
        responses.messages(vec![]); // Empty messages
        let response = responses.complete().await;
        tracing::info!("Response result: {:?}", response);
        assert!(response.is_err(), "Expected error due to empty messages");
    }

    #[test_log::test]
    fn test_optional_parameters() {
        // TODO: Test whether optional parameters are correctly reflected in the actual API response
        init_tracing();

        let mut responses = Responses::new();

        // Set basic required parameters
        responses.model_id("gpt-4o-mini");
        responses.str_message("Write a short poem about programming in exactly 50 words.");

        // Test various optional parameters
        responses.temperature(0.7); // Creativity control
        responses.max_output_tokens(100); // Output length limit
        responses.max_tool_calls(2); // Tool call limit
        responses.parallel_tool_calls(true); // Parallel tool execution
        responses.store(false); // Storage preference
        responses.stream(false); // Streaming disabled
        responses.top_logprobs(3); // Log probabilities
        responses.top_p(0.9); // Nucleus sampling
        responses.truncation(Truncation::Auto); // Input truncation

        // Add metadata for tracking
        responses.metadata("test_type".to_string(), serde_json::Value::String("optional_params".to_string()));
        responses.metadata("version".to_string(), serde_json::Value::String("1".to_string()));
        responses.metadata("debug".to_string(), serde_json::Value::String("true".to_string()));

        // Set conversation tracking
        responses.conversation("conv-test-conversation-123");
        responses.safety_identifier("moderate");
        responses.service_tier("default");

        // Add reasoning configuration
        responses.reasoning(ReasoningEffort::Medium, ReasoningSummary::Concise);

        // Set background processing and includes (using valid API values)
        responses.background(false);
        responses.include(vec![
            Include::WebSearchCall,             // "web_search_call.results"
            Include::ReasoningEncryptedContent, // "reasoning.encrypted_content"
        ]);

        let body_json = serde_json::to_string_pretty(&responses.request_body).unwrap();
        tracing::info!("Request body with optional parameters: {}", body_json);

        // Verify that all optional parameters are set correctly in the request body
        assert_eq!(responses.request_body.temperature, Some(0.7));
        assert_eq!(responses.request_body.max_output_tokens, Some(100));
        assert_eq!(responses.request_body.max_tool_calls, Some(2));
        assert_eq!(responses.request_body.parallel_tool_calls, Some(true));
        assert_eq!(responses.request_body.store, Some(false));
        assert_eq!(responses.request_body.stream, Some(false));
        assert_eq!(responses.request_body.top_logprobs, Some(3));
        assert_eq!(responses.request_body.top_p, Some(0.9));
        assert!(matches!(responses.request_body.truncation, Some(Truncation::Auto)));
        assert_eq!(responses.request_body.conversation, Some("conv-test-conversation-123".to_string()));
        assert_eq!(responses.request_body.safety_identifier, Some("moderate".to_string()));
        assert_eq!(responses.request_body.service_tier, Some("default".to_string()));
        assert_eq!(responses.request_body.background, Some(false));
        assert!(responses.request_body.metadata.is_some());
        assert!(responses.request_body.reasoning.is_some());
        assert!(responses.request_body.include.is_some());

        // Verify metadata content
        let metadata = responses.request_body.metadata.as_ref().unwrap();
        assert_eq!(metadata.get("test_type"), Some(&serde_json::Value::String("optional_params".to_string())));
        assert_eq!(metadata.get("version"), Some(&serde_json::Value::String("1".to_string())));
        assert_eq!(metadata.get("debug"), Some(&serde_json::Value::String("true".to_string())));

        // Verify reasoning configuration
        let reasoning = responses.request_body.reasoning.as_ref().unwrap();
        assert!(matches!(reasoning.effort, Some(ReasoningEffort::Medium)));
        assert!(matches!(reasoning.summary, Some(ReasoningSummary::Concise)));

        // Verify include fields
        let includes = responses.request_body.include.as_ref().unwrap();
        assert!(includes.contains(&Include::WebSearchCall));
        assert!(includes.contains(&Include::ReasoningEncryptedContent));

        // Instead of making an actual API call (which would require a valid API key
        // and incur usage costs), verify that the request body serializes to JSON
        // without errors.
        let json_result = serde_json::to_string_pretty(&responses.request_body);
        assert!(json_result.is_ok(), "Failed to serialize request body to JSON: {:?}", json_result.err());

        let json_body = json_result.unwrap();
        tracing::info!("Successfully serialized request body with all optional parameters");

        // Verify key fields are present in the JSON
        assert!(json_body.contains("\"temperature\": 0.7"));
        assert!(json_body.contains("\"max_output_tokens\": 100"));
        assert!(json_body.contains("\"reasoning\""));
        assert!(json_body.contains("\"include\""));
        assert!(json_body.contains("\"metadata\""));

        tracing::info!("All optional parameters test passed successfully");
    }
}