pub mod request;
pub mod response;
// Unit tests for the Responses request builder (`crate::responses::request`).
//
// Coverage, in order:
//   1. basic builder setters (model, instructions, input, tools, structured output)
//   2. validated / filtered parameters (temperature, top_p, top_logprobs)
//   3. serde serialization of enums, config structs, and the full request body
//   4. model-dependent parameter filtering (o-series and gpt-5-series drop
//      sampling parameters; see the "reasoning model" tests below)
//   5. tool_choice / prompt / prompt-cache plumbing
#[cfg(test)]
mod tests {
use crate::common::{
message::{Content, Message},
models::ChatModel,
parameters::ParameterProperty,
role::Role,
structured_output::Schema,
tool::Tool,
};
use crate::responses::request::{Include, ReasoningEffort, ReasoningSummary, Responses, TextConfig, TextVerbosity, Truncation};
// --- Basic builder setters: each test checks the setter writes through to
// --- the corresponding `request_body` field.
#[test]
fn test_responses_builder_model() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
assert_eq!(responses.request_body.model, ChatModel::Gpt4oMini);
}
#[test]
fn test_responses_builder_instructions() {
let mut responses = Responses::new();
responses.instructions("You are a helpful assistant.");
assert_eq!(responses.request_body.instructions, Some("You are a helpful assistant.".to_string()));
}
#[test]
fn test_responses_builder_str_message() {
let mut responses = Responses::new();
responses.str_message("Hello world!");
// `str_message` uses the plain-text input slot, distinct from `messages_input`.
assert_eq!(responses.request_body.plain_text_input, Some("Hello world!".to_string()));
}
#[test]
fn test_responses_builder_messages() {
let mut responses = Responses::new();
let messages = vec![Message::from_string(Role::User, "Hello!")];
responses.messages(messages);
assert!(responses.request_body.messages_input.is_some());
assert_eq!(responses.request_body.messages_input.as_ref().unwrap().len(), 1);
}
#[test]
fn test_responses_builder_tools() {
let mut responses = Responses::new();
let tool = Tool::function(
"calculator",
"A simple calculator",
vec![("a", ParameterProperty::from_number("First number")), ("b", ParameterProperty::from_number("Second number"))],
false,
);
responses.tools(vec![tool]);
assert!(responses.request_body.tools.is_some());
assert_eq!(responses.request_body.tools.as_ref().unwrap().len(), 1);
}
#[test]
fn test_responses_builder_structured_output() {
let mut responses = Responses::new();
let mut schema = Schema::responses_json_schema("test");
schema.add_property("name", "string", "A name");
responses.structured_output(schema);
assert!(responses.request_body.structured_output.is_some());
}
#[test]
fn test_responses_builder_temperature() {
let mut responses = Responses::new();
responses.temperature(0.7);
assert_eq!(responses.request_body.temperature, Some(0.7));
}
// Out-of-range temperature is a programmer error: the setter panics rather
// than silently clamping. The `expected` string must match the panic message.
#[test]
#[should_panic(expected = "Temperature must be between 0.0 and 2.0")]
fn test_responses_builder_temperature_invalid() {
let mut responses = Responses::new();
responses.temperature(3.0); }
#[test]
fn test_responses_builder_max_output_tokens() {
let mut responses = Responses::new();
responses.max_output_tokens(100);
assert_eq!(responses.request_body.max_output_tokens, Some(100));
}
#[test]
fn test_responses_builder_metadata() {
let mut responses = Responses::new();
// Repeated calls accumulate entries rather than replacing the map.
responses.metadata("key1".to_string(), serde_json::Value::String("value1".to_string()));
responses.metadata("key2".to_string(), serde_json::Value::Number(42.into()));
let metadata = responses.request_body.metadata.as_ref().unwrap();
assert_eq!(metadata.get("key1"), Some(&serde_json::Value::String("value1".to_string())));
assert_eq!(metadata.get("key2"), Some(&serde_json::Value::Number(42.into())));
}
#[test]
fn test_responses_builder_reasoning() {
let mut responses = Responses::new();
responses.reasoning(ReasoningEffort::High, ReasoningSummary::Detailed);
let reasoning = responses.request_body.reasoning.as_ref().unwrap();
assert!(matches!(reasoning.effort, Some(ReasoningEffort::High)));
assert!(matches!(reasoning.summary, Some(ReasoningSummary::Detailed)));
}
#[test]
fn test_responses_builder_include() {
let mut responses = Responses::new();
responses.include(vec![Include::WebSearchCall, Include::ReasoningEncryptedContent]);
let includes = responses.request_body.include.as_ref().unwrap();
assert!(includes.contains(&Include::WebSearchCall));
assert!(includes.contains(&Include::ReasoningEncryptedContent));
}
#[test]
fn test_responses_builder_truncation() {
let mut responses = Responses::new();
responses.truncation(Truncation::Auto);
assert!(matches!(responses.request_body.truncation, Some(Truncation::Auto)));
// A second call overwrites the previous value.
responses.truncation(Truncation::Disabled);
assert!(matches!(responses.request_body.truncation, Some(Truncation::Disabled)));
}
// --- Content / Message construction helpers: serialization smoke tests only
// --- (substring checks, not exact wire-format assertions).
#[test]
fn test_content_from_text() {
let content = Content::from_text("Hello!");
let json = serde_json::to_string(&content).unwrap();
assert!(json.contains("Hello!"));
}
#[test]
fn test_content_from_image_url() {
let content = Content::from_image_url("https://example.com/image.png");
let json = serde_json::to_string(&content).unwrap();
assert!(json.contains("https://example.com/image.png"));
}
#[test]
fn test_message_from_message_array() {
let message = Message::from_message_array(
Role::User,
vec![Content::from_text("Look at this:"), Content::from_image_url("https://example.com/img.png")],
);
assert_eq!(message.role, Role::User);
}
#[test]
fn test_request_body_serialization() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
responses.str_message("Test message");
responses.temperature(0.5);
responses.max_output_tokens(100);
let json = serde_json::to_string(&responses.request_body).unwrap();
// "gpt-4o-mini" is the serialized form of ChatModel::Gpt4oMini.
assert!(json.contains("gpt-4o-mini"));
assert!(json.contains("Test message"));
}
// Exhaustive round-trip of the optional parameters: set everything on a
// standard (non-reasoning) model, check the stored values, then check that
// the pretty-printed JSON contains the expected keys.
#[test]
fn test_optional_parameters_serialization() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
responses.str_message("Test");
responses.temperature(0.7);
responses.max_output_tokens(100);
responses.max_tool_calls(2);
responses.parallel_tool_calls(true);
responses.store(false);
responses.stream(false);
responses.top_logprobs(3);
responses.top_p(0.9);
responses.truncation(Truncation::Auto);
responses.conversation("conv-test-123");
responses.safety_identifier("moderate");
responses.service_tier("default");
responses.background(false);
responses.reasoning(ReasoningEffort::Medium, ReasoningSummary::Concise);
responses.include(vec![Include::WebSearchCall]);
responses.metadata("key".to_string(), serde_json::Value::String("value".to_string()));
assert_eq!(responses.request_body.temperature, Some(0.7));
assert_eq!(responses.request_body.max_output_tokens, Some(100));
assert_eq!(responses.request_body.max_tool_calls, Some(2));
assert_eq!(responses.request_body.parallel_tool_calls, Some(true));
assert_eq!(responses.request_body.store, Some(false));
assert_eq!(responses.request_body.stream, Some(false));
assert_eq!(responses.request_body.top_logprobs, Some(3));
assert_eq!(responses.request_body.top_p, Some(0.9));
assert!(matches!(responses.request_body.truncation, Some(Truncation::Auto)));
assert_eq!(responses.request_body.conversation, Some("conv-test-123".to_string()));
assert_eq!(responses.request_body.safety_identifier, Some("moderate".to_string()));
assert_eq!(responses.request_body.service_tier, Some("default".to_string()));
assert_eq!(responses.request_body.background, Some(false));
let json_result = serde_json::to_string_pretty(&responses.request_body);
assert!(json_result.is_ok());
let json_body = json_result.unwrap();
// Note: `": "` spacing relies on to_string_pretty's output format.
assert!(json_body.contains("\"temperature\": 0.7"));
assert!(json_body.contains("\"max_output_tokens\": 100"));
assert!(json_body.contains("\"reasoning\""));
assert!(json_body.contains("\"include\""));
assert!(json_body.contains("\"metadata\""));
}
// --- Model-dependent parameter filtering. These tests assert that for
// --- reasoning models (o-series, gpt-5-series) the builder silently DROPS
// --- non-default sampling parameters (field stays None) instead of
// --- panicking, while the default value 1.0 is still stored.
#[test]
fn test_reasoning_model_detection_o1() {
let mut responses = Responses::new();
responses.model(ChatModel::O1);
responses.str_message("Test");
responses.temperature(0.5);
assert_eq!(responses.request_body.temperature, None);
assert_eq!(responses.request_body.model, ChatModel::O1);
}
#[test]
fn test_reasoning_model_detection_o3() {
let mut responses = Responses::new();
responses.model(ChatModel::O3Mini);
responses.str_message("Test");
responses.temperature(0.3);
assert_eq!(responses.request_body.temperature, None);
assert_eq!(responses.request_body.model, ChatModel::O3Mini);
}
#[test]
fn test_non_reasoning_model() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4o);
responses.str_message("Test");
responses.temperature(0.7);
assert_eq!(responses.request_body.temperature, Some(0.7));
assert_eq!(responses.request_body.model, ChatModel::Gpt4o);
}
#[test]
fn test_reasoning_model_with_default_temperature() {
let mut responses = Responses::new();
responses.model(ChatModel::O1);
responses.str_message("Test");
// 1.0 is treated as the default and is accepted even on reasoning models.
responses.temperature(1.0);
assert_eq!(responses.request_body.temperature, Some(1.0));
}
// --- Serde wire formats for ReasoningEffort / TextVerbosity: each variant
// --- must serialize as a lowercase JSON string.
#[test]
fn test_reasoning_effort_none() {
let effort = ReasoningEffort::None;
let json = serde_json::to_string(&effort).unwrap();
assert_eq!(json, "\"none\"");
}
#[test]
fn test_reasoning_effort_xhigh() {
let effort = ReasoningEffort::Xhigh;
let json = serde_json::to_string(&effort).unwrap();
assert_eq!(json, "\"xhigh\"");
}
#[test]
fn test_reasoning_effort_all_variants() {
assert_eq!(serde_json::to_string(&ReasoningEffort::None).unwrap(), "\"none\"");
assert_eq!(serde_json::to_string(&ReasoningEffort::Minimal).unwrap(), "\"minimal\"");
assert_eq!(serde_json::to_string(&ReasoningEffort::Low).unwrap(), "\"low\"");
assert_eq!(serde_json::to_string(&ReasoningEffort::Medium).unwrap(), "\"medium\"");
assert_eq!(serde_json::to_string(&ReasoningEffort::High).unwrap(), "\"high\"");
assert_eq!(serde_json::to_string(&ReasoningEffort::Xhigh).unwrap(), "\"xhigh\"");
}
#[test]
fn test_text_verbosity_low() {
let verbosity = TextVerbosity::Low;
let json = serde_json::to_string(&verbosity).unwrap();
assert_eq!(json, "\"low\"");
}
#[test]
fn test_text_verbosity_medium() {
let verbosity = TextVerbosity::Medium;
let json = serde_json::to_string(&verbosity).unwrap();
assert_eq!(json, "\"medium\"");
}
#[test]
fn test_text_verbosity_high() {
let verbosity = TextVerbosity::High;
let json = serde_json::to_string(&verbosity).unwrap();
assert_eq!(json, "\"high\"");
}
#[test]
fn test_text_config_serialization() {
let config = TextConfig { verbosity: Some(TextVerbosity::High) };
let json = serde_json::to_string(&config).unwrap();
assert!(json.contains("\"verbosity\":\"high\""));
}
#[test]
fn test_responses_builder_text_verbosity() {
let mut responses = Responses::new();
// Setting verbosity lazily creates the nested TextConfig.
responses.text_verbosity(TextVerbosity::Low);
assert!(responses.request_body.text.is_some());
let text = responses.request_body.text.as_ref().unwrap();
assert_eq!(text.verbosity, Some(TextVerbosity::Low));
}
#[test]
fn test_responses_builder_reasoning_with_none() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.reasoning(ReasoningEffort::None, ReasoningSummary::Auto);
assert!(responses.request_body.reasoning.is_some());
let reasoning = responses.request_body.reasoning.as_ref().unwrap();
assert_eq!(reasoning.effort, Some(ReasoningEffort::None));
}
#[test]
fn test_responses_builder_reasoning_with_xhigh() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.reasoning(ReasoningEffort::Xhigh, ReasoningSummary::Detailed);
assert!(responses.request_body.reasoning.is_some());
let reasoning = responses.request_body.reasoning.as_ref().unwrap();
assert_eq!(reasoning.effort, Some(ReasoningEffort::Xhigh));
}
#[test]
fn test_request_body_with_text_serialization() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.str_message("Test");
responses.text_verbosity(TextVerbosity::High);
let json_body = serde_json::to_string(&responses.request_body).unwrap();
assert!(json_body.contains("\"text\""));
assert!(json_body.contains("\"verbosity\":\"high\""));
}
// Additional imports for the tool-choice / prompt tests below.
// A `use` in the middle of a module body is legal Rust (items are unordered),
// though convention would group it with the imports at the top.
use crate::responses::request::{NamedFunctionChoice, Prompt, ToolChoice, ToolChoiceMode};
#[test]
fn test_tool_choice_mode_auto_serialization() {
let mode = ToolChoiceMode::Auto;
let json = serde_json::to_string(&mode).unwrap();
assert_eq!(json, "\"auto\"");
}
#[test]
fn test_tool_choice_mode_none_serialization() {
let mode = ToolChoiceMode::None;
let json = serde_json::to_string(&mode).unwrap();
assert_eq!(json, "\"none\"");
}
#[test]
fn test_tool_choice_mode_required_serialization() {
let mode = ToolChoiceMode::Required;
let json = serde_json::to_string(&mode).unwrap();
assert_eq!(json, "\"required\"");
}
#[test]
fn test_named_function_choice_new() {
let choice = NamedFunctionChoice::new("get_weather");
// `type_name` is fixed to "function" by the constructor.
assert_eq!(choice.type_name, "function");
assert_eq!(choice.name, "get_weather");
}
#[test]
fn test_named_function_choice_serialization() {
let choice = NamedFunctionChoice::new("calculate");
let json = serde_json::to_string(&choice).unwrap();
// `type_name` must be renamed to "type" on the wire.
assert!(json.contains("\"type\":\"function\""));
assert!(json.contains("\"name\":\"calculate\""));
}
#[test]
fn test_tool_choice_simple_serialization() {
// The Simple variant must serialize untagged, as a bare string.
let choice = ToolChoice::Simple(ToolChoiceMode::Auto);
let json = serde_json::to_string(&choice).unwrap();
assert_eq!(json, "\"auto\"");
let choice = ToolChoice::Simple(ToolChoiceMode::Required);
let json = serde_json::to_string(&choice).unwrap();
assert_eq!(json, "\"required\"");
}
#[test]
fn test_tool_choice_function_serialization() {
// The Function variant serializes as the inner object, not wrapped.
let choice = ToolChoice::Function(NamedFunctionChoice::new("search"));
let json = serde_json::to_string(&choice).unwrap();
assert!(json.contains("\"type\":\"function\""));
assert!(json.contains("\"name\":\"search\""));
}
#[test]
fn test_prompt_new() {
let prompt = Prompt::new("prompt-abc123");
assert_eq!(prompt.id, "prompt-abc123");
assert!(prompt.variables.is_none());
}
#[test]
fn test_prompt_with_variables() {
let mut vars = std::collections::HashMap::new();
vars.insert("name".to_string(), "Alice".to_string());
vars.insert("topic".to_string(), "AI".to_string());
let prompt = Prompt::with_variables("prompt-xyz", vars);
assert_eq!(prompt.id, "prompt-xyz");
assert!(prompt.variables.is_some());
let variables = prompt.variables.as_ref().unwrap();
assert_eq!(variables.get("name"), Some(&"Alice".to_string()));
assert_eq!(variables.get("topic"), Some(&"AI".to_string()));
}
#[test]
fn test_prompt_serialization_without_variables() {
let prompt = Prompt::new("prompt-123");
let json = serde_json::to_string(&prompt).unwrap();
assert!(json.contains("\"id\":\"prompt-123\""));
// A None variables field must be omitted entirely (skip_serializing_if).
assert!(!json.contains("variables"));
}
#[test]
fn test_prompt_serialization_with_variables() {
let mut vars = std::collections::HashMap::new();
vars.insert("key".to_string(), "value".to_string());
let prompt = Prompt::with_variables("prompt-456", vars);
let json = serde_json::to_string(&prompt).unwrap();
assert!(json.contains("\"id\":\"prompt-456\""));
assert!(json.contains("\"variables\""));
assert!(json.contains("\"key\":\"value\""));
}
#[test]
fn test_responses_builder_tool_choice_simple() {
let mut responses = Responses::new();
responses.tool_choice(ToolChoice::Simple(ToolChoiceMode::Required));
assert!(responses.request_body.tool_choice.is_some());
}
#[test]
fn test_responses_builder_tool_choice_function() {
let mut responses = Responses::new();
responses.tool_choice(ToolChoice::Function(NamedFunctionChoice::new("my_function")));
assert!(responses.request_body.tool_choice.is_some());
}
#[test]
fn test_responses_builder_prompt() {
let mut responses = Responses::new();
responses.prompt(Prompt::new("prompt-test"));
assert!(responses.request_body.prompt.is_some());
assert_eq!(responses.request_body.prompt.as_ref().unwrap().id, "prompt-test");
}
#[test]
fn test_responses_builder_prompt_cache_key() {
let mut responses = Responses::new();
responses.prompt_cache_key("my-cache-key");
assert_eq!(responses.request_body.prompt_cache_key, Some("my-cache-key".to_string()));
}
#[test]
fn test_responses_builder_prompt_cache_retention() {
let mut responses = Responses::new();
responses.prompt_cache_retention("24h");
assert_eq!(responses.request_body.prompt_cache_retention, Some("24h".to_string()));
}
#[test]
fn test_request_body_with_tool_choice_serialization() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
responses.str_message("Test");
responses.tool_choice(ToolChoice::Simple(ToolChoiceMode::Auto));
let json_body = serde_json::to_string(&responses.request_body).unwrap();
assert!(json_body.contains("\"tool_choice\":\"auto\""));
}
#[test]
fn test_request_body_with_prompt_serialization() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
responses.str_message("Test");
responses.prompt(Prompt::new("prompt-id"));
responses.prompt_cache_key("cache-key");
responses.prompt_cache_retention("1h");
let json_body = serde_json::to_string(&responses.request_body).unwrap();
assert!(json_body.contains("\"prompt\""));
assert!(json_body.contains("\"prompt_cache_key\":\"cache-key\""));
assert!(json_body.contains("\"prompt_cache_retention\":\"1h\""));
}
// --- Same filtering rules for top_p: non-default values are dropped on
// --- reasoning models, 1.0 (the default) is kept.
#[test]
fn test_o1_ignores_non_default_top_p() {
let mut responses = Responses::new();
responses.model(ChatModel::O1);
responses.str_message("Test");
responses.top_p(0.9);
assert_eq!(responses.request_body.top_p, None);
}
#[test]
fn test_o3_mini_ignores_non_default_top_p() {
let mut responses = Responses::new();
responses.model(ChatModel::O3Mini);
responses.str_message("Test");
responses.top_p(0.5);
assert_eq!(responses.request_body.top_p, None);
}
#[test]
fn test_gpt5_2_ignores_non_default_top_p() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.str_message("Test");
responses.top_p(0.8);
assert_eq!(responses.request_body.top_p, None);
}
#[test]
fn test_gpt5_1_ignores_non_default_temperature() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_1);
responses.str_message("Test");
responses.temperature(0.5);
assert_eq!(responses.request_body.temperature, None);
}
#[test]
fn test_reasoning_model_accepts_default_top_p() {
let mut responses = Responses::new();
responses.model(ChatModel::O1);
responses.str_message("Test");
responses.top_p(1.0);
assert_eq!(responses.request_body.top_p, Some(1.0));
}
#[test]
fn test_reasoning_model_accepts_default_temperature() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.str_message("Test");
responses.temperature(1.0);
assert_eq!(responses.request_body.temperature, Some(1.0));
}
#[test]
fn test_standard_model_accepts_all_top_p_values() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
responses.str_message("Test");
responses.top_p(0.1);
assert_eq!(responses.request_body.top_p, Some(0.1));
responses.top_p(0.5);
assert_eq!(responses.request_body.top_p, Some(0.5));
responses.top_p(1.0);
assert_eq!(responses.request_body.top_p, Some(1.0));
}
#[test]
fn test_standard_model_accepts_all_temperature_values() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4o);
responses.str_message("Test");
// 0.0 and 2.0 are the inclusive bounds of the valid range.
responses.temperature(0.0);
assert_eq!(responses.request_body.temperature, Some(0.0));
responses.temperature(1.0);
assert_eq!(responses.request_body.temperature, Some(1.0));
responses.temperature(2.0);
assert_eq!(responses.request_body.temperature, Some(2.0));
}
#[test]
fn test_o_series_ignores_multiple_restricted_parameters() {
let mut responses = Responses::new();
responses.model(ChatModel::O4Mini);
responses.str_message("Test");
responses.temperature(0.7);
responses.top_p(0.9);
assert_eq!(responses.request_body.temperature, None);
assert_eq!(responses.request_body.top_p, None);
}
#[test]
fn test_gpt5_series_ignores_multiple_restricted_parameters() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2Pro);
responses.str_message("Test");
responses.temperature(0.5);
responses.top_p(0.8);
assert_eq!(responses.request_body.temperature, None);
assert_eq!(responses.request_body.top_p, None);
}
// Custom model IDs: filtering is evidently name-based — a custom id that
// looks like a reasoning model ("gpt-5.3-preview", "o1-high") is filtered,
// while a fine-tune of a standard model is not.
#[test]
fn test_custom_gpt5_model_ignores_restricted_parameters() {
let mut responses = Responses::new();
responses.model(ChatModel::custom("gpt-5.3-preview"));
responses.str_message("Test");
responses.temperature(0.5);
responses.top_p(0.9);
assert_eq!(responses.request_body.temperature, None);
assert_eq!(responses.request_body.top_p, None);
}
#[test]
fn test_custom_o1_model_ignores_restricted_parameters() {
let mut responses = Responses::new();
responses.model(ChatModel::custom("o1-high"));
responses.str_message("Test");
responses.temperature(0.5);
responses.top_p(0.8);
assert_eq!(responses.request_body.temperature, None);
assert_eq!(responses.request_body.top_p, None);
}
#[test]
fn test_custom_standard_model_accepts_all_parameters() {
let mut responses = Responses::new();
responses.model(ChatModel::custom("ft:gpt-4o-mini:org::123"));
responses.str_message("Test");
responses.temperature(0.7);
responses.top_p(0.9);
assert_eq!(responses.request_body.temperature, Some(0.7));
assert_eq!(responses.request_body.top_p, Some(0.9));
}
#[test]
fn test_o1_accepts_reasoning_parameter() {
let mut responses = Responses::new();
responses.model(ChatModel::O1);
responses.str_message("Test");
responses.reasoning(ReasoningEffort::High, ReasoningSummary::Detailed);
let reasoning = responses.request_body.reasoning.as_ref().unwrap();
assert_eq!(reasoning.effort, Some(ReasoningEffort::High));
assert_eq!(reasoning.summary, Some(ReasoningSummary::Detailed));
}
#[test]
fn test_gpt5_2_accepts_all_reasoning_efforts() {
let efforts = vec![
ReasoningEffort::None,
ReasoningEffort::Minimal,
ReasoningEffort::Low,
ReasoningEffort::Medium,
ReasoningEffort::High,
ReasoningEffort::Xhigh,
];
for effort in efforts {
// Fresh builder per variant so a previous iteration can't mask a failure.
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.str_message("Test");
responses.reasoning(effort.clone(), ReasoningSummary::Auto);
let reasoning = responses.request_body.reasoning.as_ref().unwrap();
assert_eq!(reasoning.effort, Some(effort.clone()), "GPT-5.2 should accept ReasoningEffort::{:?}", effort);
}
}
#[test]
fn test_gpt5_1_accepts_all_reasoning_summaries() {
let summaries = vec![ReasoningSummary::Auto, ReasoningSummary::Concise, ReasoningSummary::Detailed];
for summary in summaries {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_1);
responses.str_message("Test");
responses.reasoning(ReasoningEffort::Medium, summary.clone());
let reasoning = responses.request_body.reasoning.as_ref().unwrap();
assert_eq!(reasoning.summary, Some(summary.clone()), "GPT-5.1 should accept ReasoningSummary::{:?}", summary);
}
}
#[test]
fn test_standard_model_accepts_top_logprobs() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt4oMini);
responses.str_message("Test");
responses.top_logprobs(5);
assert_eq!(responses.request_body.top_logprobs, Some(5));
}
// top_logprobs is dropped on reasoning models, same as temperature/top_p.
#[test]
fn test_o1_ignores_top_logprobs() {
let mut responses = Responses::new();
responses.model(ChatModel::O1);
responses.str_message("Test");
responses.top_logprobs(5);
assert_eq!(responses.request_body.top_logprobs, None);
}
#[test]
fn test_gpt5_2_ignores_top_logprobs() {
let mut responses = Responses::new();
responses.model(ChatModel::Gpt5_2);
responses.str_message("Test");
responses.top_logprobs(3);
assert_eq!(responses.request_body.top_logprobs, None);
}
// --- Parameters that are never filtered, regardless of model family.
#[test]
fn test_max_output_tokens_accepted_by_all_models() {
let mut responses_standard = Responses::new();
responses_standard.model(ChatModel::Gpt4oMini);
responses_standard.max_output_tokens(1000);
assert_eq!(responses_standard.request_body.max_output_tokens, Some(1000));
let mut responses_o = Responses::new();
responses_o.model(ChatModel::O1);
responses_o.max_output_tokens(2000);
assert_eq!(responses_o.request_body.max_output_tokens, Some(2000));
let mut responses_gpt5 = Responses::new();
responses_gpt5.model(ChatModel::Gpt5_2);
responses_gpt5.max_output_tokens(3000);
assert_eq!(responses_gpt5.request_body.max_output_tokens, Some(3000));
}
#[test]
fn test_store_accepted_by_all_models() {
let models = vec![ChatModel::Gpt4oMini, ChatModel::O1, ChatModel::Gpt5_2];
for model in models {
let mut responses = Responses::new();
responses.model(model.clone());
responses.store(true);
assert_eq!(responses.request_body.store, Some(true), "Model {} should accept store parameter", model.as_str());
}
}
#[test]
fn test_metadata_accepted_by_all_models() {
let models = vec![ChatModel::Gpt4oMini, ChatModel::O3Mini, ChatModel::Gpt5_1];
for model in models {
let mut responses = Responses::new();
responses.model(model.clone());
responses.metadata("key".to_string(), serde_json::Value::String("value".to_string()));
assert!(responses.request_body.metadata.is_some(), "Model {} should accept metadata parameter", model.as_str());
}
}
#[test]
fn test_include_accepted_by_all_models() {
let models = vec![ChatModel::Gpt4o, ChatModel::O1Pro, ChatModel::Gpt5_2Pro];
for model in models {
let mut responses = Responses::new();
responses.model(model.clone());
responses.include(vec![Include::WebSearchCall, Include::ReasoningEncryptedContent]);
assert!(responses.request_body.include.is_some(), "Model {} should accept include parameter", model.as_str());
}
}
}