#[allow(dead_code, unused_imports, unused_variables)]
#[cfg(test)]
mod tests {
use crate::client::{OpenRouterClient, RetryConfig, Unconfigured};
#[allow(unused_imports)]
use crate::models::chat::ChatMessage;
use crate::models::provider_preferences::{
DataCollection, ProviderPreferences, ProviderSort, Quantization,
};
#[allow(unused_imports)]
use crate::models::structured::{JsonSchemaConfig, JsonSchemaDefinition};
#[allow(unused_imports)]
use crate::models::tool::{FunctionCall, FunctionDescription, Tool, ToolCall};
use crate::types::chat::ChatRole;
use crate::types::chat::{
ChatCompletionRequest, ChatCompletionResponse, Message, MessageContent,
};
use crate::types::ids::{ActivityId, GenerationId};
use crate::types::status::{CancellationStatus, StreamingStatus};
use serde_json::{json, Value};
use url::Url;
/// Helper: parse a JSON payload into a [`ChatCompletionResponse`],
/// panicking with a clear message when the fixture is malformed.
fn deserialize_chat_response(json_str: &str) -> ChatCompletionResponse {
    serde_json::from_str(json_str).expect("Valid JSON")
}
/// Happy-path smoke test: build a configured client, build a minimal chat
/// request, and deserialize a simulated completion response.
#[tokio::test]
async fn test_basic_chat_completion() -> Result<(), Box<dyn std::error::Error>> {
    let api_key = "sk-1234567890abcdef1234567890abcdef";
    // `with_api_key` is fallible; propagate its error with `?` for
    // consistency with the other client-setup tests in this module
    // (previously the `Result` was bound directly to `_client`).
    let _client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_http_referer("https://github.com/your_org/your_repo")
        .with_site_title("OpenRouter Rust SDK Tests")
        .with_api_key(api_key)?;
    // Only the fields under test are spelled out; everything else takes its
    // `Default` value instead of a long run of explicit `None`s (the same
    // struct-update style used later in this module).
    let _request = ChatCompletionRequest {
        model: "openai/gpt-4o".to_string(),
        messages: vec![Message::text(
            ChatRole::User,
            "What is a phantom type in Rust?",
        )],
        ..Default::default()
    };
    // Simulated wire response: one assistant choice plus usage accounting.
    let simulated_response_json = r#"
{
"id": "gen-123",
"choices": [{
"message": {
"role": "assistant",
"content": "A phantom type is a type parameter that is not used in any fields.",
"tool_calls": null
},
"finish_reason": "stop",
"native_finish_reason": "stop"
}],
"created": 1234567890,
"model": "openai/gpt-4o",
"object": "chat.completion",
"usage": {
"prompt_tokens": 10,
"completion_tokens": 15,
"total_tokens": 25
}
}
"#;
    let response = deserialize_chat_response(simulated_response_json);
    assert!(!response.choices.is_empty());
    assert_eq!(response.choices[0].message.role, ChatRole::Assistant);
    Ok(())
}
// Verifies that a well-formed `function`-type tool call deserializes into a
// `ChatCompletionResponse` and passes the client's tool-call validation.
#[tokio::test]
async fn test_valid_tool_call_response() -> Result<(), Box<dyn std::error::Error>> {
// Simulated response: one assistant choice containing a single tool call
// with type "function" and JSON-encoded string arguments.
let simulated_response_json = r#"
{
"id": "gen-valid-tool",
"choices": [{
"message": {
"role": "assistant",
"content": "Calling tool for weather.",
"tool_calls": [{
"id": "call-001",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\": \"Boston\"}"
}
}]
},
"finish_reason": "tool_calls",
"native_finish_reason": "tool_calls"
}],
"created": 1234567890,
"model": "openai/gpt-4o",
"object": "chat.completion"
}
"#;
let response = deserialize_chat_response(simulated_response_json);
// Construct a `Ready` client directly via struct literal (bypassing the
// builder) so `validate_tool_calls` can be exercised without network setup.
// NOTE(review): this relies on the test module having visibility into the
// client's internal fields — confirm these stay test-visible.
let client = OpenRouterClient::<crate::client::Ready> {
config: crate::client::ClientConfig {
api_key: Some(
crate::client::SecureApiKey::new("sk-1234567890abcdef1234567890abcdef")
.unwrap(),
),
base_url: Url::parse("https://dummy/").unwrap(),
http_referer: None,
site_title: None,
user_id: None, timeout: std::time::Duration::from_secs(30),
retry_config: RetryConfig::default(), max_response_bytes: 10 * 1024 * 1024,
},
http_client: None,
_state: std::marker::PhantomData,
router_config: None,
cached_api_config: None,
providers_cache: None,
};
// A structurally valid tool call should validate cleanly (error propagates
// via `?` and fails the test otherwise).
client.validate_tool_calls(&response)?;
Ok(())
}
/// Deserializes a minimal (non-chat) text-completion payload: a single
/// choice carrying raw text, an index, and a finish reason.
#[tokio::test]
async fn test_text_completion_response_deserialization(
) -> Result<(), Box<dyn std::error::Error>> {
    let payload = r#"
{
"id": "comp-123",
"choices": [
{
"text": "Once upon a time, in a land far, far away...",
"index": 0,
"finish_reason": "stop"
}
]
}
"#;
    let parsed: crate::types::completion::CompletionResponse = serde_json::from_str(payload)?;
    assert!(!parsed.choices.is_empty());
    let first_choice = &parsed.choices[0];
    assert_eq!(first_choice.finish_reason.as_deref(), Some("stop"));
    assert!(first_choice.text.contains("Once upon a time"));
    Ok(())
}
/// A tool call whose "type" field is not "function" must be rejected at
/// deserialization time instead of producing a bogus `ToolCall` value.
#[tokio::test]
async fn test_invalid_tool_call_response() -> Result<(), Box<dyn std::error::Error>> {
    let payload = r#"
{
"id": "gen-invalid-tool",
"choices": [{
"message": {
"role": "assistant",
"content": "Invalid tool call.",
"tool_calls": [{
"id": "call-002",
"type": "invalid",
"function": {
"name": "get_weather",
"arguments": "{\"location\": \"Boston\"}"
}
}]
},
"finish_reason": "tool_calls",
"native_finish_reason": "tool_calls"
}],
"created": 1234567890,
"model": "openai/gpt-4o",
"object": "chat.completion"
}
"#;
    let outcome = serde_json::from_str::<ChatCompletionResponse>(payload);
    assert!(outcome.is_err());
    Ok(())
}
#[tokio::test]
async fn test_provider_preferences_serialization() -> Result<(), Box<dyn std::error::Error>> {
let preferences = crate::models::provider_preferences::ProviderPreferences {
order: Some(vec!["OpenAI".to_string(), "Anthropic".to_string()]),
allow_fallbacks: Some(false),
require_parameters: Some(true),
data_collection: Some(crate::models::provider_preferences::DataCollection::Deny),
ignore: Some(vec!["Azure".to_string()]),
quantizations: Some(vec![
crate::models::provider_preferences::Quantization::Fp8,
crate::models::provider_preferences::Quantization::Int8,
]),
sort: Some(crate::models::provider_preferences::ProviderSort::Throughput),
};
let extra_params = json!({});
let builder =
crate::api::request::RequestBuilder::new("openai/gpt-4o", vec![], extra_params)
.with_provider_preferences(preferences)
.expect("Provider preferences should be valid");
let payload = builder.build();
let payload_json = serde_json::to_string_pretty(&payload)?;
let payload_value: Value = serde_json::from_str(&payload_json)?;
let provider_config = payload_value.get("provider").expect("provider key missing");
assert_eq!(provider_config.get("allowFallbacks").unwrap(), false);
assert_eq!(provider_config.get("sort").unwrap(), "throughput");
Ok(())
}
/// Deserializes a web-search response fixture: two hits plus a total count.
#[tokio::test]
async fn test_web_search_response_deserialization() -> Result<(), Box<dyn std::error::Error>> {
    let payload = r#"
{
"query": "rust programming",
"results": [
{
"title": "The Rust Programming Language",
"url": "https://www.rust-lang.org",
"snippet": "Learn Rust programming."
},
{
"title": "Rust by Example",
"url": "https://doc.rust-lang.org/rust-by-example/",
"snippet": "A collection of runnable examples."
}
],
"total_results": 2
}
"#;
    let parsed = serde_json::from_str::<crate::types::web_search::WebSearchResponse>(payload)?;
    assert_eq!(parsed.query, "rust programming");
    assert_eq!(parsed.results.len(), 2);
    assert_eq!(parsed.total_results, 2);
    Ok(())
}
#[tokio::test]
async fn test_chat_completion_with_provider_preferences(
) -> Result<(), Box<dyn std::error::Error>> {
use crate::models::provider_preferences::{
DataCollection, ProviderPreferences, ProviderSort,
};
use crate::types::chat::{ChatCompletionRequest, Message};
let preferences = ProviderPreferences {
order: Some(vec!["OpenAI".to_string(), "Anthropic".to_string()]),
allow_fallbacks: Some(true),
require_parameters: Some(false),
data_collection: Some(DataCollection::Deny),
ignore: Some(vec!["Azure".to_string()]),
quantizations: None,
sort: Some(ProviderSort::Throughput),
};
let request = ChatCompletionRequest {
model: "openai/gpt-4o".to_string(),
messages: vec![Message::text(
ChatRole::User,
"Hello with provider preferences!",
)],
stream: None,
response_format: None,
tools: None,
tool_choice: None,
provider: Some(preferences),
models: None,
transforms: None,
route: None,
user: None,
max_tokens: None,
temperature: None,
top_p: None,
top_k: None,
frequency_penalty: None,
presence_penalty: None,
repetition_penalty: None,
min_p: None,
top_a: None,
seed: None,
stop: None,
logit_bias: None,
logprobs: None,
top_logprobs: None,
prediction: None,
parallel_tool_calls: None,
verbosity: None,
debug: None,
plugins: None,
reasoning: None,
};
let json = serde_json::to_string_pretty(&request)?;
let parsed: serde_json::Value = serde_json::from_str(&json)?;
let provider_field = parsed.get("provider").expect("Provider field should exist");
assert!(
provider_field.is_object(),
"Provider field should be an object"
);
let order = provider_field
.get("order")
.expect("Order field should exist");
assert!(order.is_array(), "Order should be an array");
let allow_fallbacks = provider_field
.get("allowFallbacks")
.expect("allowFallbacks field should exist");
assert!(
allow_fallbacks.is_boolean(),
"allowFallbacks should be boolean"
);
Ok(())
}
/// Exercises `ChatCompletionChunk` deserialization for both a mid-stream
/// chunk (content delta, no usage) and a terminal chunk (empty delta,
/// finish reason, final usage totals).
#[tokio::test]
async fn test_streaming_chunk_deserialization() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::ChatCompletionChunk;
    let mid_stream_json = r#"{
"id": "chatcmpl-123",
"object": "chat.completion.chunk",
"created": 1677652288,
"model": "openai/gpt-4o",
"choices": [{
"index": 0,
"delta": {
"content": "Hello"
},
"finish_reason": null
}]
}"#;
    let mid_chunk: ChatCompletionChunk = serde_json::from_str(mid_stream_json)?;
    assert_eq!(mid_chunk.id, "chatcmpl-123");
    assert_eq!(mid_chunk.choices.len(), 1);
    assert_eq!(
        mid_chunk.choices[0].delta.content,
        Some(MessageContent::Text("Hello".to_string()))
    );
    assert!(mid_chunk.usage.is_none());
    let terminal_json = r#"{
"id": "chatcmpl-123",
"object": "chat.completion.chunk",
"created": 1677652288,
"model": "openai/gpt-4o",
"choices": [{
"index": 0,
"delta": {},
"finish_reason": "stop"
}],
"usage": {
"prompt_tokens": 10,
"completion_tokens": 5,
"total_tokens": 15
}
}"#;
    let terminal_chunk: ChatCompletionChunk = serde_json::from_str(terminal_json)?;
    assert_eq!(
        terminal_chunk.choices[0].finish_reason,
        Some("stop".to_string())
    );
    assert!(terminal_chunk.usage.is_some());
    let usage = terminal_chunk.usage.unwrap();
    assert_eq!(usage.prompt_tokens, 10);
    assert_eq!(usage.completion_tokens, 5);
    assert_eq!(usage.total_tokens, 15);
    Ok(())
}
/// Covers `CreditsResponse`/`CreditsData` accounting helpers across a wire
/// fixture plus three hand-built cases: normal, zero, and overdrawn.
#[tokio::test]
async fn test_credits_api_integration() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::credits::{CreditsData, CreditsResponse};
    // Wire-format fixture: 150.75 purchased, 45.25 consumed.
    let payload = r#"{
"data": {
"total_credits": 150.75,
"total_usage": 45.25
}
}"#;
    let parsed: CreditsResponse = serde_json::from_str(payload)?;
    assert_eq!(parsed.total_credits(), 150.75);
    assert_eq!(parsed.total_usage(), 45.25);
    assert_eq!(parsed.remaining_credits(), 105.50);
    assert!(parsed.has_credits());
    assert!((parsed.usage_percentage() - 0.300).abs() < 0.001);
    // Usage comfortably below the purchased total.
    let normal = CreditsData {
        total_credits: 200.0,
        total_usage: 50.0,
    };
    assert_eq!(normal.remaining(), 150.0);
    assert!(normal.has_credits());
    assert_eq!(normal.usage_percentage(), 0.25);
    // Degenerate case: nothing purchased, nothing used.
    let empty = CreditsData {
        total_credits: 0.0,
        total_usage: 0.0,
    };
    assert_eq!(empty.remaining(), 0.0);
    assert!(!empty.has_credits());
    assert_eq!(empty.usage_percentage(), 0.0);
    // Overdrawn: usage exceeds the purchased total, remaining goes negative.
    let overdrawn = CreditsData {
        total_credits: 100.0,
        total_usage: 120.0,
    };
    assert_eq!(overdrawn.remaining(), -20.0);
    assert!(!overdrawn.has_credits());
    assert_eq!(overdrawn.usage_percentage(), 1.2);
    Ok(())
}
/// A fully-configured client must hand out a credits API handle that
/// inherits its base URL.
#[tokio::test]
async fn test_credits_api_client_integration() -> Result<(), Box<dyn std::error::Error>> {
    let client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_http_referer("https://github.com/your_org/your_repo")
        .with_site_title("OpenRouter Rust SDK Tests")
        .with_api_key("sk-1234567890abcdef1234567890abcdef")?;
    let credits_api = client.credits()?;
    let base = credits_api.config.base_url.as_str();
    assert!(base.contains("openrouter.ai"));
    Ok(())
}
/// Serialize -> deserialize must be lossless for `CreditsResponse` and
/// preserve the derived accounting helpers.
#[tokio::test]
async fn test_credits_serialization_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::credits::{CreditsData, CreditsResponse};
    let source = CreditsResponse {
        data: CreditsData {
            total_credits: 99.99,
            total_usage: 33.33,
        },
    };
    let encoded = serde_json::to_string(&source)?;
    let decoded: CreditsResponse = serde_json::from_str(&encoded)?;
    assert_eq!(source, decoded);
    assert_eq!(decoded.total_credits(), 99.99);
    assert_eq!(decoded.total_usage(), 33.33);
    assert_eq!(decoded.remaining_credits(), 66.66);
    Ok(())
}
// Exercises `GenerationResponse`/`GenerationData` accessors across three
// fixtures: a fully-populated wire payload, a partially-populated record,
// and a minimal record with all optional telemetry absent.
#[tokio::test]
async fn test_generation_api_integration() -> Result<(), Box<dyn std::error::Error>> {
use crate::types::generation::{GenerationData, GenerationResponse};
// Wire fixture with every optional field populated so each accessor below
// has something to report.
let generation_json = r#"{
"data": {
"id": "gen-123456789",
"upstream_id": "upstream-abc123",
"total_cost": 0.025,
"cache_discount": 0.005,
"upstream_inference_cost": 0.020,
"created_at": "2024-01-15T10:30:00Z",
"model": "openai/gpt-4",
"app_id": 12345,
"streamed": true,
"cancelled": false,
"provider_name": "OpenAI",
"latency": 1500,
"moderation_latency": 100,
"generation_time": 1200,
"finish_reason": "stop",
"native_finish_reason": "stop",
"tokens_prompt": 50,
"tokens_completion": 100,
"native_tokens_prompt": 50,
"native_tokens_completion": 100,
"native_tokens_reasoning": 25,
"num_media_prompt": 2,
"num_media_completion": 0,
"num_search_results": 5,
"origin": "api",
"usage": 0.025,
"is_byok": false
}
}"#;
let generation_response: GenerationResponse = serde_json::from_str(generation_json)?;
// Per the fixture: effective_cost = total_cost - cache_discount
// (0.025 - 0.005 = 0.020), total_tokens = prompt + completion (50 + 100).
assert_eq!(generation_response.id(), "gen-123456789");
assert_eq!(generation_response.model(), "openai/gpt-4");
assert_eq!(generation_response.total_cost(), 0.025);
assert_eq!(generation_response.effective_cost(), 0.020);
assert!(generation_response.is_successful());
assert!(generation_response.was_streamed());
assert_eq!(generation_response.total_tokens(), Some(150));
assert!(generation_response.used_web_search());
assert!(generation_response.included_media());
assert!(generation_response.used_reasoning());
// Hand-built record: no cache discount, no streaming, no media/search/
// reasoning telemetry.
let generation_data = GenerationData {
id: GenerationId::new("gen-test"),
upstream_id: None,
total_cost: 0.01,
cache_discount: None,
upstream_inference_cost: None,
created_at: "2024-01-15T10:30:00Z".to_string(),
model: "openai/gpt-3.5-turbo".to_string(),
app_id: None,
streamed: StreamingStatus::NotStarted,
cancelled: CancellationStatus::NotCancelled,
provider_name: Some("OpenAI".to_string()),
latency: Some(800),
moderation_latency: None,
generation_time: Some(600),
finish_reason: Some("stop".to_string()),
native_finish_reason: Some("stop".to_string()),
tokens_prompt: Some(20),
tokens_completion: Some(30),
native_tokens_prompt: Some(20),
native_tokens_completion: Some(30),
native_tokens_reasoning: None,
num_media_prompt: None,
num_media_completion: None,
num_search_results: None,
origin: "api".to_string(),
usage: 0.01,
is_byok: false,
};
// With no cache discount, effective cost equals total cost; latency and
// generation time convert from milliseconds to seconds.
assert_eq!(generation_data.total_tokens(), Some(50));
assert_eq!(generation_data.total_native_tokens(), Some(50));
assert!(generation_data.is_successful());
assert!(!generation_data.was_streamed());
assert_eq!(generation_data.effective_cost(), 0.01);
assert_eq!(generation_data.cost_per_token(), Some(0.01 / 50.0));
assert_eq!(generation_data.latency_seconds(), Some(0.8));
assert_eq!(generation_data.generation_time_seconds(), Some(0.6));
assert!(!generation_data.used_web_search());
assert!(!generation_data.included_media());
assert!(!generation_data.used_reasoning());
// Minimal record: all optional telemetry absent, so the token and
// cost-per-token helpers return None and the feature flags report false.
let minimal_generation = GenerationData {
id: GenerationId::new("gen-minimal"),
upstream_id: None,
total_cost: 0.005,
cache_discount: None,
upstream_inference_cost: None,
created_at: "2024-01-15T10:30:00Z".to_string(),
model: "openai/gpt-3.5-turbo".to_string(),
app_id: None,
streamed: StreamingStatus::default(),
cancelled: CancellationStatus::default(),
provider_name: None,
latency: None,
moderation_latency: None,
generation_time: None,
finish_reason: None,
native_finish_reason: None,
tokens_prompt: None,
tokens_completion: None,
native_tokens_prompt: None,
native_tokens_completion: None,
native_tokens_reasoning: None,
num_media_prompt: None,
num_media_completion: None,
num_search_results: None,
origin: "api".to_string(),
usage: 0.005,
is_byok: false,
};
assert_eq!(minimal_generation.total_tokens(), None);
assert_eq!(minimal_generation.total_native_tokens(), None);
assert!(minimal_generation.is_successful());
assert!(!minimal_generation.was_streamed());
assert_eq!(minimal_generation.effective_cost(), 0.005);
assert_eq!(minimal_generation.cost_per_token(), None);
assert!(!minimal_generation.used_web_search());
assert!(!minimal_generation.included_media());
assert!(!minimal_generation.used_reasoning());
Ok(())
}
/// A fully-configured client must hand out a generation API handle that
/// inherits its base URL.
#[tokio::test]
async fn test_generation_api_client_integration() -> Result<(), Box<dyn std::error::Error>> {
    let client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_http_referer("https://github.com/your_org/your_repo")
        .with_site_title("OpenRouter Rust SDK Tests")
        .with_api_key("sk-1234567890abcdef1234567890abcdef")?;
    let generation_api = client.generation()?;
    let base = generation_api.config.base_url.as_str();
    assert!(base.contains("openrouter.ai"));
    Ok(())
}
// Serialize -> deserialize must be lossless for a fully-populated
// `GenerationResponse`, and the derived accessors must agree with the
// fixture afterwards.
#[tokio::test]
async fn test_generation_serialization_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
use crate::types::generation::{GenerationData, GenerationResponse};
// Every optional field populated so the round trip covers the full schema.
let original = GenerationResponse {
data: GenerationData {
id: GenerationId::new("gen-roundtrip"),
upstream_id: Some("upstream-456".to_string()),
total_cost: 0.015,
cache_discount: Some(0.002),
upstream_inference_cost: Some(0.013),
created_at: "2024-01-15T11:00:00Z".to_string(),
model: "anthropic/claude-3-opus".to_string(),
app_id: Some(67890),
streamed: StreamingStatus::Complete,
cancelled: CancellationStatus::NotCancelled,
provider_name: Some("Anthropic".to_string()),
latency: Some(2000),
moderation_latency: Some(150),
generation_time: Some(1800),
finish_reason: Some("stop".to_string()),
native_finish_reason: Some("end_turn".to_string()),
tokens_prompt: Some(100),
tokens_completion: Some(200),
native_tokens_prompt: Some(100),
native_tokens_completion: Some(200),
native_tokens_reasoning: Some(50),
num_media_prompt: Some(1),
num_media_completion: Some(0),
num_search_results: Some(3),
origin: "api".to_string(),
usage: 0.015,
is_byok: false,
},
};
let json_str = serde_json::to_string(&original)?;
let deserialized: GenerationResponse = serde_json::from_str(&json_str)?;
// Structural equality first, then spot-check the derived accessors:
// effective_cost = 0.015 - 0.002 discount; total_tokens = 100 + 200.
assert_eq!(original, deserialized);
assert_eq!(deserialized.id(), "gen-roundtrip");
assert_eq!(deserialized.model(), "anthropic/claude-3-opus");
assert_eq!(deserialized.total_cost(), 0.015);
assert_eq!(deserialized.effective_cost(), 0.013);
assert_eq!(deserialized.total_tokens(), Some(300));
assert!(deserialized.used_web_search());
assert!(deserialized.included_media());
assert!(deserialized.used_reasoning());
Ok(())
}
// Table-driven check of the cost helpers on both `GenerationData` and its
// `GenerationResponse` wrapper: each scenario is
// (total_cost, cache_discount, expected_effective_cost), where the expected
// value equals total_cost minus the discount (or the total when no
// discount). cost_per_token divides by 300 total tokens (100 + 200).
#[tokio::test]
async fn test_generation_cost_calculations() -> Result<(), Box<dyn std::error::Error>> {
use crate::types::generation::{GenerationData, GenerationResponse};
let scenarios = vec![
(0.100, Some(0.020), 0.080),
(0.050, None, 0.050),
(0.075, Some(0.075), 0.000), (0.200, Some(0.025), 0.175),
];
for (total_cost, cache_discount, expected_effective) in scenarios {
// Only the cost-relevant fields vary; the rest is fixed boilerplate.
let data = GenerationData {
id: GenerationId::new("gen-cost-test"),
upstream_id: None,
total_cost,
cache_discount,
upstream_inference_cost: None,
created_at: "2024-01-15T10:30:00Z".to_string(),
model: "openai/gpt-4".to_string(),
app_id: None,
streamed: StreamingStatus::default(),
cancelled: CancellationStatus::default(),
provider_name: None,
latency: None,
moderation_latency: None,
generation_time: None,
finish_reason: None,
native_finish_reason: None,
tokens_prompt: Some(100),
tokens_completion: Some(200),
native_tokens_prompt: Some(100),
native_tokens_completion: Some(200),
native_tokens_reasoning: None,
num_media_prompt: None,
num_media_completion: None,
num_search_results: None,
origin: "api".to_string(),
usage: total_cost,
is_byok: false,
};
// Epsilon comparison: these are f64 computations, not exact literals.
assert!((data.effective_cost() - expected_effective).abs() < f64::EPSILON);
assert!((data.cost_per_token().unwrap() - (total_cost / 300.0)).abs() < f64::EPSILON);
// The response wrapper must delegate to the same calculations.
let response = GenerationResponse { data };
assert!((response.effective_cost() - expected_effective).abs() < f64::EPSILON);
assert!(
(response.cost_per_token().unwrap() - (total_cost / 300.0)).abs() < f64::EPSILON
);
}
Ok(())
}
/// A fully-configured client must hand out an analytics API handle that
/// inherits its base URL.
#[tokio::test]
async fn test_analytics_api_client_integration() -> Result<(), Box<dyn std::error::Error>> {
    let client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_http_referer("https://github.com/your_org/your_repo")
        .with_site_title("OpenRouter Rust SDK Tests")
        .with_api_key("sk-1234567890abcdef1234567890abcdef")?;
    let analytics_api = client.analytics()?;
    let base = analytics_api.config.base_url.as_str();
    assert!(base.contains("openrouter.ai"));
    Ok(())
}
// Serialize -> deserialize must be lossless for `ActivityResponse`, and the
// aggregate helpers must agree with the single-record fixture afterwards.
#[tokio::test]
async fn test_analytics_serialization_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
use crate::types::analytics::{ActivityData, ActivityResponse};
use chrono::Utc;
// One completed, streamed, non-cancelled activity record.
let original = ActivityResponse {
data: vec![ActivityData {
id: ActivityId::new("test-123"),
created_at: Utc::now(),
model: "test-model".to_string(),
total_cost: Some(0.001),
tokens_prompt: Some(10),
tokens_completion: Some(20),
total_tokens: Some(30),
provider: Some("test-provider".to_string()),
streamed: StreamingStatus::Complete,
cancelled: CancellationStatus::NotCancelled,
web_search: Some(true),
media: Some(false),
reasoning: Some(false),
finish_reason: Some("stop".to_string()),
native_finish_reason: None,
origin: None,
latency: Some(1000),
generation_time: Some(500),
moderation_latency: None,
cache_discount: None,
effective_cost: Some(0.0009),
upstream_id: None,
user_id: None,
http_referer: None,
}],
total_count: Some(1),
has_more: Some(false),
};
let json_str = serde_json::to_string(&original)?;
let deserialized: ActivityResponse = serde_json::from_str(&json_str)?;
assert_eq!(original, deserialized);
// Aggregates over the single record: note total_cost() reports the
// record's effective_cost (0.0009), per this assertion; with one
// successful, streamed record both rates are 100%.
assert_eq!(deserialized.total_cost(), 0.0009);
assert_eq!(deserialized.total_tokens(), 30);
assert_eq!(deserialized.success_rate(), 100.0);
assert_eq!(deserialized.streaming_rate(), 100.0);
Ok(())
}
/// `ActivityRequest::validate` accepts well-formed ISO dates in ascending
/// order and rejects both bad date formats and inverted ranges.
#[tokio::test]
async fn test_activity_request_validation() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::analytics::ActivityRequest;
    // Well-formed dates, start before end: valid.
    let ok_request = ActivityRequest::new()
        .with_start_date("2024-01-01")
        .with_end_date("2024-01-31")
        .with_order(crate::types::analytics::SortOrder::Ascending);
    assert!(ok_request.validate().is_ok());
    // Slash-separated date: rejected.
    let bad_format = ActivityRequest::new().with_start_date("2024/01/01");
    assert!(bad_format.validate().is_err());
    // Start date after end date: rejected.
    let inverted_range = ActivityRequest::new()
        .with_start_date("2024-02-01")
        .with_end_date("2024-01-31");
    assert!(inverted_range.validate().is_err());
    Ok(())
}
// Exercises the per-record convenience accessors on `ActivityData`:
// cost-per-token math, millisecond-to-second conversions, status flags,
// and feature-usage flags.
#[tokio::test]
async fn test_activity_data_convenience_methods() -> Result<(), Box<dyn std::error::Error>> {
use crate::types::analytics::ActivityData;
use chrono::Utc;
// A completed, streamed record that used web search but not media or
// reasoning, with a cache-discounted effective cost.
let activity = ActivityData {
id: ActivityId::new("test-123"),
created_at: Utc::now(),
model: "test-model".to_string(),
total_cost: Some(0.001),
tokens_prompt: Some(10),
tokens_completion: Some(20),
total_tokens: Some(30),
provider: Some("test-provider".to_string()),
streamed: StreamingStatus::Complete,
cancelled: CancellationStatus::NotCancelled,
web_search: Some(true),
media: Some(false),
reasoning: Some(false),
finish_reason: Some("stop".to_string()),
native_finish_reason: None,
origin: None,
latency: Some(1000),
generation_time: Some(500),
moderation_latency: None,
cache_discount: None,
effective_cost: Some(0.0009),
upstream_id: None,
user_id: None,
http_referer: None,
};
// Cost helpers yield positive values when cost and tokens are present.
assert!(activity.cost_per_token().is_some());
assert!(activity.cost_per_million_tokens().is_some());
assert!(activity.cost_per_token().unwrap() > 0.0);
assert!(activity.cost_per_million_tokens().unwrap() > 0.0);
// Millisecond fields convert to seconds: 1000ms -> 1.0s, 500ms -> 0.5s.
assert_eq!(activity.latency_seconds(), Some(1.0));
assert_eq!(activity.generation_time_seconds(), Some(0.5));
assert!(activity.is_successful());
assert!(activity.was_streamed());
assert!(activity.used_web_search());
assert!(!activity.included_media());
assert!(!activity.used_reasoning());
// final_cost reports the record's effective_cost per this assertion.
assert_eq!(activity.final_cost(), Some(0.0009));
Ok(())
}
// Verifies the aggregate statistics on `ActivityResponse` over two records
// that deliberately differ on every dimension (cost, tokens, cancellation,
// streaming, feature flags, latency), so each rate lands at exactly 50%.
#[tokio::test]
async fn test_activity_response_aggregations() -> Result<(), Box<dyn std::error::Error>> {
use crate::types::analytics::{ActivityData, ActivityResponse};
use chrono::Utc;
let activities = vec![
// Record 1: not cancelled, streamed to completion, used web search.
// Unlisted fields come from `ActivityData::default()`.
ActivityData {
id: ActivityId::new("test-1"),
created_at: Utc::now(),
model: "model-a".to_string(),
total_cost: Some(0.001),
total_tokens: Some(100),
cancelled: CancellationStatus::NotCancelled,
streamed: StreamingStatus::Complete,
web_search: Some(true),
media: Some(false),
reasoning: Some(false),
provider: Some("provider-x".to_string()),
latency: Some(1000),
..Default::default()
},
// Record 2: the opposite profile — cancellation status `Completed`,
// streaming never started, media and reasoning used instead.
ActivityData {
id: ActivityId::new("test-2"),
created_at: Utc::now(),
model: "model-b".to_string(),
total_cost: Some(0.002),
total_tokens: Some(200),
cancelled: CancellationStatus::Completed,
streamed: StreamingStatus::NotStarted,
web_search: Some(false),
media: Some(true),
reasoning: Some(true),
provider: Some("provider-y".to_string()),
latency: Some(2000),
..Default::default()
},
];
let response = ActivityResponse {
data: activities,
total_count: Some(2),
has_more: Some(false),
};
// Sums and averages over both records: 0.001 + 0.002; 100 + 200;
// mean latency (1000 + 2000) / 2 ms -> 1.5 s.
assert_eq!(response.total_cost(), 0.003);
assert_eq!(response.total_tokens(), 300);
assert_eq!(response.average_cost_per_request(), Some(0.0015));
// One of the two records counts as successful/streamed, hence 50%.
// NOTE(review): record 2's `CancellationStatus::Completed` counts against
// the success rate here — confirm that is the intended semantics.
assert_eq!(response.success_rate(), 50.0);
assert_eq!(response.streaming_rate(), 50.0);
assert_eq!(response.average_latency_seconds(), Some(1.5));
// Each feature was used by exactly one of the two records.
let feature_usage = response.feature_usage_percentages();
assert_eq!(feature_usage.web_search, 50.0);
assert_eq!(feature_usage.media, 50.0);
assert_eq!(feature_usage.reasoning, 50.0);
assert_eq!(feature_usage.streaming, 50.0);
Ok(())
}
/// Each `ActivityRequest` builder setter must land its value in the
/// corresponding field.
#[tokio::test]
async fn test_analytics_convenience_methods() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::analytics::ActivityRequest;
    // Client construction is incidental; the subject is the builder below.
    let client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_api_key("sk-1234567890abcdef1234567890abcdef")?;
    let _analytics = client.analytics()?;
    // Chain every setter once.
    let query = ActivityRequest::new()
        .with_start_date("2024-01-01")
        .with_end_date("2024-01-07")
        .with_model("test-model")
        .with_provider("test-provider")
        .with_sort(crate::types::analytics::SortField::CreatedAt)
        .with_order(crate::types::analytics::SortOrder::Descending)
        .with_limit(100)
        .with_offset(0);
    // Confirm each value landed.
    assert_eq!(query.start_date, Some("2024-01-01".to_string()));
    assert_eq!(query.end_date, Some("2024-01-07".to_string()));
    assert_eq!(query.model, Some("test-model".to_string()));
    assert_eq!(query.provider, Some("test-provider".to_string()));
    assert_eq!(
        query.sort,
        Some(crate::types::analytics::SortField::CreatedAt)
    );
    assert_eq!(
        query.order,
        Some(crate::types::analytics::SortOrder::Descending)
    );
    assert_eq!(query.limit, Some(100));
    assert_eq!(query.offset, Some(0));
    Ok(())
}
/// Smoke test: a configured client yields a providers API handle. No real
/// request is made (tests run without network); a pre-resolved future
/// stands in for the eventual `list` call.
#[tokio::test]
async fn test_providers_api_basic_functionality() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::ProvidersResponse;
    let api_key = "sk-1234567890abcdef1234567890abcdef";
    let client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_api_key(api_key)?;
    // Underscore-prefixed: the handle is only created, never called here.
    let _providers_api = client.providers()?;
    // Placeholder for a real API call; uses the local import instead of the
    // redundant fully-qualified `crate::types::` paths.
    let _providers_result: Result<ProvidersResponse, Box<dyn std::error::Error>> =
        futures::future::ready(Ok(ProvidersResponse::new(vec![]))).await;
    Ok(())
}
/// Covers `Provider` predicates and domain extractors for both a provider
/// with every optional URL populated and one with none.
#[tokio::test]
async fn test_provider_type_functionality() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::Provider;
    // All optional URLs present.
    let full = Provider::new(
        "OpenAI".to_string(),
        "openai".to_string(),
        Some("https://openai.com/policies".to_string()),
        Some("https://openai.com/terms".to_string()),
        Some("https://status.openai.com".to_string()),
    );
    assert_eq!(full.name, "OpenAI");
    assert_eq!(full.slug, "openai");
    assert!(full.has_privacy_policy());
    assert!(full.has_terms_of_service());
    assert!(full.has_status_page());
    assert_eq!(full.privacy_policy_domain(), Some("openai.com".to_string()));
    assert_eq!(full.terms_of_service_domain(), Some("openai.com".to_string()));
    assert_eq!(
        full.status_page_domain(),
        Some("status.openai.com".to_string())
    );
    // No optional URLs: every predicate is false, every domain is None.
    let bare = Provider::new(
        "Test Provider".to_string(),
        "test".to_string(),
        None,
        None,
        None,
    );
    assert!(!bare.has_privacy_policy());
    assert!(!bare.has_terms_of_service());
    assert!(!bare.has_status_page());
    assert_eq!(bare.privacy_policy_domain(), None);
    assert_eq!(bare.terms_of_service_domain(), None);
    assert_eq!(bare.status_page_domain(), None);
    Ok(())
}
/// Covers `ProvidersResponse` collection helpers (lookup, filtering,
/// sorting, grouping) over three fixtures with varying URL coverage.
#[tokio::test]
async fn test_providers_response_functionality() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::{Provider, ProvidersResponse};
    // Fixtures: full URLs, partial URLs, and none at all.
    let catalog = ProvidersResponse::new(vec![
        Provider::new(
            "OpenAI".to_string(),
            "openai".to_string(),
            Some("https://openai.com/policies".to_string()),
            Some("https://openai.com/terms".to_string()),
            Some("https://status.openai.com".to_string()),
        ),
        Provider::new(
            "Anthropic".to_string(),
            "anthropic".to_string(),
            Some("https://anthropic.com/policies".to_string()),
            Some("https://anthropic.com/terms".to_string()),
            None,
        ),
        Provider::new(
            "Minimal Provider".to_string(),
            "minimal".to_string(),
            None,
            None,
            None,
        ),
    ]);
    assert_eq!(catalog.count(), 3);
    // Slug lookup is exact; name lookup matches case-insensitively
    // (finds "Anthropic" via "ANTHROPIC").
    assert_eq!(catalog.find_by_slug("openai").unwrap().name, "OpenAI");
    assert!(catalog.find_by_slug("nonexistent").is_none());
    assert_eq!(catalog.find_by_name("ANTHROPIC").unwrap().slug, "anthropic");
    // Filters by which optional URLs are present.
    assert_eq!(catalog.with_privacy_policy().len(), 2);
    assert_eq!(catalog.with_terms_of_service().len(), 2);
    assert_eq!(catalog.with_status_page().len(), 1);
    // Sorted listings come back alphabetical.
    assert_eq!(catalog.sorted_slugs(), vec!["anthropic", "minimal", "openai"]);
    assert_eq!(
        catalog.sorted_names(),
        vec!["Anthropic", "Minimal Provider", "OpenAI"]
    );
    // Domain grouping: one provider per observed domain, with "unknown" as
    // the bucket for the URL-less provider.
    let by_domain = catalog.group_by_domain();
    assert_eq!(by_domain.get("openai.com").unwrap().len(), 1);
    assert_eq!(by_domain.get("anthropic.com").unwrap().len(), 1);
    assert_eq!(by_domain.get("unknown").unwrap().len(), 1);
    Ok(())
}
/// Compile-level smoke test: a configured client yields a providers API
/// handle that can be borrowed.
#[tokio::test]
async fn test_providers_api_method_signatures() -> Result<(), Box<dyn std::error::Error>> {
    let client = OpenRouterClient::<Unconfigured>::new()
        .with_base_url("https://openrouter.ai/api/v1/")?
        .with_api_key("sk-1234567890abcdef1234567890abcdef")?;
    let providers_api = client.providers()?;
    let _borrowed = &providers_api;
    Ok(())
}
/// Domain extraction: valid URLs yield their host; malformed URLs
/// (including a scheme with no host) yield `None`.
#[tokio::test]
async fn test_provider_url_validation() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::Provider;
    let well_formed = Provider::new(
        "Test Provider".to_string(),
        "test".to_string(),
        Some("https://example.com/privacy".to_string()),
        Some("https://example.com/terms".to_string()),
        Some("https://status.example.com".to_string()),
    );
    assert_eq!(
        well_formed.privacy_policy_domain(),
        Some("example.com".to_string())
    );
    assert_eq!(
        well_formed.terms_of_service_domain(),
        Some("example.com".to_string())
    );
    assert_eq!(
        well_formed.status_page_domain(),
        Some("status.example.com".to_string())
    );
    let malformed = Provider::new(
        "Invalid Provider".to_string(),
        "invalid".to_string(),
        Some("not-a-url".to_string()),
        Some("also-not-a-url".to_string()),
        Some("https://".to_string()),
    );
    assert_eq!(malformed.privacy_policy_domain(), None);
    assert_eq!(malformed.terms_of_service_domain(), None);
    assert_eq!(malformed.status_page_domain(), None);
    Ok(())
}
#[tokio::test]
async fn test_default_implementations() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::{ChatCompletionRequest, Message, MessageContent};
    // Expands to one `assert_eq!(target.field, None)` per listed field,
    // collapsing the long runs of identical optional-field checks.
    macro_rules! assert_fields_none {
        ($target:expr, $($field:ident),+ $(,)?) => {
            $(assert_eq!($target.$field, None);)+
        };
    }
    // A defaulted message is an empty user text message with no tool metadata.
    let message = Message::default();
    assert_eq!(message.role, ChatRole::User);
    assert_eq!(message.content, MessageContent::Text("".to_string()));
    assert_fields_none!(message, name, tool_call_id, tool_calls);
    // A defaulted request has an empty model, no messages, and every tuning
    // knob left unset.
    let request = ChatCompletionRequest::default();
    assert_eq!(request.model, "");
    assert_eq!(request.messages.len(), 0);
    assert_fields_none!(
        request,
        stream,
        response_format,
        tools,
        tool_choice,
        provider,
        models,
        transforms,
        route,
        user,
        max_tokens,
        temperature,
        top_p,
        top_k,
        frequency_penalty,
        presence_penalty,
        repetition_penalty,
        min_p,
        top_a,
        seed,
        stop,
        logit_bias,
        logprobs,
        top_logprobs,
        prediction,
        parallel_tool_calls,
        verbosity,
    );
    // Struct-update syntax should override only the fields we name and leave
    // everything else at its default.
    let model_name = "openai/gpt-4o";
    let text = "What is a phantom type in Rust?";
    let populated = ChatCompletionRequest {
        model: model_name.to_string(),
        messages: vec![Message {
            role: ChatRole::User,
            content: MessageContent::Text(text.to_string()),
            ..Default::default()
        }],
        ..Default::default()
    };
    assert_eq!(populated.model, model_name);
    assert_eq!(populated.messages.len(), 1);
    assert_eq!(populated.messages[0].role, ChatRole::User);
    assert_eq!(
        populated.messages[0].content,
        MessageContent::Text(text.to_string())
    );
    assert_fields_none!(populated.messages[0], name, tool_call_id, tool_calls);
    assert_fields_none!(
        populated,
        stream,
        response_format,
        tools,
        tool_choice,
        provider,
        models,
        transforms,
        route,
        user,
        max_tokens,
        temperature,
        top_p,
        top_k,
        frequency_penalty,
        presence_penalty,
        repetition_penalty,
        min_p,
        top_a,
        seed,
        stop,
        logit_bias,
        logprobs,
        top_logprobs,
        prediction,
        parallel_tool_calls,
        verbosity,
    );
    Ok(())
}
#[tokio::test]
async fn test_reasoning_config_effort_serialization() -> Result<(), Box<dyn std::error::Error>>
{
    use crate::types::chat::{ReasoningConfig, ReasoningEffort};
    // Table-driven check: each effort level must serialize to exactly its
    // lowercase wire representation with no other keys present.
    let cases = [
        (ReasoningEffort::High, r#"{"effort":"high"}"#),
        (ReasoningEffort::Minimal, r#"{"effort":"minimal"}"#),
        (ReasoningEffort::XHigh, r#"{"effort":"xhigh"}"#),
    ];
    for (effort, expected) in cases {
        let config = ReasoningConfig::with_effort(effort);
        assert_eq!(serde_json::to_string(&config)?, expected);
    }
    Ok(())
}
#[tokio::test]
async fn test_reasoning_config_full_serialization() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::{ReasoningConfig, ReasoningEffort, ReasoningSummary};
    // With every field populated, the serialized object must carry all four
    // keys with their expected wire values.
    let config = ReasoningConfig {
        effort: Some(ReasoningEffort::High),
        max_tokens: Some(5000),
        enabled: Some(true),
        summary: Some(ReasoningSummary::Detailed),
    };
    let serialized = serde_json::to_string(&config)?;
    let actual: serde_json::Value = serde_json::from_str(&serialized)?;
    let expected = json!({
        "effort": "high",
        "max_tokens": 5000,
        "enabled": true,
        "summary": "detailed"
    });
    assert_eq!(actual, expected);
    Ok(())
}
#[tokio::test]
async fn test_reasoning_config_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::{ReasoningConfig, ReasoningEffort, ReasoningSummary};
    // Serialize-then-deserialize must reproduce the original config exactly.
    let original = ReasoningConfig {
        effort: Some(ReasoningEffort::Low),
        max_tokens: Some(1024),
        enabled: Some(true),
        summary: Some(ReasoningSummary::Concise),
    };
    let wire = serde_json::to_string(&original)?;
    assert_eq!(original, serde_json::from_str::<ReasoningConfig>(&wire)?);
    Ok(())
}
#[tokio::test]
async fn test_reasoning_details_deserialization() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::{Message, ReasoningDetail};
    // An assistant message may carry typed `reasoning_details` entries
    // alongside the plain-text `reasoning` field; both variants must decode.
    let payload = r#"{
        "role": "assistant",
        "content": "Final answer",
        "reasoning": "internal reasoning excerpt",
        "reasoning_details": [
            {
                "type": "reasoning.summary",
                "summary": "Summarized chain of thought"
            },
            {
                "type": "reasoning.text",
                "text": "Step-by-step reasoning",
                "signature": "sig-123"
            }
        ]
    }"#;
    let message: Message = serde_json::from_str(payload)?;
    let details = message.reasoning_details.expect("reasoning details");
    assert_eq!(details.len(), 2);
    match &details[0] {
        ReasoningDetail::Summary { summary, .. } => {
            assert_eq!(summary, "Summarized chain of thought");
        }
        _ => panic!("expected a reasoning.summary detail"),
    }
    match &details[1] {
        ReasoningDetail::Text {
            text, signature, ..
        } => {
            assert_eq!(text.as_deref(), Some("Step-by-step reasoning"));
            assert_eq!(signature.as_deref(), Some("sig-123"));
        }
        _ => panic!("expected a reasoning.text detail"),
    }
    Ok(())
}
#[tokio::test]
async fn test_plugin_constructors() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::Plugin;
    // Every convenience constructor should set only its plugin id, leaving
    // `enabled` and `config` untouched.
    let expectations = [
        (Plugin::web_search(), "web"),
        (Plugin::response_healing(), "response-healing"),
        (Plugin::file_parser(), "file-parser"),
        (Plugin::context_compression(), "context-compression"),
    ];
    for (plugin, expected_id) in expectations {
        assert_eq!(plugin.id, expected_id);
        assert_eq!(plugin.enabled, None);
        assert!(plugin.config.is_none());
    }
    Ok(())
}
#[tokio::test]
async fn test_plugin_serialization() -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::Plugin;
    use serde_json::json;
    // Config entries are flattened into the top-level object when
    // serializing — there must be no nested `config` key on the wire.
    let plugin = Plugin::web_search()
        .with_enabled(false)
        .with_config(json!({ "max_results": 5, "scope": "news" }));
    let serialized = serde_json::to_string(&plugin)?;
    let value: serde_json::Value = serde_json::from_str(&serialized)?;
    assert_eq!(value["id"], "web");
    assert_eq!(value["enabled"], false);
    assert_eq!(value["max_results"], 5);
    assert_eq!(value["scope"], "news");
    assert!(value.get("config").is_none());
    Ok(())
}
#[tokio::test]
async fn test_plugin_deserialization_supports_flattened_schema(
) -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::Plugin;
    // Unknown top-level keys in the wire format should be collected back
    // into the plugin's `config` map on deserialization.
    let payload = r#"{
        "id": "web",
        "enabled": true,
        "max_results": 10,
        "scope": "docs"
    }"#;
    let plugin: Plugin = serde_json::from_str(payload)?;
    assert_eq!(plugin.id, "web");
    assert_eq!(plugin.enabled, Some(true));
    assert_eq!(
        plugin
            .config
            .as_ref()
            .and_then(|cfg| cfg.get("max_results")),
        Some(&serde_json::Value::from(10))
    );
    assert_eq!(
        plugin.config.as_ref().and_then(|cfg| cfg.get("scope")),
        Some(&serde_json::Value::from("docs"))
    );
    Ok(())
}
#[tokio::test]
async fn test_prompt_tokens_details_cache_fields_deserialization(
) -> Result<(), Box<dyn std::error::Error>> {
    use crate::types::chat::PromptTokensDetails;
    // Cache- and video-related counters present in the payload should
    // populate; fields absent from the payload must stay None.
    let payload = r#"{
        "cached_tokens": 100,
        "cache_write_tokens": 200,
        "video_tokens": 50
    }"#;
    let details: PromptTokensDetails = serde_json::from_str(payload)?;
    assert_eq!(details.cached_tokens, Some(100));
    assert_eq!(details.cache_write_tokens, Some(200));
    assert_eq!(details.video_tokens, Some(50));
    // Absent counters.
    assert!(details.audio_tokens.is_none());
    assert!(details.text_tokens.is_none());
    assert!(details.image_tokens.is_none());
    Ok(())
}
#[tokio::test]
async fn test_usage_server_tool_use_deserialization() -> Result<(), Box<dyn std::error::Error>>
{
    use crate::types::chat::Usage;
    // `server_tool_use` is an optional nested block of per-tool request
    // counters; it must decode alongside the standard token totals.
    let payload = r#"{
        "prompt_tokens": 100,
        "completion_tokens": 50,
        "total_tokens": 150,
        "server_tool_use": {
            "web_search_requests": 2,
            "web_fetch_requests": 1
        }
    }"#;
    let usage: Usage = serde_json::from_str(payload)?;
    assert_eq!(usage.prompt_tokens, 100);
    assert_eq!(usage.completion_tokens, 50);
    assert_eq!(usage.total_tokens, 150);
    assert!(usage.cost.is_none());
    let tool_use = usage.server_tool_use.expect("server tool use");
    assert_eq!(tool_use.web_search_requests, Some(2));
    assert_eq!(tool_use.web_fetch_requests, Some(1));
    Ok(())
}
}