use simple_agent_type::prelude::*;
#[test]
fn test_complete_request_response_cycle() {
    // Build a request via the builder and confirm the model and message
    // content survive a JSON round trip.
    let req = CompletionRequest::builder()
        .model("gpt-4")
        .message(Message::system("You are helpful."))
        .message(Message::user("Hello!"))
        .temperature(0.7)
        .max_tokens(100)
        .build()
        .unwrap();

    let serialized = serde_json::to_string(&req).unwrap();
    assert!(serialized.contains("gpt-4"));
    assert!(serialized.contains("Hello!"));

    let round_tripped: CompletionRequest = serde_json::from_str(&serialized).unwrap();
    assert_eq!(round_tripped.model, "gpt-4");
    assert_eq!(round_tripped.messages.len(), 2);

    // Hand-construct a response and verify the accessors plus its own
    // serialization round trip.
    let resp = CompletionResponse {
        id: "resp_123".to_string(),
        model: "gpt-4".to_string(),
        choices: vec![CompletionChoice {
            index: 0,
            message: Message::assistant("Hi there!"),
            finish_reason: FinishReason::Stop,
            logprobs: None,
        }],
        usage: Usage::new(20, 10),
        created: Some(1234567890),
        provider: Some("openai".to_string()),
        healing_metadata: None,
    };
    assert_eq!(resp.content(), Some("Hi there!"));
    // Usage::new(20, 10) is expected to sum to 30 total tokens.
    assert_eq!(resp.usage.total_tokens, 30);

    let resp_serialized = serde_json::to_string(&resp).unwrap();
    let resp_round_tripped: CompletionResponse = serde_json::from_str(&resp_serialized).unwrap();
    assert_eq!(resp_round_tripped.id, "resp_123");
}
#[test]
fn test_error_handling_flow() {
    // A rate limit is transient, so it should be flagged as retryable.
    let transient = ProviderError::RateLimit {
        retry_after: Some(std::time::Duration::from_secs(60)),
    };
    assert!(transient.is_retryable());

    // A rejected API key will never succeed on retry.
    let permanent = ProviderError::InvalidApiKey;
    assert!(!permanent.is_retryable());

    // Provider errors must convert into the top-level error enum.
    let wrapped: SimpleAgentsError = transient.into();
    assert!(matches!(wrapped, SimpleAgentsError::Provider(_)));

    // Validation errors convert the same way.
    let out_of_range = ValidationError::OutOfRange {
        field: "temperature".to_string(),
        min: 0.0,
        max: 2.0,
    };
    let wrapped: SimpleAgentsError = out_of_range.into();
    assert!(matches!(wrapped, SimpleAgentsError::Validation(_)));
}
#[test]
fn test_coercion_tracking() {
    // Accumulate several coercion flags and a confidence score on a result.
    let tracked = CoercionResult::new("fixed data")
        .with_flag(CoercionFlag::StrippedMarkdown)
        .with_flag(CoercionFlag::FixedTrailingComma)
        .with_flag(CoercionFlag::TypeCoercion {
            from: "string".to_string(),
            to: "number".to_string(),
        })
        .set_confidence(0.85);

    assert!(tracked.was_coerced());
    assert_eq!(tracked.flags.len(), 3);
    assert!(tracked.has_major_coercions());

    // Threshold checks bracket the stored confidence of 0.85.
    assert!(tracked.is_confident(0.8));
    assert!(!tracked.is_confident(0.9));

    // map() transforms the value but must leave flags and confidence intact.
    let upper = tracked.map(|s| s.to_uppercase());
    assert_eq!(upper.value, "FIXED DATA");
    assert_eq!(upper.flags.len(), 3);
    assert_eq!(upper.confidence, 0.85);
}
#[test]
fn test_api_key_security() {
    let secret = ApiKey::new("sk-1234567890abcdefghijklmnopqrstuvwxyz").unwrap();

    // Debug formatting must never leak the raw key material.
    let rendered = format!("{:?}", secret);
    assert!(rendered.contains("REDACTED"));
    assert!(!rendered.contains("sk-"));

    // Serialization is redacted as well.
    assert_eq!(serde_json::to_string(&secret).unwrap(), "\"[REDACTED]\"");

    // The preview exposes only the prefix and a length hint, not the body.
    let hint = secret.preview();
    assert!(hint.contains("sk-"));
    assert!(hint.contains("chars"));
    assert!(!hint.contains("abcdef"));

    // Explicit exposure still yields the full key (3-char prefix + 36 chars).
    assert_eq!(secret.expose().len(), 39);
}
#[test]
fn test_retry_config_defaults() {
    use simple_agent_type::provider::RetryConfig;

    // The default retry policy allows up to three attempts.
    let config = RetryConfig::default();
    assert_eq!(config.max_attempts, 3);
}
#[test]
fn test_request_validation() {
    // A minimal request with a model and one message builds successfully.
    assert!(CompletionRequest::builder()
        .model("gpt-4")
        .message(Message::user("test"))
        .build()
        .is_ok());

    // Omitting the model is rejected.
    assert!(CompletionRequest::builder()
        .message(Message::user("test"))
        .build()
        .is_err());

    // A temperature outside the accepted range is rejected.
    assert!(CompletionRequest::builder()
        .model("gpt-4")
        .message(Message::user("test"))
        .temperature(3.0)
        .build()
        .is_err());

    // A top_p above 1.0 is rejected.
    assert!(CompletionRequest::builder()
        .model("gpt-4")
        .message(Message::user("test"))
        .top_p(1.5)
        .build()
        .is_err());

    // A model name containing an invalid character is rejected.
    assert!(CompletionRequest::builder()
        .model("gpt-4!")
        .message(Message::user("test"))
        .build()
        .is_err());
}
#[test]
fn test_streaming_types() {
    // One streamed delta carrying the assistant role and a text fragment.
    let delta = MessageDelta {
        role: Some(Role::Assistant),
        content: Some("Hello".to_string()),
        reasoning_content: None,
        tool_calls: None,
    };
    let chunk = CompletionChunk {
        id: "chunk_123".to_string(),
        model: "gpt-4".to_string(),
        choices: vec![ChoiceDelta {
            index: 0,
            delta,
            finish_reason: None,
        }],
        created: Some(1234567890),
        usage: None,
    };

    // Chunks must survive a JSON round trip with id and content intact.
    let encoded = serde_json::to_string(&chunk).unwrap();
    let decoded: CompletionChunk = serde_json::from_str(&encoded).unwrap();
    assert_eq!(decoded.id, "chunk_123");
    assert_eq!(decoded.choices[0].delta.content, Some("Hello".to_string()));
}
#[test]
fn test_provider_request_response() {
    // The request builder should record url, headers, body, and timeout.
    let thirty_secs = std::time::Duration::from_secs(30);
    let request = ProviderRequest::new("https://api.example.com/v1/chat")
        .with_header("Authorization", "Bearer sk-test")
        .with_header("Content-Type", "application/json")
        .with_body(serde_json::json!({"model": "test"}))
        .with_timeout(thirty_secs);
    assert_eq!(request.url, "https://api.example.com/v1/chat");
    assert_eq!(request.headers.len(), 2);
    assert_eq!(request.timeout, Some(thirty_secs));

    // A 200 classifies as success and as neither error category.
    let ok = ProviderResponse::new(200, serde_json::json!({"status": "ok"}));
    assert!(ok.is_success());
    assert!(!ok.is_client_error());
    assert!(!ok.is_server_error());

    // A 500 classifies as a server error, not a success.
    let failed = ProviderResponse::new(500, serde_json::json!({"error": "server error"}));
    assert!(!failed.is_success());
    assert!(failed.is_server_error());
}
#[test]
fn test_all_types_are_send_sync() {
    // Compile-time guarantee: every public type can cross thread boundaries.
    // The function body is empty — the trait bounds are the whole test.
    fn require_send_sync<T: Send + Sync>() {}

    // Core message / request / response types.
    require_send_sync::<Message>();
    require_send_sync::<CompletionRequest>();
    require_send_sync::<CompletionResponse>();
    require_send_sync::<Usage>();

    // Error hierarchy.
    require_send_sync::<SimpleAgentsError>();
    require_send_sync::<ProviderError>();
    require_send_sync::<HealingError>();
    require_send_sync::<ValidationError>();

    // Coercion tracking.
    require_send_sync::<CoercionFlag>();
    require_send_sync::<CoercionResult<String>>();

    // Provider transport types.
    require_send_sync::<ProviderRequest>();
    require_send_sync::<ProviderResponse>();
}