# rainy-sdk 0.6.14

Official Rust SDK for the Rainy API by Enosis Labs (v0.6.14) — OpenAI/GPT-5 parity, native streaming events, and removal of the legacy static model constants.

## Documentation
use rainy_sdk::{
    AuthConfig, ChatCompletionRequest, ChatMessage, MessageRole, RainyError, RainySessionClient,
    RetryConfig, SessionConfig, ThinkingLevel,
};

#[cfg(test)]
mod tests {
    use super::*;

    // NOTE(review): standard API keys appear to be `ra-` followed by 48 hex
    // characters (51 chars total) — inferred from the fixtures below; confirm
    // against AuthConfig's validation rules.

    #[test]
    fn test_auth_config_validation() {
        // A well-formed standard key (`ra-` + 48 chars) passes validation.
        let well_formed = format!("ra-{}", "a".repeat(48));
        assert!(AuthConfig::new(&well_formed).validate().is_ok());

        // A key missing the `ra-` prefix is rejected.
        assert!(AuthConfig::new("invalid-key").validate().is_err());

        // Correct prefix but wrong length is also rejected.
        assert!(AuthConfig::new("ra-tooshort").validate().is_err());
    }

    #[test]
    fn test_auth_config_builder() {
        // Builder setters must both record their values and keep the config valid.
        let well_formed = format!("ra-{}", "c".repeat(48));
        let cfg = AuthConfig::new(&well_formed)
            .with_timeout(60)
            .with_max_retries(5);

        assert!(cfg.validate().is_ok());
        assert_eq!(cfg.timeout_seconds, 60);
        assert_eq!(cfg.max_retries, 5);
    }

    #[test]
    fn test_session_client_builder() {
        // A client built from an explicit SessionConfig reflects that config
        // and starts out unauthenticated.
        let session_cfg = SessionConfig::new()
            .with_base_url("http://localhost:3000")
            .with_timeout(15);
        let client =
            RainySessionClient::with_config(session_cfg).expect("session client should build");

        assert_eq!(client.base_url(), "http://localhost:3000");
        assert!(client.access_token().is_none());
    }

    #[test]
    fn test_chat_message_creation() {
        // Each role-specific constructor sets the matching role and stores
        // the content verbatim.
        let from_user = ChatMessage::user("Hello");
        let from_system = ChatMessage::system("You are helpful");
        let from_assistant = ChatMessage::assistant("Hi there");

        assert_eq!(from_user.role, MessageRole::User);
        assert_eq!(from_system.role, MessageRole::System);
        assert_eq!(from_assistant.role, MessageRole::Assistant);

        assert_eq!(from_user.content, "Hello");
        assert_eq!(from_system.content, "You are helpful");
        assert_eq!(from_assistant.content, "Hi there");
    }

    #[test]
    fn test_chat_completion_request_builder() {
        // Builder setters wrap optional fields in Some and leave the
        // required model/messages untouched.
        let history = vec![ChatMessage::user("Test message")];
        let request = ChatCompletionRequest::new("gpt-4o", history.clone())
            .with_temperature(0.7)
            .with_max_tokens(100)
            .with_user("test-user");

        assert_eq!(request.model, "gpt-4o");
        assert_eq!(request.messages, history);
        assert_eq!(request.temperature, Some(0.7));
        assert_eq!(request.max_tokens, Some(100));
        assert_eq!(request.user.as_deref(), Some("test-user"));
    }

    #[test]
    fn test_retry_config() {
        let retry = RetryConfig::new(5);
        assert_eq!(retry.max_retries, 5);

        // Backoff delays must be non-decreasing across attempts and never
        // exceed the configured ceiling.
        let delays: Vec<_> = (0..3).map(|attempt| retry.delay_for_attempt(attempt)).collect();

        assert!(delays[1].as_millis() >= delays[0].as_millis());
        assert!(delays[2].as_millis() >= delays[1].as_millis());
        assert!(delays[2].as_millis() <= retry.max_delay_ms as u128);
    }

    #[test]
    fn test_error_retryability() {
        // Authentication failures are terminal — never retried.
        let auth = RainyError::Authentication {
            code: String::from("INVALID_KEY"),
            message: String::from("Invalid key"),
            retryable: false,
        };
        assert!(!auth.is_retryable());

        // Network failures flagged retryable report themselves as such.
        let network = RainyError::Network {
            message: String::from("Connection failed"),
            retryable: true,
            source_error: None,
        };
        assert!(network.is_retryable());

        // Rate limits are retryable and surface the server-provided delay.
        let throttled = RainyError::RateLimit {
            code: String::from("RATE_LIMIT_EXCEEDED"),
            message: String::from("Too many requests"),
            retry_after: Some(60),
            current_usage: None,
        };
        assert!(throttled.is_retryable());
        assert_eq!(throttled.retry_after(), Some(60));
    }

    #[test]
    fn test_error_codes() {
        // Variants carrying a machine-readable code expose it via code().
        let auth = RainyError::Authentication {
            code: String::from("INVALID_KEY"),
            message: String::from("Invalid key"),
            retryable: false,
        };
        assert_eq!(auth.code(), Some("INVALID_KEY"));

        // Variants without a code return None.
        let network = RainyError::Network {
            message: String::from("Connection failed"),
            retryable: true,
            source_error: None,
        };
        assert_eq!(network.code(), None);
    }

    #[test]
    fn test_thinking_capability_flags() {
        // Collapse both capability queries into a single (supports, requires)
        // pair per model for a compact comparison.
        let flags = |model: &str| {
            let req = ChatCompletionRequest::new(model, vec![]);
            (req.supports_thinking(), req.requires_thought_signatures())
        };

        assert_eq!(flags("gemini-3-pro-preview"), (true, true));
        assert_eq!(flags("gemini-2.5-pro"), (true, false));
        assert_eq!(flags("gpt-4o"), (false, false));
    }

    #[test]
    fn test_thinking_validation_rules() {
        // Gemini 3 models accept thinking_level.
        assert!(ChatCompletionRequest::new("gemini-3-flash-preview", vec![])
            .with_thinking_level(ThinkingLevel::Medium)
            .validate_openai_compatibility()
            .is_ok());

        // thinking_level on a non-Gemini-3 model is rejected.
        let err = ChatCompletionRequest::new("gpt-4o", vec![])
            .with_thinking_level(ThinkingLevel::High)
            .validate_openai_compatibility()
            .err()
            .unwrap();
        assert!(err.contains("thinking_level is only supported for Gemini 3"));

        // Gemini 3 Pro restricts which levels are allowed.
        let err = ChatCompletionRequest::new("gemini-3-pro-preview", vec![])
            .with_thinking_level(ThinkingLevel::Minimal)
            .validate_openai_compatibility()
            .err()
            .unwrap();
        assert!(err.contains("Gemini 3 Pro only supports 'low' and 'high'"));

        // Gemini 2.5 models accept thinking_budget.
        assert!(ChatCompletionRequest::new("gemini-2.5-pro", vec![])
            .with_thinking_budget(1024)
            .validate_openai_compatibility()
            .is_ok());

        // thinking_budget outside Gemini 2.5 is rejected.
        let err = ChatCompletionRequest::new("gemini-3-pro-preview", vec![])
            .with_thinking_budget(1024)
            .validate_openai_compatibility()
            .err()
            .unwrap();
        assert!(err.contains("thinking_budget is only supported for Gemini 2.5"));

        // Combining level and budget fails; either rule may fire first, so
        // accept whichever message the validator produces.
        let err = ChatCompletionRequest::new("gemini-3-pro-preview", vec![])
            .with_thinking_level(ThinkingLevel::High)
            .with_thinking_budget(1024)
            .validate_openai_compatibility()
            .err()
            .unwrap();
        assert!(
            err.contains("Cannot specify both thinking_level")
                || err.contains("thinking_budget is only supported for Gemini 2.5")
        );
    }

    #[cfg(feature = "legacy")]
    #[test]
    fn test_model_constants() {
        use rainy_sdk::models::model_constants::*;

        // Legacy static constants must keep their published string values.
        let expected = [
            (OPENAI_GPT_4O, "gpt-4o"),
            (GOOGLE_GEMINI_2_5_PRO, "gemini-2.5-pro"),
            (GROQ_LLAMA_3_1_8B_INSTANT, "llama-3.1-8b-instant"),
            (CEREBRAS_LLAMA3_1_8B, "cerebras/llama3.1-8b"),
        ];
        for (constant, literal) in expected {
            assert_eq!(constant, literal);
        }
    }
}