//! Integration tests for `openrouter-rust` 0.1.0 —
//! a modular, type-safe Rust client for the OpenRouter API.
//!
//! Each test spins up a `mockito` server and points the client at it.
use mockito::{Server, Matcher};
use openrouter_rust::{OpenRouterClient, ChatCompletionBuilder};
use serde_json::json;

#[tokio::test]
async fn test_chat_completion_success() {
    // Happy path: a well-formed 200 response is deserialized into all
    // expected fields (id, model, choices, finish_reason, usage incl. cost).
    let mut server = Server::new_async().await;

    let mock_response = json!({
        "id": "gen-1234567890",
        "object": "chat.completion",
        "created": 1704067200,
        "model": "openai/gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "The capital of France is Paris."
                },
                "finish_reason": "stop",
                "native_finish_reason": "stop"
            }
        ],
        "usage": {
            "prompt_tokens": 15,
            "completion_tokens": 10,
            "total_tokens": 25,
            "cost": 0.000125
        }
    });

    let mock = server.mock("POST", "/chat/completions")
        .match_header("authorization", "Bearer test-api-key")
        .match_header("content-type", "application/json")
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body(mock_response.to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-api-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
        .user_message("What is the capital of France?")
        .build();

    let response = client.chat_completion(request).await.unwrap();

    assert_eq!(response.id, "gen-1234567890");
    assert_eq!(response.model, "openai/gpt-3.5-turbo");
    assert_eq!(response.choices.len(), 1);
    assert_eq!(response.choices[0].message.content, Some("The capital of France is Paris.".to_string()));
    assert_eq!(response.choices[0].finish_reason, Some("stop".to_string()));

    let usage = response.usage.unwrap();
    assert_eq!(usage.prompt_tokens, 15);
    assert_eq!(usage.completion_tokens, 10);
    assert_eq!(usage.total_tokens, 25);
    // Exact float comparison is safe here: the value round-trips through the
    // same JSON literal on both sides.
    assert_eq!(usage.cost, Some(0.000125));

    // Verify the client actually hit the endpoint with matching headers;
    // without this, a client that never sends the request would still pass.
    mock.assert_async().await;
}

#[tokio::test]
async fn test_chat_completion_with_system_message() {
    // Verifies the exact request body: system + user messages in order,
    // plus the temperature parameter, are serialized as OpenRouter expects.
    let mut server = Server::new_async().await;

    let mock_response = json!({
        "id": "gen-test-id",
        "object": "chat.completion",
        "created": 1704067200,
        "model": "openai/gpt-4",
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "Rust is a systems programming language."
                },
                "finish_reason": "stop"
            }
        ],
        "usage": {
            "prompt_tokens": 20,
            "completion_tokens": 15,
            "total_tokens": 35
        }
    });

    let mock = server.mock("POST", "/chat/completions")
        .match_header("authorization", Matcher::Any)
        .match_body(Matcher::Json(json!({
            "model": "openai/gpt-4",
            "messages": [
                {"role": "system", "content": "You are a helpful coding assistant"},
                {"role": "user", "content": "What is Rust?"}
            ],
            "temperature": 0.5
        })))
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body(mock_response.to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let request = ChatCompletionBuilder::new("openai/gpt-4")
        .system_message("You are a helpful coding assistant")
        .user_message("What is Rust?")
        .temperature(0.5)
        .build();

    let response = client.chat_completion(request).await.unwrap();

    assert_eq!(response.choices[0].message.content, Some("Rust is a systems programming language.".to_string()));

    // The body matcher only rejects mismatched requests; asserting the mock
    // proves a request matching the expected JSON body was actually sent.
    mock.assert_async().await;
}

#[tokio::test]
async fn test_chat_completion_error_401() {
    // A 401 from the server must surface as OpenRouterError::ApiError
    // carrying the HTTP status code, not as a deserialization failure.
    let mut server = Server::new_async().await;

    let mock = server.mock("POST", "/chat/completions")
        .with_status(401)
        .with_header("content-type", "application/json")
        .with_body(json!({
            "error": {
                "message": "Invalid API key",
                "type": "authentication_error"
            }
        }).to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("invalid-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
        .user_message("Hello")
        .build();

    let result = client.chat_completion(request).await;

    assert!(result.is_err());
    match result.unwrap_err() {
        openrouter_rust::OpenRouterError::ApiError { code, .. } => {
            assert_eq!(code, 401);
        }
        _ => panic!("Expected ApiError with 401"),
    }

    // Ensure the error came from the mocked endpoint, not from a
    // connection failure or a request that was never issued.
    mock.assert_async().await;
}

#[tokio::test]
async fn test_chat_completion_error_429() {
    // A 429 (rate limit) must surface as OpenRouterError::ApiError with
    // the status code preserved, so callers can implement backoff.
    let mut server = Server::new_async().await;

    let mock = server.mock("POST", "/chat/completions")
        .with_status(429)
        .with_header("content-type", "application/json")
        .with_body(json!({
            "error": {
                "message": "Rate limit exceeded",
                "type": "rate_limit_error"
            }
        }).to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
        .user_message("Hello")
        .build();

    let result = client.chat_completion(request).await;

    assert!(result.is_err());
    match result.unwrap_err() {
        openrouter_rust::OpenRouterError::ApiError { code, .. } => {
            assert_eq!(code, 429);
        }
        _ => panic!("Expected ApiError with 429"),
    }

    // Ensure the error came from the mocked endpoint, not from a
    // connection failure or a request that was never issued.
    mock.assert_async().await;
}

#[tokio::test]
async fn test_chat_completion_with_tools() {
    // A tool-call response has null content and finish_reason "tool_calls";
    // verify both deserialize correctly when tools are attached.
    let mut server = Server::new_async().await;

    let mock_response = json!({
        "id": "gen-tools-test",
        "object": "chat.completion",
        "created": 1704067200,
        "model": "openai/gpt-4",
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": null,
                    "tool_calls": [
                        {
                            "id": "call_123",
                            "type": "function",
                            "function": {
                                "name": "get_weather",
                                "arguments": "{\"location\":\"Paris\"}"
                            }
                        }
                    ]
                },
                "finish_reason": "tool_calls"
            }
        ],
        "usage": {
            "prompt_tokens": 50,
            "completion_tokens": 20,
            "total_tokens": 70
        }
    });

    let mock = server.mock("POST", "/chat/completions")
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body(mock_response.to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let tool = openrouter_rust::types::Tool {
        tool_type: "function".to_string(),
        function: openrouter_rust::types::Function {
            name: "get_weather".to_string(),
            description: Some("Get weather".to_string()),
            parameters: serde_json::json!({"type": "object"}),
        },
    };

    let request = ChatCompletionBuilder::new("openai/gpt-4")
        .user_message("What's the weather in Paris?")
        .tools(vec![tool])
        .build();

    let response = client.chat_completion(request).await.unwrap();

    assert_eq!(response.choices[0].finish_reason, Some("tool_calls".to_string()));
    // JSON null content must map to None, not an empty string.
    assert_eq!(response.choices[0].message.content, None);

    // Verify the client actually hit the mocked endpoint.
    mock.assert_async().await;
}

#[tokio::test]
async fn test_chat_completion_with_json_mode() {
    // response_format_json() must serialize as {"type": "json_object"} in
    // the request body; the matcher rejects any other shape.
    let mut server = Server::new_async().await;

    let mock_response = json!({
        "id": "gen-json-test",
        "object": "chat.completion",
        "created": 1704067200,
        "model": "openai/gpt-3.5-turbo",
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": "{\"name\":\"John\",\"age\":30}"
                },
                "finish_reason": "stop"
            }
        ],
        "usage": {
            "prompt_tokens": 25,
            "completion_tokens": 15,
            "total_tokens": 40
        }
    });

    let mock = server.mock("POST", "/chat/completions")
        .match_body(Matcher::Json(json!({
            "model": "openai/gpt-3.5-turbo",
            "messages": [{"role": "user", "content": "Generate user JSON"}],
            "response_format": {"type": "json_object"}
        })))
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body(mock_response.to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
        .user_message("Generate user JSON")
        .response_format_json()
        .build();

    let response = client.chat_completion(request).await.unwrap();

    // Verify the content is valid JSON
    let content = response.choices[0].message.content.as_ref().unwrap();
    let parsed: serde_json::Value = serde_json::from_str(content).unwrap();
    assert!(parsed.get("name").is_some());

    // The body matcher only rejects mismatched requests; asserting the mock
    // proves a request with the expected response_format was actually sent.
    mock.assert_async().await;
}