//! Integration tests for the generations endpoint of `openrouter-rust` 0.1.0,
//! a modular, type-safe Rust client for the OpenRouter API.
use mockito::Server;
use openrouter_rust::{
    OpenRouterClient,
    generations::{GenerationResponse, GenerationData, ProviderResponse},
};
use serde_json::json;

/// Happy path: a fully populated generation record is served by the mocked
/// `/generation` endpoint and deserializes into `GenerationResponse`.
#[tokio::test]
async fn test_get_generation() {
    let mut server = Server::new_async().await;

    // Fixture mirroring a complete OpenRouter generation payload. Numeric
    // fields are floats because the API reports them as JSON numbers.
    let mock_response = json!({
        "data": {
            "id": "gen-1234567890",
            "upstream_id": "upstream-abc123",
            "total_cost": 0.0025,
            "cache_discount": 0.0001,
            "upstream_inference_cost": 0.002,
            "created_at": "2024-01-15T10:30:00Z",
            "model": "openai/gpt-4",
            "app_id": 12345.0,
            "streamed": false,
            "cancelled": false,
            "provider_name": "OpenAI",
            "latency": 1500.0,
            "moderation_latency": 50.0,
            "generation_time": 1450.0,
            "finish_reason": "stop",
            "tokens_prompt": 100.0,
            "tokens_completion": 50.0,
            "native_tokens_prompt": 100.0,
            "native_tokens_completion": 50.0,
            "native_tokens_completion_images": 0.0,
            "native_tokens_reasoning": 0.0,
            "native_tokens_cached": 0.0,
            "num_media_prompt": 0.0,
            "num_input_audio_prompt": 0.0,
            "num_media_completion": 0.0,
            "num_search_results": 0.0,
            "origin": "https://example.com",
            "usage": 0.0025,
            "is_byok": false,
            "native_finish_reason": "stop",
            "external_user": "user-123",
            "api_type": "completions",
            "router": "openrouter/auto",
            "provider_responses": [
                {
                    "id": "pr-1",
                    "endpoint_id": "ep-1",
                    "model_permaslug": "openai/gpt-4",
                    "provider_name": "OpenAI",
                    "status": 200.0,
                    "latency": 1450.0,
                    "is_byok": false
                }
            ]
        }
    });

    // The auth-header matcher means the mock only answers requests that
    // carry the expected bearer token.
    let mock = server.mock("GET", "/generation?id=gen-1234567890")
        .match_header("authorization", "Bearer test-key")
        .with_status(200)
        .with_header("content-type", "application/json")
        .with_body(mock_response.to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let response = client.get_generation("gen-1234567890").await.unwrap();

    // Verify the mock was actually hit (path + header matched); without this
    // the header expectation is never positively checked.
    mock.assert_async().await;

    assert_eq!(response.data.id, "gen-1234567890");
    assert_eq!(response.data.model, "openai/gpt-4");
    assert_eq!(response.data.total_cost, 0.0025);
    assert_eq!(response.data.tokens_prompt, Some(100.0));
    assert_eq!(response.data.tokens_completion, Some(50.0));
    assert_eq!(response.data.latency, Some(1500.0));
    assert!(!response.data.is_byok);
}

/// Error path: a 404 from `/generation` must surface as an `Err` from
/// `get_generation`, not a panic or a silently-empty success.
#[tokio::test]
async fn test_get_generation_not_found() {
    let mut server = Server::new_async().await;

    let mock = server.mock("GET", "/generation?id=gen-notfound")
        .with_status(404)
        .with_header("content-type", "application/json")
        .with_body(json!({"error": "Generation not found"}).to_string())
        .create_async()
        .await;

    let client = OpenRouterClient::builder()
        .api_key("test-key")
        .base_url(&server.url())
        .build()
        .unwrap();

    let result = client.get_generation("gen-notfound").await;
    assert!(result.is_err());

    // Confirm the error came from the mocked 404 and not from e.g. a wrong
    // URL that never reached the endpoint.
    mock.assert_async().await;
}

/// Sanity check that `ProviderResponse` can be constructed field-by-field
/// and that every field round-trips the value it was given.
#[test]
fn test_generation_data_struct() {
    let provider_response = ProviderResponse {
        id: "pr-1".to_string(),
        endpoint_id: "ep-1".to_string(),
        model_permaslug: "model-1".to_string(),
        provider_name: "Test Provider".to_string(),
        status: Some(200.0),
        latency: 1000.0,
        is_byok: false,
    };

    // Assert every field, not just a subset, so the fixture pins the
    // struct's full contents.
    assert_eq!(provider_response.id, "pr-1");
    assert_eq!(provider_response.endpoint_id, "ep-1");
    assert_eq!(provider_response.model_permaslug, "model-1");
    assert_eq!(provider_response.provider_name, "Test Provider");
    assert_eq!(provider_response.status, Some(200.0));
    assert_eq!(provider_response.latency, 1000.0);
    assert!(!provider_response.is_byok);
}