// File: ecl_core/llm/claude.rs

1//! Claude API provider implementation.
2
3use async_trait::async_trait;
4
5use super::provider::{
6    CompletionRequest, CompletionResponse, CompletionStream, LlmProvider, StopReason, TokenUsage,
7};
8use crate::{Error, Result};
9
/// LLM provider using Anthropic's Claude API.
pub struct ClaudeProvider {
    // Anthropic API key; sent on every request via the `x-api-key` header.
    api_key: String,
    // Model ID included in each request body (e.g., "claude-sonnet-4-20250514").
    model: String,
    // HTTP client, created once at construction and reused for all requests.
    client: reqwest::Client,
}
16
17impl ClaudeProvider {
18    /// Creates a new Claude provider.
19    ///
20    /// # Arguments
21    ///
22    /// * `api_key` - Anthropic API key
23    /// * `model` - Model ID (e.g., "claude-sonnet-4-20250514")
24    pub fn new(api_key: impl Into<String>, model: impl Into<String>) -> Self {
25        Self {
26            api_key: api_key.into(),
27            model: model.into(),
28            client: reqwest::Client::new(),
29        }
30    }
31}
32
33#[async_trait]
34impl LlmProvider for ClaudeProvider {
35    async fn complete(&self, request: CompletionRequest) -> Result<CompletionResponse> {
36        // Build Claude API request
37        let mut body = serde_json::json!({
38            "model": self.model,
39            "max_tokens": request.max_tokens,
40            "messages": request.messages,
41        });
42
43        if let Some(system) = request.system_prompt {
44            body["system"] = serde_json::json!(system);
45        }
46
47        if let Some(temp) = request.temperature {
48            body["temperature"] = serde_json::json!(temp);
49        }
50
51        if !request.stop_sequences.is_empty() {
52            body["stop_sequences"] = serde_json::json!(request.stop_sequences);
53        }
54
55        // Make API request
56        let response = self
57            .client
58            .post("https://api.anthropic.com/v1/messages")
59            .header("x-api-key", &self.api_key)
60            .header("anthropic-version", "2023-06-01")
61            .header("content-type", "application/json")
62            .json(&body)
63            .send()
64            .await
65            .map_err(|e| Error::llm_with_source("Failed to call Claude API", e))?;
66
67        // Check for errors
68        if !response.status().is_success() {
69            let status = response.status();
70            let error_text = response
71                .text()
72                .await
73                .unwrap_or_else(|_| "Unknown error".to_string());
74            return Err(Error::llm(format!(
75                "Claude API error {}: {}",
76                status, error_text
77            )));
78        }
79
80        // Parse response
81        let response_body: serde_json::Value = response
82            .json()
83            .await
84            .map_err(|e| Error::llm_with_source("Failed to parse Claude response", e))?;
85
86        // Extract content
87        let content = response_body["content"][0]["text"]
88            .as_str()
89            .ok_or_else(|| Error::llm("Missing content in Claude response"))?
90            .to_string();
91
92        // Extract token usage
93        let usage = response_body["usage"]
94            .as_object()
95            .ok_or_else(|| Error::llm("Missing usage data in Claude response"))?;
96
97        let input_tokens = usage["input_tokens"]
98            .as_u64()
99            .ok_or_else(|| Error::llm("Invalid input_tokens"))?;
100        let output_tokens = usage["output_tokens"]
101            .as_u64()
102            .ok_or_else(|| Error::llm("Invalid output_tokens"))?;
103
104        // Extract stop reason
105        let stop_reason_str = response_body["stop_reason"]
106            .as_str()
107            .ok_or_else(|| Error::llm("Missing stop_reason"))?;
108
109        let stop_reason = match stop_reason_str {
110            "end_turn" => StopReason::EndTurn,
111            "max_tokens" => StopReason::MaxTokens,
112            "stop_sequence" => StopReason::StopSequence,
113            other => return Err(Error::llm(format!("Unknown stop reason: {}", other))),
114        };
115
116        Ok(CompletionResponse {
117            content,
118            tokens_used: TokenUsage {
119                input: input_tokens,
120                output: output_tokens,
121            },
122            stop_reason,
123        })
124    }
125
126    async fn complete_streaming(&self, _request: CompletionRequest) -> Result<CompletionStream> {
127        // Streaming implementation deferred to Phase 3
128        Err(Error::llm("Streaming not yet implemented"))
129    }
130}
131
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use crate::llm::Message;

    // Construction stores the key and model verbatim.
    #[test]
    fn test_claude_provider_construction() {
        let provider = ClaudeProvider::new("test-key", "claude-3-opus");
        assert_eq!(provider.model, "claude-3-opus");
        assert_eq!(provider.api_key, "test-key");
    }

    // Integration test (requires API key, run manually)
    #[tokio::test]
    #[ignore]
    #[allow(clippy::expect_used)]
    async fn test_claude_provider_integration() {
        let api_key = std::env::var("ANTHROPIC_API_KEY")
            .expect("ANTHROPIC_API_KEY must be set for integration tests");
        let provider = ClaudeProvider::new(api_key, "claude-sonnet-4-20250514");

        let messages = vec![Message::user("Say hello")];
        let request = CompletionRequest::new(messages).with_max_tokens(100);
        let response = provider.complete(request).await.unwrap();

        assert!(response.tokens_used.output > 0);
        assert!(!response.content.is_empty());
    }
}