praxis_graph/
client_factory.rs

1use crate::types::{LLMConfig, Provider};
2use anyhow::{Result, anyhow};
3use std::sync::Arc;
4use praxis_llm::{LLMClient, ReasoningClient};
5
/// Factory Pattern: centralized logic for client creation and configuration.
///
/// Encapsulates the decision of which LLM client implementation to use for a
/// given model configuration, keeping provider-specific details out of the
/// graph code. A unit struct with no state: everything is exposed through
/// associated functions.
pub struct ClientFactory;
11
12impl ClientFactory {
13    /// Check if a model supports reasoning capabilities
14    /// 
15    /// Reasoning models (gpt-5, o1-*) require special handling and use the Responses API
16    pub fn supports_reasoning(model: &str) -> bool {
17        model.starts_with("gpt-5") || model.starts_with("o1")
18    }
19    
20    /// Validate that the given LLM configuration is supported
21    pub fn validate_config(config: &LLMConfig) -> Result<()> {
22        match config.provider {
23            Provider::OpenAI => Ok(()),
24            Provider::Azure => {
25                Err(anyhow!("Azure provider not yet implemented. Use Provider::OpenAI for now."))
26            }
27            Provider::Anthropic => {
28                Err(anyhow!("Anthropic provider not yet implemented. Use Provider::OpenAI for now."))
29            }
30        }
31    }
32    
33    /// Determine if the given client supports reasoning based on the model
34    /// 
35    /// This is a runtime check to see if we should attempt to use the Reasoning API
36    pub fn should_use_reasoning_api(
37        config: &LLMConfig,
38        reasoning_client: &Option<Arc<dyn ReasoningClient>>,
39    ) -> bool {
40        Self::supports_reasoning(&config.model) && reasoning_client.is_some()
41    }
42    
43    /// Future: Create an LLM client from configuration
44    /// 
45    /// Currently, clients are created at the application level and passed to the graph.
46    /// This method is reserved for future use when we might want to create clients
47    /// dynamically at runtime.
48    #[allow(dead_code)]
49    pub fn create_client(_config: &LLMConfig, _api_key: &str) -> Result<Arc<dyn LLMClient>> {
50        // Future implementation
51        Err(anyhow!("Dynamic client creation not yet implemented. Create clients at application level and pass to GraphBuilder."))
52    }
53}
54
#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::LLMConfig;

    #[test]
    fn test_supports_reasoning() {
        // Reasoning-capable families are detected by prefix.
        for model in ["gpt-5", "gpt-5-turbo", "o1-preview", "o1-mini"] {
            assert!(
                ClientFactory::supports_reasoning(model),
                "{} should be reasoning-capable",
                model
            );
        }
        // Conventional chat models are not.
        for model in ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"] {
            assert!(
                !ClientFactory::supports_reasoning(model),
                "{} should not be reasoning-capable",
                model
            );
        }
    }

    #[test]
    fn test_validate_config() {
        // Only the OpenAI provider is accepted today.
        let supported = LLMConfig::new("gpt-4o").with_provider(Provider::OpenAI);
        assert!(ClientFactory::validate_config(&supported).is_ok());

        // Unimplemented providers are rejected with an error.
        let azure = LLMConfig::new("gpt-4o").with_provider(Provider::Azure);
        assert!(ClientFactory::validate_config(&azure).is_err());

        let anthropic = LLMConfig::new("claude-3").with_provider(Provider::Anthropic);
        assert!(ClientFactory::validate_config(&anthropic).is_err());
    }
}
84