ai_lib/config.rs

//! Configuration management module providing connection and runtime configuration options.
//!
//! This module defines configuration structures for customizing ai-lib behavior,
//! including connection settings, resilience parameters, and provider-specific options.

use crate::circuit_breaker::CircuitBreakerConfig;
use crate::error_handling::ErrorThresholds;
use crate::rate_limiter::RateLimiterConfig;
use std::time::Duration;

/// Minimal explicit connection/configuration options.
///
/// Library users can pass an instance of this struct to `AiClient::with_options` to
/// explicitly control the base URL, proxy, API key, and timeout without relying solely
/// on environment variables. Any field left as `None` falls back to the existing
/// environment variable behavior or library defaults.
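///
/// # Example
///
/// A minimal construction sketch (marked `ignore` because the import path and the
/// exact `AiClient::with_options` signature are not shown in this file):
///
/// ```rust,ignore
/// use std::time::Duration;
///
/// // Only the fields set here are used verbatim; everything else falls back
/// // to environment variables or library defaults.
/// let opts = ConnectionOptions {
///     base_url: Some("https://api.example.com/v1".to_string()),
///     timeout: Some(Duration::from_secs(30)),
///     ..Default::default()
/// };
/// // `opts` can then be passed to `AiClient::with_options`.
/// ```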
#[derive(Clone, Debug, Default)]
pub struct ConnectionOptions {
    /// Explicit base URL; when `None`, falls back to `AI_BASE_URL` or the adapter default.
    pub base_url: Option<String>,
    /// Explicit proxy URL; when `None`, falls back to `AI_PROXY_URL` unless `disable_proxy` is set.
    pub proxy: Option<String>,
    /// Explicit API key; when `None`, falls back to `<PROVIDER>_API_KEY`, then `AI_API_KEY`.
    pub api_key: Option<String>,
    /// Request timeout; when `None`, falls back to `AI_TIMEOUT_SECS` or the caller's default.
    pub timeout: Option<Duration>,
    /// When `true`, the `AI_PROXY_URL` fallback is skipped.
    pub disable_proxy: bool,
}

// Default derived above

impl ConnectionOptions {
    /// Hydrate unset fields from environment variables (lightweight fallback logic).
    ///
    /// `provider_env_prefix` is a prefix such as `OPENAI` or `GROQ`, used to look up a
    /// provider-specific API key (e.g. `OPENAI_API_KEY`) before the generic fallback `AI_API_KEY`.
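    ///
    /// # Example
    ///
    /// A precedence sketch (marked `ignore` since doctests need the crate's public path):
    ///
    /// ```rust,ignore
    /// // An explicitly provided key wins over OPENAI_API_KEY and AI_API_KEY.
    /// let opts = ConnectionOptions {
    ///     api_key: Some("sk-explicit".to_string()),
    ///     ..Default::default()
    /// }
    /// .hydrate_with_env("OPENAI");
    /// assert_eq!(opts.api_key.as_deref(), Some("sk-explicit"));
    /// ```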
    pub fn hydrate_with_env(mut self, provider_env_prefix: &str) -> Self {
        // API key precedence: explicit > <PROVIDER>_API_KEY > AI_API_KEY
        if self.api_key.is_none() {
            let specific = format!("{}_API_KEY", provider_env_prefix);
            self.api_key = std::env::var(&specific)
                .ok()
                .or_else(|| std::env::var("AI_API_KEY").ok());
        }
        // Base URL precedence: explicit > AI_BASE_URL (generic) > leave None (caller/adapter handles default)
        if self.base_url.is_none() {
            if let Ok(v) = std::env::var("AI_BASE_URL") {
                self.base_url = Some(v);
            }
        }
        // Proxy precedence: explicit > AI_PROXY_URL
        if self.proxy.is_none() && !self.disable_proxy {
            self.proxy = std::env::var("AI_PROXY_URL").ok();
        }
        // Timeout precedence: explicit > AI_TIMEOUT_SECS > default handled by caller
        if self.timeout.is_none() {
            if let Ok(v) = std::env::var("AI_TIMEOUT_SECS") {
                if let Ok(secs) = v.parse::<u64>() {
                    self.timeout = Some(Duration::from_secs(secs));
                }
            }
        }
        self
    }
}

/// Resilience configuration for advanced error handling and rate limiting.
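///
/// # Example
///
/// A sketch of a partial configuration (marked `ignore`; only types visible in this
/// file are used), relying on the derived `Default` so unset layers stay disabled:
///
/// ```rust,ignore
/// let cfg = ResilienceConfig {
///     backpressure: Some(BackpressureConfig { max_concurrent_requests: 32 }),
///     ..Default::default()
/// };
/// ```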
#[derive(Debug, Clone, Default)]
pub struct ResilienceConfig {
    pub circuit_breaker: Option<CircuitBreakerConfig>,
    pub rate_limiter: Option<RateLimiterConfig>,
    pub backpressure: Option<BackpressureConfig>,
    pub error_handling: Option<ErrorHandlingConfig>,
}

/// Backpressure configuration
#[derive(Debug, Clone)]
pub struct BackpressureConfig {
    pub max_concurrent_requests: usize,
}

/// Error handling configuration
#[derive(Debug, Clone)]
pub struct ErrorHandlingConfig {
    pub enable_recovery: bool,
    pub enable_monitoring: bool,
    pub error_thresholds: ErrorThresholds,
}

// Default derived above

impl Default for BackpressureConfig {
    fn default() -> Self {
        Self {
            max_concurrent_requests: 100,
        }
    }
}

impl Default for ErrorHandlingConfig {
    fn default() -> Self {
        Self {
            enable_recovery: true,
            enable_monitoring: true,
            error_thresholds: ErrorThresholds::default(),
        }
    }
}

impl ResilienceConfig {
    /// Create smart defaults suitable for general production use.
    pub fn smart_defaults() -> Self {
        Self {
            circuit_breaker: Some(CircuitBreakerConfig::default()),
            rate_limiter: Some(RateLimiterConfig::default()),
            backpressure: Some(BackpressureConfig::default()),
            error_handling: Some(ErrorHandlingConfig::default()),
        }
    }

    /// Create a stricter, production-ready configuration.
    pub fn production() -> Self {
        Self {
            circuit_breaker: Some(CircuitBreakerConfig::production()),
            rate_limiter: Some(RateLimiterConfig::production()),
            backpressure: Some(BackpressureConfig {
                max_concurrent_requests: 50,
            }),
            error_handling: Some(ErrorHandlingConfig {
                enable_recovery: true,
                enable_monitoring: true,
                error_thresholds: ErrorThresholds {
                    error_rate_threshold: 0.05, // 5% error rate
                    consecutive_errors: 3,
                    time_window: Duration::from_secs(30),
                },
            }),
        }
    }

    /// Create a permissive configuration for development.
    pub fn development() -> Self {
        Self {
            circuit_breaker: Some(CircuitBreakerConfig::development()),
            rate_limiter: Some(RateLimiterConfig::development()),
            backpressure: Some(BackpressureConfig {
                max_concurrent_requests: 200,
            }),
            error_handling: Some(ErrorHandlingConfig {
                enable_recovery: false,
                enable_monitoring: false,
                error_thresholds: ErrorThresholds::default(),
            }),
        }
    }
}
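
// A minimal sanity-check sketch of the preset constructors. The assertions only
// rely on values visible in this file (e.g. `production()` caps concurrency at 50,
// `development()` raises it to 200) and assume the rest of the crate compiles as-is.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn presets_populate_expected_fields() {
        let prod = ResilienceConfig::production();
        assert_eq!(
            prod.backpressure.as_ref().map(|b| b.max_concurrent_requests),
            Some(50)
        );
        assert!(prod.circuit_breaker.is_some());

        let dev = ResilienceConfig::development();
        assert_eq!(
            dev.backpressure.as_ref().map(|b| b.max_concurrent_requests),
            Some(200)
        );

        // smart_defaults() enables every resilience layer with its default settings.
        let smart = ResilienceConfig::smart_defaults();
        assert!(smart.rate_limiter.is_some() && smart.error_handling.is_some());
    }
}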