use crate::bulk_processing::{BulkProcessingConfig, ChunkProcessor};
use crate::cache::{CacheConfig, IntelligentCache, SmartCacheStrategy};
use crate::error::{Error, Result};
use crate::production::{Metrics, PoolConfig, ProductionExecutor, RateLimitConfig, RetryConfig};
use serde::Serialize;
use serde::de::DeserializeOwned;
use std::sync::Arc;
use std::time::Duration;
use tracing::{info, warn};

/// Configuration for [`ProductionFmpClient`].
#[derive(Debug, Clone)]
pub struct ProductionConfig {
    /// FMP API key used to authenticate every request.
    pub api_key: String,
    /// Base URL for the FMP API.
    pub base_url: String,
    /// Per-request timeout.
    pub timeout: Duration,
    /// Rate-limiting configuration.
    pub rate_limit: RateLimitConfig,
    /// Connection-pool configuration.
    pub pool: PoolConfig,
    /// Retry configuration.
    pub retry: RetryConfig,
    /// Cache configuration.
    pub cache: CacheConfig,
    /// Bulk-processing (chunking) configuration.
    pub bulk_processing: BulkProcessingConfig,
    /// Whether to enable response caching.
    pub enable_caching: bool,
    /// Whether metrics are exposed via [`ProductionFmpClient::get_metrics`].
    pub enable_metrics: bool,
    /// Whether to initialize the tracing subscriber when the client is constructed.
    pub enable_tracing: bool,
}

impl Default for ProductionConfig {
    fn default() -> Self {
        Self {
            api_key: std::env::var("FMP_API_KEY").unwrap_or_default(),
            base_url: "https://financialmodelingprep.com/stable".to_string(),
            timeout: Duration::from_secs(30),
            rate_limit: RateLimitConfig::default(),
            pool: PoolConfig::default(),
            retry: RetryConfig::default(),
            cache: CacheConfig::default(),
            bulk_processing: BulkProcessingConfig::default(),
            enable_caching: true,
            enable_metrics: true,
            enable_tracing: true,
        }
    }
}

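/// Production-grade FMP client that combines a [`ProductionExecutor`] (pooling,
/// rate limiting, retries) with an optional [`IntelligentCache`].
///
/// A sketch of typical construction through the builder; the API key and symbol
/// below are illustrative placeholders:
///
/// ```ignore
/// let client = ProductionFmpClient::builder()
///     .api_key("YOUR_FMP_API_KEY")
///     .timeout(std::time::Duration::from_secs(10))
///     .enable_caching(true)
///     .build()
///     .await?;
///
/// let quotes = client.get_quote("AAPL").await?;
/// ```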
pub struct ProductionFmpClient {
    config: ProductionConfig,
    executor: Arc<ProductionExecutor>,
    cache: Option<Arc<IntelligentCache<String>>>,
}

impl ProductionFmpClient {
    /// Creates a new production client from `config`.
    ///
    /// Fails with `Error::MissingApiKey` if no API key is configured.
    pub async fn new(config: ProductionConfig) -> Result<Self> {
        if config.api_key.is_empty() {
            return Err(Error::MissingApiKey);
        }

        if config.enable_tracing {
            Self::init_tracing();
        }

        // Build the shared executor (connection pool, rate limiter, retries).
        let executor = Arc::new(ProductionExecutor::new(
            config.pool.clone(),
            config.rate_limit.clone(),
            config.retry.clone(),
            config.timeout,
        )?);

        // Only allocate the cache when caching is enabled.
        let cache = if config.enable_caching {
            Some(Arc::new(IntelligentCache::new(config.cache.clone())))
        } else {
            None
        };

        info!(
            "Production FMP client initialized with caching: {}, metrics: {}",
            config.enable_caching, config.enable_metrics
        );

        Ok(Self {
            config,
            executor,
            cache,
        })
    }

    /// Returns a [`ProductionClientBuilder`] for fluent configuration.
    pub fn builder() -> ProductionClientBuilder {
        ProductionClientBuilder::new()
    }

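    /// Fetches `endpoint`, consulting the cache before issuing an HTTP request
    /// and caching the raw JSON response afterwards when the endpoint qualifies.
    ///
    /// A sketch of a raw call; the endpoint and target type are illustrative:
    ///
    /// ```ignore
    /// // Deserialize into serde_json::Value when no typed model exists.
    /// let profile: serde_json::Value = client.get_cached("profile/AAPL", None).await?;
    /// ```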
    pub async fn get_cached<T>(&self, endpoint: &str, query_params: Option<&str>) -> Result<T>
    where
        T: DeserializeOwned,
    {
        let cache_key = SmartCacheStrategy::generate_cache_key(endpoint, query_params);

        // Serve from the cache when a valid entry exists.
        if let Some(cache) = &self.cache {
            if let Some(cached_json) = cache.get(&cache_key).await {
                match serde_json::from_str::<T>(&cached_json) {
                    Ok(data) => return Ok(data),
                    Err(e) => {
                        warn!("Failed to deserialize cached data: {}", e);
                    }
                }
            }
        }

        // Cache miss: issue the request. The API key is always sent as a query
        // parameter, and any caller-supplied parameters are appended as
        // query-string pairs.
        let url = self.build_url(endpoint);
        let mut request_builder = self
            .executor
            .client()
            .get(&url)
            .query(&[("apikey", &self.config.api_key)]);

        if let Some(params) = query_params {
            for pair in params.split('&').filter(|p| !p.is_empty()) {
                let mut kv = pair.splitn(2, '=');
                let key = kv.next().unwrap_or_default();
                let value = kv.next().unwrap_or_default();
                request_builder = request_builder.query(&[(key, value)]);
            }
        }

        let json_response: String = self.executor.execute_request(request_builder).await?;

        // Store the raw JSON for endpoints the cache strategy considers cacheable.
        if let Some(cache) = &self.cache {
            if SmartCacheStrategy::should_cache_endpoint(endpoint) {
                let ttl = SmartCacheStrategy::get_ttl_for_endpoint(endpoint);
                cache.set(cache_key, json_response.clone(), Some(ttl)).await;
            }
        }

        serde_json::from_str(&json_response).map_err(Error::from)
    }

    /// Serializes `query` to a JSON string and forwards it to [`Self::get_cached`]
    /// as the query-parameter payload.
    pub async fn get_with_query<T, Q>(&self, endpoint: &str, query: &Q) -> Result<T>
    where
        T: DeserializeOwned,
        Q: Serialize,
    {
        let query_string = serde_json::to_string(query)
            .map_err(|e| Error::Custom(format!("Failed to serialize query: {}", e)))?;

        self.get_cached(endpoint, Some(&query_string)).await
    }

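    /// Splits `data` into chunks via [`ChunkProcessor`] and applies `processor`
    /// to each chunk, collecting the per-chunk results.
    ///
    /// The closure must return a boxed `Send` future. A sketch with a trivial
    /// processor that only counts items per chunk (the symbols are illustrative):
    ///
    /// ```ignore
    /// let counts = client
    ///     .process_bulk_data(vec!["AAPL".to_string(), "MSFT".to_string()], |chunk| {
    ///         Box::pin(async move { Ok(chunk.len()) })
    ///     })
    ///     .await?;
    /// ```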
    pub async fn process_bulk_data<T, F, R>(&self, data: Vec<T>, processor: F) -> Result<Vec<R>>
    where
        T: Clone + Send + Sync,
        F: Fn(Vec<T>) -> std::pin::Pin<Box<dyn std::future::Future<Output = Result<R>> + Send>>
            + Send
            + Sync,
        R: Send,
    {
        let mut chunk_processor = ChunkProcessor::new(self.config.bulk_processing.clone());
        chunk_processor.add_items(data)?;

        chunk_processor
            .process_chunks(|chunk| processor(chunk))
            .await
    }

    /// Returns executor metrics when metrics are enabled.
    pub async fn get_metrics(&self) -> Option<Metrics> {
        if self.config.enable_metrics {
            Some(self.executor.get_metrics().await)
        } else {
            None
        }
    }

    /// Returns cache metrics when caching is enabled.
    pub async fn get_cache_metrics(&self) -> Option<crate::cache::CacheMetrics> {
        if let Some(cache) = &self.cache {
            Some(cache.get_metrics().await)
        } else {
            None
        }
    }

    /// Evicts expired entries from the cache, if caching is enabled.
    pub async fn cleanup_cache(&self) {
        if let Some(cache) = &self.cache {
            cache.cleanup_expired().await;
        }
    }

    fn build_url(&self, path: &str) -> String {
        format!("{}/{}", self.config.base_url, path.trim_start_matches('/'))
    }

    fn init_tracing() {
        // Ensure the tracing subscriber is installed at most once per process.
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| {
            tracing_subscriber::fmt().init();
        });
    }

    /// Performs a lightweight search request and reports whether it succeeded.
    pub async fn health_check(&self) -> Result<bool> {
        match self
            .get_cached::<serde_json::Value>("/search", Some("query=AAPL&limit=1"))
            .await
        {
            Ok(_) => Ok(true),
            Err(_) => Ok(false),
        }
    }

    /// Fetches the quote for a single `symbol`.
    pub async fn get_quote(&self, symbol: &str) -> Result<Vec<crate::models::quote::StockQuote>> {
        let endpoint = format!("quote/{}", symbol);
        self.get_cached(&endpoint, None).await
    }

    /// Fetches quotes for multiple symbols by routing them through the bulk
    /// chunking pipeline.
    ///
    /// Note: per-chunk fetching is not wired up yet; each chunk currently yields
    /// an empty batch, so the combined result is empty.
    pub async fn get_quotes_bulk(
        &self,
        symbols: &[&str],
    ) -> Result<Vec<crate::models::quote::StockQuote>> {
        let mut all_quotes = Vec::new();

        let symbols_vec: Vec<String> = symbols.iter().map(|s| s.to_string()).collect();

        let quotes = self
            .process_bulk_data(symbols_vec, |_chunk| {
                Box::pin(async move {
                    // Placeholder: the actual per-chunk quote request is not implemented.
                    let chunk_quotes: Vec<crate::models::quote::StockQuote> = Vec::new();
                    Ok(chunk_quotes)
                })
            })
            .await?;

        // Flatten the per-chunk batches into a single vector.
        for mut quote_batch in quotes {
            all_quotes.append(&mut quote_batch);
        }

        Ok(all_quotes)
    }

    /// Alias for [`Self::health_check`].
    pub async fn check_health(&self) -> Result<bool> {
        self.health_check().await
    }

    /// Alias for [`Self::get_metrics`].
    pub async fn get_performance_metrics(&self) -> Option<Metrics> {
        self.get_metrics().await
    }

    /// Returns a quote-endpoint facade borrowing this client.
    pub fn quote(&self) -> ProductionQuote<'_> {
        ProductionQuote::new(self)
    }

    /// Returns a company-information facade borrowing this client.
    pub fn company_info(&self) -> ProductionCompanyInfo<'_> {
        ProductionCompanyInfo::new(self)
    }
}

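/// Fluent builder for [`ProductionFmpClient`], starting from
/// `ProductionConfig::default()` and overriding individual settings.
///
/// A sketch of a customized build; the configuration values are illustrative:
///
/// ```ignore
/// let client = ProductionFmpClient::builder()
///     .api_key("YOUR_FMP_API_KEY")
///     .rate_limit(RateLimitConfig::default())
///     .retry_config(RetryConfig::default())
///     .cache_config(CacheConfig::default())
///     .build()
///     .await?;
/// ```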
pub struct ProductionClientBuilder {
    config: ProductionConfig,
}

impl ProductionClientBuilder {
    pub fn new() -> Self {
        Self {
            config: ProductionConfig::default(),
        }
    }

    pub fn api_key(mut self, api_key: impl Into<String>) -> Self {
        self.config.api_key = api_key.into();
        self
    }

    pub fn base_url(mut self, base_url: impl Into<String>) -> Self {
        self.config.base_url = base_url.into();
        self
    }

    pub fn timeout(mut self, timeout: Duration) -> Self {
        self.config.timeout = timeout;
        self
    }

    pub fn rate_limit(mut self, rate_limit: RateLimitConfig) -> Self {
        self.config.rate_limit = rate_limit;
        self
    }

    pub fn pool_config(mut self, pool: PoolConfig) -> Self {
        self.config.pool = pool;
        self
    }

    pub fn retry_config(mut self, retry: RetryConfig) -> Self {
        self.config.retry = retry;
        self
    }

    pub fn cache_config(mut self, cache: CacheConfig) -> Self {
        self.config.cache = cache;
        self
    }

    pub fn enable_caching(mut self, enable: bool) -> Self {
        self.config.enable_caching = enable;
        self
    }

    pub fn enable_metrics(mut self, enable: bool) -> Self {
        self.config.enable_metrics = enable;
        self
    }

    pub fn enable_tracing(mut self, enable: bool) -> Self {
        self.config.enable_tracing = enable;
        self
    }

    pub async fn build(self) -> Result<ProductionFmpClient> {
        ProductionFmpClient::new(self.config).await
    }
}

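/// Borrowed facade over [`ProductionFmpClient`] for quote endpoints; obtained
/// via [`ProductionFmpClient::quote`]. A sketch of facade usage (the symbol is
/// illustrative):
///
/// ```ignore
/// let quote = client.quote().get_quote("AAPL").await?;
/// let profile = client.company_info().get_profile("AAPL").await?;
/// ```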
pub struct ProductionQuote<'a> {
    client: &'a ProductionFmpClient,
}

impl<'a> ProductionQuote<'a> {
    fn new(client: &'a ProductionFmpClient) -> Self {
        Self { client }
    }

    pub async fn get_quote(&self, symbol: &str) -> Result<serde_json::Value> {
        self.client
            .get_cached(&format!("/quote/{}", symbol), None)
            .await
    }
}

/// Borrowed facade over [`ProductionFmpClient`] for company-information
/// endpoints; obtained via [`ProductionFmpClient::company_info`].
pub struct ProductionCompanyInfo<'a> {
    client: &'a ProductionFmpClient,
}

impl<'a> ProductionCompanyInfo<'a> {
    fn new(client: &'a ProductionFmpClient) -> Self {
        Self { client }
    }

    pub async fn get_profile(&self, symbol: &str) -> Result<serde_json::Value> {
        self.client
            .get_cached(&format!("/profile/{}", symbol), None)
            .await
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_production_client_builder() {
        let config = ProductionConfig {
            api_key: "test_key".to_string(),
            enable_caching: false,
            enable_metrics: false,
            enable_tracing: false,
            ..Default::default()
        };

        let client = ProductionFmpClient::new(config).await;
        assert!(client.is_ok());
    }

    #[tokio::test]
    async fn test_health_check() {
        let config = ProductionConfig {
            api_key: "test_key".to_string(),
            enable_tracing: false,
            ..Default::default()
        };

        let client = ProductionFmpClient::new(config).await.unwrap();
        // The dummy key cannot authenticate, so only exercise the call path and
        // ignore the outcome.
        let _result = client.health_check().await;
    }
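
    // A minimal sketch exercising the builder path; mirrors the direct-constructor
    // test above but goes through ProductionClientBuilder.
    #[tokio::test]
    async fn test_builder_constructs_client() {
        let client = ProductionFmpClient::builder()
            .api_key("test_key")
            .enable_caching(false)
            .enable_metrics(false)
            .enable_tracing(false)
            .build()
            .await;

        assert!(client.is_ok());
    }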
}