//! Embedding service module for generating and caching text embeddings.
//!
//! This module provides a complete embedding service infrastructure with:
//! - OpenAI API integration (text-embedding-3-small)
//! - LRU caching with TTL support
//! - Retry logic with exponential backoff
//! - Batch processing for efficient bulk operations
//! - Cost tracking and metrics
//! - Extensible provider architecture
//!
//! # Examples
//!
//! ```ignore
//! use maproom::embedding::{EmbeddingService, EmbeddingConfig};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create service from environment variables
//!     let service = EmbeddingService::from_env()?;
//!
//!     // Embed a single text
//!     let embedding = service.embed_text("Hello, world!").await?;
//!     println!("Embedding dimension: {}", embedding.len());
//!
//!     // Embed a batch of texts
//!     let texts = vec![
//!         "First text".to_string(),
//!         "Second text".to_string(),
//!     ];
//!     let embeddings = service.embed_batch(texts).await?;
//!     println!("Generated {} embeddings", embeddings.len());
//!
//!     // Get metrics
//!     let cache_metrics = service.cache_metrics().await;
//!     println!("Cache hit rate: {:.1}%", cache_metrics.hit_rate() * 100.0);
//!
//!     let cost_metrics = service.cost_metrics();
//!     println!("Estimated cost: ${:.4}", cost_metrics.estimated_cost_usd());
//!
//!     Ok(())
//! }
//! ```
// Re-export main types for convenience
//
// NOTE(review): this re-export block is corrupted — the module paths and most
// item names in the `pub use` statements below have been stripped (nine lines
// read literally `pub use ;`, and the two surviving lines carry bare
// identifiers with no `module::` prefix), so this block cannot compile as-is.
// The original paths are not recoverable from this file alone; restore them
// from version control. Judging by the module docs above and the two surviving
// identifiers (`create_provider_from_env`, `OllamaProvider`), the lost
// re-exports presumably covered the service/config types (`EmbeddingService`,
// `EmbeddingConfig`), the provider trait and implementations, the cache and
// cost metrics types, and the error type — TODO confirm against the
// submodules of this module.
pub use ;
pub use ;
pub use ;
pub use ;
pub use ;
pub use create_provider_from_env;
pub use ;
pub use OllamaProvider;
pub use ;
pub use ;
pub use ;