// inference/lib.rs
1//! # Dakera Inference Engine
2//!
3//! Embedded inference engine for generating vector embeddings locally without
4//! external API calls. This crate provides:
5//!
6//! - **Local Embedding Generation**: Generate embeddings using state-of-the-art
7//! transformer models running locally on CPU or GPU.
8//! - **Multiple Model Support**: Choose from MiniLM (fast), BGE (balanced), or E5 (quality).
9//! - **Batch Processing**: Efficient batch processing with automatic batching and parallelization.
10//! - **Zero External Dependencies**: No OpenAI, Cohere, or other API keys required.
11//!
12//! ## Quick Start
13//!
//! ```no_run
//! use inference::{EmbeddingEngine, ModelConfig, EmbeddingModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create engine with default model (MiniLM)
//!     let engine = EmbeddingEngine::new(ModelConfig::default()).await?;
//!
//!     // Embed a query
//!     let query_embedding = engine.embed_query("What is machine learning?").await?;
//!     println!("Query embedding: {} dimensions", query_embedding.len());
//!
//!     // Embed documents
//!     let docs = vec![
//!         "Machine learning is a type of artificial intelligence.".to_string(),
//!         "Deep learning uses neural networks with many layers.".to_string(),
//!     ];
//!     let doc_embeddings = engine.embed_documents(&docs).await?;
//!     println!("Generated {} document embeddings", doc_embeddings.len());
//!
//!     Ok(())
//! }
//! ```
37//!
38//! ## Model Selection
39//!
40//! Choose the right model for your use case:
41//!
42//! | Model | Speed | Quality | Use Case |
43//! |-------|-------|---------|----------|
44//! | MiniLM | ⚡⚡⚡ | ⭐⭐ | High-throughput, real-time |
45//! | BGE-small | ⚡⚡ | ⭐⭐⭐ | Balanced performance |
46//! | E5-small | ⚡⚡ | ⭐⭐⭐ | Best quality for retrieval |
47//!
48//! ## GPU Acceleration
49//!
50//! Enable GPU acceleration by building with the appropriate feature:
51//!
52//! ```toml
53//! # For NVIDIA GPUs
54//! inference = { path = "crates/inference", features = ["cuda"] }
55//!
56//! # For Apple Silicon
57//! inference = { path = "crates/inference", features = ["metal"] }
58//! ```
59//!
60//! ## Architecture
61//!
//! ```text
//! ┌──────────────────────────────────────────────────────────────┐
//! │                       EmbeddingEngine                        │
//! │  ┌─────────────┐  ┌────────────────┐  ┌──────────────────┐  │
//! │  │ ModelConfig │  │ BatchProcessor │  │    BertModel     │  │
//! │  │  - model    │  │  - tokenizer   │  │    (Candle)      │  │
//! │  │  - device   │  │  - batching    │  │  - forward()     │  │
//! │  │  - batch_sz │  │  - prefixes    │  │  - mean_pool()   │  │
//! │  └─────────────┘  └────────────────┘  └──────────────────┘  │
//! └──────────────────────────────────────────────────────────────┘
//!                              │
//!                              ▼
//!              ┌───────────────────────────────┐
//!              │     Vec<f32> Embeddings       │
//!              │    (normalized, 384 dims)     │
//!              └───────────────────────────────┘
//! ```
79
// Public submodules of the crate.
pub mod batch; // batch processing (tokenization/batching — see crate docs diagram)
pub mod engine; // `EmbeddingEngine` and its builder
pub mod error; // `InferenceError` and the crate-local `Result` alias
pub mod models; // `EmbeddingModel` selection and `ModelConfig`

// Re-exports for convenience: lets callers write `inference::EmbeddingEngine`
// instead of the full `inference::engine::EmbeddingEngine` path.
pub use engine::{EmbeddingEngine, EmbeddingEngineBuilder};
pub use error::{InferenceError, Result};
pub use models::{EmbeddingModel, ModelConfig};
89
90/// Prelude module for convenient imports.
91pub mod prelude {
92 pub use crate::engine::{EmbeddingEngine, EmbeddingEngineBuilder};
93 pub use crate::error::{InferenceError, Result};
94 pub use crate::models::{EmbeddingModel, ModelConfig};
95}
96
#[cfg(test)]
mod tests {
    use super::*;

    // The out-of-the-box configuration targets the fast CPU path:
    // MiniLM model, batches of 32, GPU disabled.
    #[test]
    fn test_model_defaults() {
        let config = ModelConfig::default();
        assert!(matches!(config.model, EmbeddingModel::MiniLM));
        assert_eq!(32, config.max_batch_size);
        assert!(!config.use_gpu);
    }

    // All three supported models report 384-dimensional embeddings,
    // matching the crate docs.
    #[test]
    fn test_model_dimensions() {
        let models = [
            EmbeddingModel::MiniLM,
            EmbeddingModel::BgeSmall,
            EmbeddingModel::E5Small,
        ];
        for model in models {
            assert_eq!(model.dimension(), 384);
        }
    }
}
115}