// inference/lib.rs
//! # Dakera Inference Engine
//!
//! Embedded inference engine for generating vector embeddings locally without
//! external API calls. This crate provides:
//!
//! - **Local Embedding Generation**: Generate embeddings using state-of-the-art
//!   transformer models running locally on CPU or GPU.
//! - **Multiple Model Support**: Choose from MiniLM (fast), BGE (balanced), or E5 (quality).
//! - **Batch Processing**: Efficient batch processing with automatic batching and parallelization.
//! - **Zero External Dependencies**: No OpenAI, Cohere, or other API keys required.
//!
//! ## Quick Start
//!
//! ```no_run
//! use inference::{EmbeddingEngine, ModelConfig, EmbeddingModel};
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     // Create an engine with the default model configuration
//!     let engine = EmbeddingEngine::new(ModelConfig::default()).await?;
//!
//!     // Embed a query
//!     let query_embedding = engine.embed_query("What is machine learning?").await?;
//!     println!("Query embedding: {} dimensions", query_embedding.len());
//!
//!     // Embed documents
//!     let docs = vec![
//!         "Machine learning is a type of artificial intelligence.".to_string(),
//!         "Deep learning uses neural networks with many layers.".to_string(),
//!     ];
//!     let doc_embeddings = engine.embed_documents(&docs).await?;
//!     println!("Generated {} document embeddings", doc_embeddings.len());
//!
//!     Ok(())
//! }
//! ```
//!
//! ## Model Selection
//!
//! Choose the right model for your use case:
//!
//! | Model     | Speed | Quality | Use Case                   |
//! |-----------|-------|---------|----------------------------|
//! | MiniLM    | ⚡⚡⚡ | ⭐⭐    | High-throughput, real-time |
//! | BGE-small | ⚡⚡   | ⭐⭐⭐  | Balanced performance       |
//! | E5-small  | ⚡⚡   | ⭐⭐⭐  | Best quality for retrieval |
//!
//! ## GPU Acceleration
//!
//! Enable GPU acceleration by building with the appropriate feature:
//!
//! ```toml
//! # For NVIDIA GPUs
//! inference = { path = "crates/inference", features = ["cuda"] }
//!
//! # For Apple Silicon
//! inference = { path = "crates/inference", features = ["metal"] }
//! ```
//!
//! ## Architecture
//!
//! ```text
//! ┌─────────────────────────────────────────────────────────────┐
//! │                       EmbeddingEngine                       │
//! │  ┌─────────────┐  ┌───────────────┐  ┌──────────────────┐  │
//! │  │ ModelConfig │  │ BatchProcessor│  │   ort::Session   │  │
//! │  │ - model     │  │ - tokenizer   │  │  (ONNX Runtime)  │  │
//! │  │ - threads   │  │ - batching    │  │  - BERT INT8     │  │
//! │  │ - batch_sz  │  │ - prefixes    │  │  - mean_pool()   │  │
//! │  └─────────────┘  └───────────────┘  └──────────────────┘  │
//! └─────────────────────────────────────────────────────────────┘
//!                               │
//!                               ▼
//!               ┌───────────────────────────────┐
//!               │      Vec<f32> Embeddings      │
//!               │  (normalized, model-dim dims) │
//!               └───────────────────────────────┘
//! ```
79
80pub mod batch;
81pub mod engine;
82pub mod error;
83pub mod extraction;
84pub mod models;
85pub mod ner;
86pub mod reranker;
87
88// Re-exports for convenience
89pub use engine::{EmbeddingEngine, EmbeddingEngineBuilder};
90pub use error::{InferenceError, Result};
91pub use extraction::{
92 build_provider, ExtractionOpts, ExtractionProvider, ExtractionResult, ExtractorConfig,
93};
94pub use models::{EmbeddingModel, ModelConfig};
95pub use ner::{rule_based_extract, ExtractedEntity, GlinerEngine, NerEngine};
96pub use reranker::CrossEncoderEngine;
97
98/// Prelude module for convenient imports.
99pub mod prelude {
100 pub use crate::engine::{EmbeddingEngine, EmbeddingEngineBuilder};
101 pub use crate::error::{InferenceError, Result};
102 pub use crate::models::{EmbeddingModel, ModelConfig};
103}
104
#[cfg(test)]
mod tests {
    use super::*;

    /// `ModelConfig::default()` should pick the expected model, batch size,
    /// and CPU execution.
    // (Fix: stripped the fused line-number prefixes that made this module
    // invalid Rust.)
    // NOTE(review): the crate-level docs describe MiniLM as the default model,
    // but this test asserts BgeLarge — confirm which one `Default` actually
    // selects and align the docs or this assertion.
    #[test]
    fn test_model_defaults() {
        let config = ModelConfig::default();
        assert_eq!(config.model, EmbeddingModel::BgeLarge);
        assert_eq!(config.max_batch_size, 32);
        assert!(!config.use_gpu);
    }

    /// Each supported model reports its fixed embedding dimensionality.
    #[test]
    fn test_model_dimensions() {
        assert_eq!(EmbeddingModel::BgeLarge.dimension(), 1024);
        assert_eq!(EmbeddingModel::MiniLM.dimension(), 384);
        assert_eq!(EmbeddingModel::BgeSmall.dimension(), 384);
        assert_eq!(EmbeddingModel::E5Small.dimension(), 384);
    }
}
124}