dakera-inference 0.3.0

Embedded inference engine for Dakera — generates embeddings locally.
//! Model configurations for supported embedding models.
//!
//! Supported models:
//! - **MiniLM** (all-MiniLM-L6-v2): Fast, 384 dimensions, good for general use
//! - **BGE-small** (BAAI/bge-small-en-v1.5): Balanced, 384 dimensions, high quality
//! - **E5-small** (intfloat/e5-small-v2): Quality-focused, 384 dimensions

use serde::{Deserialize, Serialize};

/// Supported embedding models.
/// Supported embedding models.
///
/// Serialized in kebab-case (e.g. `mini-lm`, `bge-small`, `e5-small`).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize, Default)]
#[serde(rename_all = "kebab-case")]
pub enum EmbeddingModel {
    /// `all-MiniLM-L6-v2` — the default. Fastest of the three.
    /// 384-dim output, 256-token input window.
    #[default]
    MiniLM,

    /// `BAAI/bge-small-en-v1.5` — balanced quality and speed.
    /// 384-dim output, 512-token input window.
    BgeSmall,

    /// `intfloat/e5-small-v2` — quality-focused.
    /// 384-dim output, 512-token input window; requires `query: ` /
    /// `passage: ` prefixes on inputs.
    E5Small,
}

impl EmbeddingModel {
    /// HuggingFace Hub repository ID used to download this model's files.
    pub fn model_id(&self) -> &'static str {
        match self {
            EmbeddingModel::MiniLM => "sentence-transformers/all-MiniLM-L6-v2",
            EmbeddingModel::BgeSmall => "BAAI/bge-small-en-v1.5",
            EmbeddingModel::E5Small => "intfloat/e5-small-v2",
        }
    }

    /// Embedding vector dimension produced by this model.
    ///
    /// All currently supported models emit 384-dim vectors; the match is kept
    /// exhaustive so adding a model with a different dimension forces an
    /// update here rather than silently inheriting 384.
    pub fn dimension(&self) -> usize {
        match self {
            EmbeddingModel::MiniLM => 384,
            EmbeddingModel::BgeSmall => 384,
            EmbeddingModel::E5Small => 384,
        }
    }

    /// Maximum input sequence length, in tokens.
    pub fn max_seq_length(&self) -> usize {
        match self {
            EmbeddingModel::MiniLM => 256,
            EmbeddingModel::BgeSmall => 512,
            EmbeddingModel::E5Small => 512,
        }
    }

    /// Prefix to prepend to *query* text, for models that require one.
    ///
    /// E5 models are trained with asymmetric `query: ` / `passage: `
    /// prefixes; the other models take raw text.
    pub fn query_prefix(&self) -> Option<&'static str> {
        match self {
            EmbeddingModel::MiniLM => None,
            EmbeddingModel::BgeSmall => None,
            EmbeddingModel::E5Small => Some("query: "),
        }
    }

    /// Prefix to prepend to *document/passage* text, for models that require one.
    pub fn document_prefix(&self) -> Option<&'static str> {
        match self {
            EmbeddingModel::MiniLM => None,
            EmbeddingModel::BgeSmall => None,
            EmbeddingModel::E5Small => Some("passage: "),
        }
    }

    /// Whether this model pools token embeddings by mean (vs the CLS token).
    ///
    /// All current models use mean pooling; kept per-variant (exhaustive) so
    /// a future CLS-pooled model must make an explicit choice here.
    pub fn use_mean_pooling(&self) -> bool {
        match self {
            EmbeddingModel::MiniLM => true,
            EmbeddingModel::BgeSmall => true,
            EmbeddingModel::E5Small => true,
        }
    }

    /// Whether output embeddings should be L2-normalized.
    pub fn normalize_embeddings(&self) -> bool {
        true // All supported models use normalized embeddings
    }

    /// Approximate CPU throughput in tokens/second, for runtime estimation only.
    pub fn tokens_per_second_cpu(&self) -> usize {
        match self {
            EmbeddingModel::MiniLM => 5000,
            EmbeddingModel::BgeSmall => 3000,
            EmbeddingModel::E5Small => 3000,
        }
    }

    /// List all available models.
    pub fn all() -> &'static [EmbeddingModel] {
        &[
            EmbeddingModel::MiniLM,
            EmbeddingModel::BgeSmall,
            EmbeddingModel::E5Small,
        ]
    }

    /// Parse a model from a user-supplied string (case-insensitive).
    ///
    /// Accepts short aliases as well as the canonical names emitted by this
    /// type's `Display` impl, so `parse(&model.to_string())` round-trips for
    /// every variant. Returns `None` for unrecognized names.
    pub fn parse(s: &str) -> Option<Self> {
        match s.to_lowercase().as_str() {
            "minilm" | "all-minilm-l6-v2" | "mini-lm" => Some(EmbeddingModel::MiniLM),
            // "bge-small-en-v1.5" included so the Display output round-trips
            // (previously only MiniLM and E5 round-tripped).
            "bge-small" | "bge" | "bge-small-en" | "bge-small-en-v1.5" => {
                Some(EmbeddingModel::BgeSmall)
            }
            "e5-small" | "e5" | "e5-small-v2" => Some(EmbeddingModel::E5Small),
            _ => None,
        }
    }
}

impl std::fmt::Display for EmbeddingModel {
    /// Writes the canonical model name (repo name without the org prefix).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            EmbeddingModel::MiniLM => "all-MiniLM-L6-v2",
            EmbeddingModel::BgeSmall => "bge-small-en-v1.5",
            EmbeddingModel::E5Small => "e5-small-v2",
        };
        f.write_str(name)
    }
}

/// Configuration for model loading and inference.
///
/// Build with [`ModelConfig::default`] or [`ModelConfig::new`], then refine
/// with the `with_*` builder methods. Field order is part of the serialized
/// layout, so fields are intentionally left in this order.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ModelConfig {
    /// The embedding model to use.
    pub model: EmbeddingModel,

    /// Custom cache directory for model files.
    /// If None, uses HuggingFace default cache.
    pub cache_dir: Option<String>,

    /// Maximum batch size for inference (32 by default via `Default`).
    pub max_batch_size: usize,

    /// Whether to use GPU acceleration if available.
    pub use_gpu: bool,

    /// Number of threads for CPU inference.
    /// `None` leaves thread count to the inference backend's default.
    pub num_threads: Option<usize>,
}

impl Default for ModelConfig {
    fn default() -> Self {
        Self {
            model: EmbeddingModel::default(),
            cache_dir: None,
            max_batch_size: 32,
            use_gpu: false,
            num_threads: None,
        }
    }
}

impl ModelConfig {
    /// Create a new config with the specified model.
    pub fn new(model: EmbeddingModel) -> Self {
        Self {
            model,
            ..Default::default()
        }
    }

    /// Set the cache directory.
    pub fn with_cache_dir(mut self, dir: impl Into<String>) -> Self {
        self.cache_dir = Some(dir.into());
        self
    }

    /// Set the maximum batch size.
    pub fn with_max_batch_size(mut self, size: usize) -> Self {
        self.max_batch_size = size;
        self
    }

    /// Enable GPU acceleration.
    pub fn with_gpu(mut self, use_gpu: bool) -> Self {
        self.use_gpu = use_gpu;
        self
    }

    /// Set the number of CPU threads.
    pub fn with_num_threads(mut self, threads: usize) -> Self {
        self.num_threads = Some(threads);
        self
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_model_ids() {
        // Each variant maps to its exact HuggingFace repo ID.
        let expected = [
            (
                EmbeddingModel::MiniLM,
                "sentence-transformers/all-MiniLM-L6-v2",
            ),
            (EmbeddingModel::BgeSmall, "BAAI/bge-small-en-v1.5"),
            (EmbeddingModel::E5Small, "intfloat/e5-small-v2"),
        ];
        for (model, id) in expected {
            assert_eq!(model.model_id(), id);
        }
    }

    #[test]
    fn test_dimensions() {
        // Every supported model currently emits 384-dimensional vectors.
        assert!(EmbeddingModel::all().iter().all(|m| m.dimension() == 384));
    }

    #[test]
    fn test_from_str() {
        // Parsing is alias-aware and case-insensitive.
        assert_eq!(
            EmbeddingModel::parse("minilm"),
            Some(EmbeddingModel::MiniLM)
        );
        assert_eq!(
            EmbeddingModel::parse("BGE-SMALL"),
            Some(EmbeddingModel::BgeSmall)
        );
        assert_eq!(EmbeddingModel::parse("e5"), Some(EmbeddingModel::E5Small));
        assert!(EmbeddingModel::parse("unknown").is_none());
    }

    #[test]
    fn test_e5_prefixes() {
        // Only E5 uses asymmetric query/passage prefixes.
        let e5 = EmbeddingModel::E5Small;
        assert_eq!(e5.query_prefix(), Some("query: "));
        assert_eq!(e5.document_prefix(), Some("passage: "));
        assert!(EmbeddingModel::MiniLM.query_prefix().is_none());
    }
}