memvid_cli/
config.rs

//! CLI configuration and environment handling
//!
//! This module provides configuration loading from environment variables,
//! tracing initialization, and embedding runtime management for semantic search.

use std::env;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::{anyhow, Result};
use ed25519_dalek::VerifyingKey;

const DEFAULT_API_URL: &str = "https://memvid.com";
const DEFAULT_CACHE_DIR: &str = "~/.cache/memvid";

/// Supported embedding models for semantic search
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum EmbeddingModelChoice {
    /// BGE-small-en-v1.5: Fast, 384-dim, ~78% accuracy (default)
    #[default]
    BgeSmall,
    /// BGE-base-en-v1.5: Balanced, 768-dim, ~85% accuracy
    BgeBase,
    /// Nomic-embed-text-v1.5: 768-dim, ~86% accuracy
    Nomic,
    /// GTE-large-en-v1.5: Best semantic depth, 1024-dim
    GteLarge,
    /// OpenAI text-embedding-3-large: Highest quality, 3072-dim (requires OPENAI_API_KEY)
    OpenAILarge,
    /// OpenAI text-embedding-3-small: Good quality, 1536-dim (requires OPENAI_API_KEY)
    OpenAISmall,
    /// OpenAI text-embedding-ada-002: Legacy model, 1536-dim (requires OPENAI_API_KEY)
    OpenAIAda,
    /// NVIDIA nv-embed-v1: High quality, remote embeddings (requires NVIDIA_API_KEY)
    Nvidia,
    /// Gemini text-embedding-004: Google AI embeddings, 768-dim (requires GOOGLE_API_KEY or GEMINI_API_KEY)
    Gemini,
    /// Mistral mistral-embed: Mistral AI embeddings, 1024-dim (requires MISTRAL_API_KEY)
    Mistral,
}

impl EmbeddingModelChoice {
    /// Check if this is an OpenAI model (requires OPENAI_API_KEY)
    pub fn is_openai(&self) -> bool {
        matches!(
            self,
            EmbeddingModelChoice::OpenAILarge
                | EmbeddingModelChoice::OpenAISmall
                | EmbeddingModelChoice::OpenAIAda
        )
    }

    /// Check if this is a remote/cloud model (not local fastembed)
    pub fn is_remote(&self) -> bool {
        matches!(
            self,
            EmbeddingModelChoice::OpenAILarge
                | EmbeddingModelChoice::OpenAISmall
                | EmbeddingModelChoice::OpenAIAda
                | EmbeddingModelChoice::Nvidia
                | EmbeddingModelChoice::Gemini
                | EmbeddingModelChoice::Mistral
        )
    }

    /// Get the fastembed EmbeddingModel enum value (only for local models)
    ///
    /// # Panics
    /// Panics if called on any remote model (OpenAI, NVIDIA, Gemini, Mistral).
    /// Use `is_remote()` to check first.
    pub fn to_fastembed_model(&self) -> fastembed::EmbeddingModel {
        match self {
            EmbeddingModelChoice::BgeSmall => fastembed::EmbeddingModel::BGESmallENV15,
            EmbeddingModelChoice::BgeBase => fastembed::EmbeddingModel::BGEBaseENV15,
            EmbeddingModelChoice::Nomic => fastembed::EmbeddingModel::NomicEmbedTextV15,
            EmbeddingModelChoice::GteLarge => fastembed::EmbeddingModel::GTELargeENV15,
            EmbeddingModelChoice::OpenAILarge
            | EmbeddingModelChoice::OpenAISmall
            | EmbeddingModelChoice::OpenAIAda => {
                panic!("OpenAI models don't use fastembed. Check is_remote() first.")
            }
            EmbeddingModelChoice::Nvidia => {
                panic!("NVIDIA embeddings don't use fastembed. Check is_remote() first.")
            }
            EmbeddingModelChoice::Gemini => {
                panic!("Gemini embeddings don't use fastembed. Check is_remote() first.")
            }
            EmbeddingModelChoice::Mistral => {
                panic!("Mistral embeddings don't use fastembed. Check is_remote() first.")
            }
        }
    }

    /// Get human-readable model name
    pub fn name(&self) -> &'static str {
        match self {
            EmbeddingModelChoice::BgeSmall => "bge-small",
            EmbeddingModelChoice::BgeBase => "bge-base",
            EmbeddingModelChoice::Nomic => "nomic",
            EmbeddingModelChoice::GteLarge => "gte-large",
            EmbeddingModelChoice::OpenAILarge => "openai-large",
            EmbeddingModelChoice::OpenAISmall => "openai-small",
            EmbeddingModelChoice::OpenAIAda => "openai-ada",
            EmbeddingModelChoice::Nvidia => "nvidia",
            EmbeddingModelChoice::Gemini => "gemini",
            EmbeddingModelChoice::Mistral => "mistral",
        }
    }

    /// Get the canonical provider model identifier used for persisted metadata.
    ///
    /// This is intended to match upstream provider IDs (OpenAI) and HuggingFace-style IDs
    /// (fastembed/ONNX) so that memories can record an embedding "identity" that other
    /// runtimes can select deterministically.
    pub fn canonical_model_id(&self) -> &'static str {
        match self {
            EmbeddingModelChoice::BgeSmall => "BAAI/bge-small-en-v1.5",
            EmbeddingModelChoice::BgeBase => "BAAI/bge-base-en-v1.5",
            EmbeddingModelChoice::Nomic => "nomic-embed-text-v1.5",
            EmbeddingModelChoice::GteLarge => "thenlper/gte-large",
            EmbeddingModelChoice::OpenAILarge => "text-embedding-3-large",
            EmbeddingModelChoice::OpenAISmall => "text-embedding-3-small",
            EmbeddingModelChoice::OpenAIAda => "text-embedding-ada-002",
            EmbeddingModelChoice::Nvidia => "nvidia/nv-embed-v1",
            EmbeddingModelChoice::Gemini => "text-embedding-004",
            EmbeddingModelChoice::Mistral => "mistral-embed",
        }
    }

    /// Get embedding dimensions
    pub fn dimensions(&self) -> usize {
        match self {
            EmbeddingModelChoice::BgeSmall => 384,
            EmbeddingModelChoice::BgeBase => 768,
            EmbeddingModelChoice::Nomic => 768,
            EmbeddingModelChoice::GteLarge => 1024,
            EmbeddingModelChoice::OpenAILarge => 3072,
            EmbeddingModelChoice::OpenAISmall => 1536,
            EmbeddingModelChoice::OpenAIAda => 1536,
            // Remote model; infer from the first embedding response.
            EmbeddingModelChoice::Nvidia => 0,
            EmbeddingModelChoice::Gemini => 768,
            EmbeddingModelChoice::Mistral => 1024,
        }
    }
}

impl FromStr for EmbeddingModelChoice {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self> {
        let lowered = s.trim().to_ascii_lowercase();
        match lowered.as_str() {
            "bge-small" | "bge_small" | "bgesmall" | "small" => Ok(EmbeddingModelChoice::BgeSmall),
            "baai/bge-small-en-v1.5" => Ok(EmbeddingModelChoice::BgeSmall),
            "bge-base" | "bge_base" | "bgebase" | "base" => Ok(EmbeddingModelChoice::BgeBase),
            "baai/bge-base-en-v1.5" => Ok(EmbeddingModelChoice::BgeBase),
            "nomic" | "nomic-embed" | "nomic_embed" => Ok(EmbeddingModelChoice::Nomic),
            "nomic-embed-text-v1.5" => Ok(EmbeddingModelChoice::Nomic),
            "gte-large" | "gte_large" | "gtelarge" | "gte" => Ok(EmbeddingModelChoice::GteLarge),
            "thenlper/gte-large" => Ok(EmbeddingModelChoice::GteLarge),
            // OpenAI models - default "openai" maps to "openai-large" for highest quality
            "openai" | "openai-large" | "openai_large" | "text-embedding-3-large" => {
                Ok(EmbeddingModelChoice::OpenAILarge)
            }
            "openai-small" | "openai_small" | "text-embedding-3-small" => {
                Ok(EmbeddingModelChoice::OpenAISmall)
            }
            "openai-ada" | "openai_ada" | "text-embedding-ada-002" | "ada" => {
                Ok(EmbeddingModelChoice::OpenAIAda)
            }
            "nvidia" | "nv" | "nv-embed-v1" | "nvidia/nv-embed-v1" => Ok(EmbeddingModelChoice::Nvidia),
            _ if lowered.starts_with("nvidia/") || lowered.starts_with("nvidia:") || lowered.starts_with("nv:") => {
                Ok(EmbeddingModelChoice::Nvidia)
            }
            // Gemini embeddings
            "gemini" | "gemini-embed" | "text-embedding-004" | "gemini-embedding-001" => {
                Ok(EmbeddingModelChoice::Gemini)
            }
            _ if lowered.starts_with("gemini/") || lowered.starts_with("gemini:") || lowered.starts_with("google:") => {
                Ok(EmbeddingModelChoice::Gemini)
            }
            // Mistral embeddings
            "mistral" | "mistral-embed" => Ok(EmbeddingModelChoice::Mistral),
            _ if lowered.starts_with("mistral/") || lowered.starts_with("mistral:") => {
                Ok(EmbeddingModelChoice::Mistral)
            }
            _ => Err(anyhow!(
                "unknown embedding model '{}'. Valid options: bge-small, bge-base, nomic, gte-large, openai, openai-small, openai-ada, nvidia, gemini, mistral",
                s
            )),
        }
    }
}
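
// A small sketch exercising the alias table in `from_str` above; expected
// values are read straight from the match arms, and "nvidia/custom-model" is
// a hypothetical placeholder used only to hit the prefix rule.
#[cfg(test)]
mod model_parsing_tests {
    use super::*;

    #[test]
    fn parses_common_aliases() {
        // Parsing is case-insensitive and trims whitespace.
        assert_eq!(
            "BGE-Small".parse::<EmbeddingModelChoice>().unwrap(),
            EmbeddingModelChoice::BgeSmall
        );
        // Bare "openai" resolves to the highest-quality OpenAI model.
        assert_eq!(
            "openai".parse::<EmbeddingModelChoice>().unwrap(),
            EmbeddingModelChoice::OpenAILarge
        );
        // Any "nvidia/..." id falls through to the NVIDIA prefix rule.
        assert_eq!(
            "nvidia/custom-model".parse::<EmbeddingModelChoice>().unwrap(),
            EmbeddingModelChoice::Nvidia
        );
        assert!("not-a-model".parse::<EmbeddingModelChoice>().is_err());
    }
}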

impl std::fmt::Display for EmbeddingModelChoice {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.name())
    }
}

impl EmbeddingModelChoice {
    /// Infer a compatible embedding model from the vector dimension stored in an MV2 file.
    ///
    /// This enables auto-detection: users don't need to specify --query-embedding-model
    /// if the MV2 file has vectors. The dimension narrows the choice to a model family;
    /// where several models share a dimension, the default for that dimension is chosen.
    ///
    /// # Dimension Mapping
    /// - 384  → BGE-small (default local model)
    /// - 768  → BGE-base (Nomic and Gemini share this dimension)
    /// - 1024 → GTE-large (Mistral shares this dimension)
    /// - 1536 → OpenAI small (Ada shares this dimension)
    /// - 3072 → OpenAI large
    pub fn from_dimension(dim: u32) -> Option<Self> {
        match dim {
            384 => Some(EmbeddingModelChoice::BgeSmall),
            768 => Some(EmbeddingModelChoice::BgeBase), // could also be Nomic or Gemini
            1024 => Some(EmbeddingModelChoice::GteLarge), // could also be Mistral
            1536 => Some(EmbeddingModelChoice::OpenAISmall), // could also be Ada
            3072 => Some(EmbeddingModelChoice::OpenAILarge),
            0 => None, // No vectors in file
            _ => {
                tracing::warn!("Unknown embedding dimension {}, using default model", dim);
                None
            }
        }
    }
}
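
// Sketch tests for the dimension auto-detection above; the expected values
// mirror the `from_dimension` match arms directly.
#[cfg(test)]
mod dimension_detection_tests {
    use super::*;

    #[test]
    fn maps_known_dimensions() {
        assert_eq!(
            EmbeddingModelChoice::from_dimension(384),
            Some(EmbeddingModelChoice::BgeSmall)
        );
        assert_eq!(
            EmbeddingModelChoice::from_dimension(768),
            Some(EmbeddingModelChoice::BgeBase)
        );
        assert_eq!(
            EmbeddingModelChoice::from_dimension(3072),
            Some(EmbeddingModelChoice::OpenAILarge)
        );
        // Zero means the file has no vectors; unknown dimensions also yield None.
        assert_eq!(EmbeddingModelChoice::from_dimension(0), None);
        assert_eq!(EmbeddingModelChoice::from_dimension(123), None);
    }
}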

/// CLI configuration loaded from environment variables
#[derive(Debug, Clone)]
pub struct CliConfig {
    pub api_key: Option<String>,
    pub api_url: String,
    pub cache_dir: PathBuf,
    pub ticket_pubkey: Option<VerifyingKey>,
    pub models_dir: PathBuf,
    pub offline: bool,
    /// Embedding model for semantic search (can be overridden by CLI flag)
    pub embedding_model: EmbeddingModelChoice,
}

impl PartialEq for CliConfig {
    fn eq(&self, other: &Self) -> bool {
        self.api_key == other.api_key
            && self.api_url == other.api_url
            && self.cache_dir == other.cache_dir
            && self.models_dir == other.models_dir
            && self.offline == other.offline
            && self.embedding_model == other.embedding_model
    }
}

impl Eq for CliConfig {}

impl CliConfig {
    pub fn load() -> Result<Self> {
        let api_key = env::var("MEMVID_API_KEY").ok().and_then(|value| {
            let trimmed = value.trim().to_string();
            (!trimmed.is_empty()).then_some(trimmed)
        });

        let api_url = env::var("MEMVID_API_URL").unwrap_or_else(|_| DEFAULT_API_URL.to_string());

        let cache_dir_raw =
            env::var("MEMVID_CACHE_DIR").unwrap_or_else(|_| DEFAULT_CACHE_DIR.to_string());
        let cache_dir = expand_path(&cache_dir_raw)?;

        let models_dir_raw =
            env::var("MEMVID_MODELS_DIR").unwrap_or_else(|_| "~/.memvid/models".to_string());
        let models_dir = expand_path(&models_dir_raw)?;

        // Default public key for memvid.com dashboard ticket verification.
        // This allows users to use --memory-id without setting MEMVID_TICKET_PUBKEY.
        const DEFAULT_TICKET_PUBKEY: &str = "8wP1J2H+Tlx3PM3eT0lN2wDvoYrvl1DREKGKVb/V2cw=";

        let ticket_pubkey_str = env::var("MEMVID_TICKET_PUBKEY")
            .ok()
            .and_then(|value| {
                let trimmed = value.trim();
                if trimmed.is_empty() {
                    None
                } else {
                    Some(trimmed.to_string())
                }
            })
            .unwrap_or_else(|| DEFAULT_TICKET_PUBKEY.to_string());

        let ticket_pubkey = Some(memvid_core::parse_ed25519_public_key_base64(&ticket_pubkey_str)?);

        let offline = env::var("MEMVID_OFFLINE")
            .ok()
            .map(|value| matches!(value.trim().to_ascii_lowercase().as_str(), "1" | "true" | "yes"))
            .unwrap_or(false);

        // Load embedding model from env var, default to BGE-small
        let embedding_model = env::var("MEMVID_EMBEDDING_MODEL")
            .ok()
            .and_then(|value| {
                let trimmed = value.trim();
                if trimmed.is_empty() {
                    None
                } else {
                    EmbeddingModelChoice::from_str(trimmed).ok()
                }
            })
            .unwrap_or_default();

        Ok(Self {
            api_key,
            api_url,
            cache_dir,
            ticket_pubkey,
            models_dir,
            offline,
            embedding_model,
        })
    }

    /// Create a new config with a different embedding model
    pub fn with_embedding_model(&self, model: EmbeddingModelChoice) -> Self {
        Self {
            embedding_model: model,
            ..self.clone()
        }
    }
}

fn expand_path(value: &str) -> Result<PathBuf> {
    if value.trim().is_empty() {
        // This helper expands both the cache and the models directories, so
        // keep the message generic.
        return Err(anyhow!("directory path cannot be empty"));
    }

    let expanded = if let Some(stripped) = value.strip_prefix("~/") {
        home_dir()?.join(stripped)
    } else if let Some(stripped) = value.strip_prefix("~\\") {
        // Support Windows-style "~\" prefix.
        home_dir()?.join(stripped)
    } else if value == "~" {
        home_dir()?
    } else {
        PathBuf::from(value)
    };

    if expanded.is_absolute() {
        Ok(expanded)
    } else {
        Ok(env::current_dir()?.join(expanded))
    }
}
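
// A small sketch of the expansion rules above: absolute paths pass through
// unchanged and relative paths are anchored at the current working directory.
// (Tilde expansion depends on HOME and is covered by the env tests further down.)
#[cfg(test)]
mod expand_path_tests {
    use super::*;

    #[test]
    fn absolute_paths_pass_through() {
        let input = if cfg!(windows) { r"C:\memvid" } else { "/tmp/memvid" };
        assert_eq!(expand_path(input).unwrap(), PathBuf::from(input));
    }

    #[test]
    fn relative_paths_join_current_dir() {
        let expanded = expand_path("relative/cache").unwrap();
        assert!(expanded.is_absolute());
        assert!(expanded.ends_with("relative/cache"));
    }
}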

fn home_dir() -> Result<PathBuf> {
    if let Some(path) = env::var_os("HOME") {
        if !path.is_empty() {
            return Ok(PathBuf::from(path));
        }
    }

    #[cfg(windows)]
    {
        if let Some(path) = env::var_os("USERPROFILE") {
            if !path.is_empty() {
                return Ok(PathBuf::from(path));
            }
        }
        if let (Some(drive), Some(path)) = (env::var_os("HOMEDRIVE"), env::var_os("HOMEPATH")) {
            if !drive.is_empty() && !path.is_empty() {
                return Ok(PathBuf::from(format!(
                    "{}{}",
                    drive.to_string_lossy(),
                    path.to_string_lossy()
                )));
            }
        }
    }

    Err(anyhow!("unable to resolve home directory"))
}

#[cfg(test)]
mod tests {
    use super::*;
    use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
    use base64::Engine;
    use ed25519_dalek::SigningKey;
    use std::sync::{Mutex, OnceLock};

    fn env_lock() -> std::sync::MutexGuard<'static, ()> {
        static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
        LOCK.get_or_init(|| Mutex::new(())).lock().unwrap()
    }

    fn set_or_unset(var: &str, value: Option<String>) {
        match value {
            Some(v) => unsafe { env::set_var(var, v) },
            None => unsafe { env::remove_var(var) },
        }
    }

    #[test]
    fn defaults_expand_using_home_directory() {
        let _guard = env_lock();

        let previous_home = env::var("HOME").ok();
        #[cfg(windows)]
        let previous_userprofile = env::var("USERPROFILE").ok();

        for var in [
            "MEMVID_API_KEY",
            "MEMVID_API_URL",
            "MEMVID_CACHE_DIR",
            "MEMVID_TICKET_PUBKEY",
            "MEMVID_MODELS_DIR",
            "MEMVID_OFFLINE",
        ] {
            unsafe { env::remove_var(var) };
        }

        let tmp = tempfile::tempdir().expect("tmpdir");
        let tmp_path = tmp.path().to_path_buf();
        unsafe { env::set_var("HOME", &tmp_path) };
        #[cfg(windows)]
        unsafe {
            env::set_var("USERPROFILE", &tmp_path)
        };

        let config = CliConfig::load().expect("load");
        assert_eq!(config.api_key, None);
        assert_eq!(config.api_url, "https://memvid.com");
        assert_eq!(config.cache_dir, tmp_path.join(".cache/memvid"));
        // `load()` falls back to the built-in dashboard ticket pubkey when the
        // env var is unset, so the key is always present.
        assert!(config.ticket_pubkey.is_some());
        assert_eq!(config.models_dir, tmp_path.join(".memvid/models"));
        assert!(!config.offline);

        set_or_unset("HOME", previous_home);
        #[cfg(windows)]
        {
            set_or_unset("USERPROFILE", previous_userprofile);
        }
    }

    #[test]
    fn env_overrides_are_respected() {
        let _guard = env_lock();

        let previous_env: Vec<(&'static str, Option<String>)> = [
            "MEMVID_API_KEY",
            "MEMVID_API_URL",
            "MEMVID_CACHE_DIR",
            "MEMVID_TICKET_PUBKEY",
            "MEMVID_MODELS_DIR",
            "MEMVID_OFFLINE",
        ]
        .into_iter()
        .map(|var| (var, env::var(var).ok()))
        .collect();

        unsafe { env::set_var("MEMVID_API_KEY", "abc123") };
        unsafe { env::set_var("MEMVID_API_URL", "https://staging.memvid.app") };
        unsafe { env::set_var("MEMVID_CACHE_DIR", "~/memvid-cache") };
        unsafe { env::set_var("MEMVID_MODELS_DIR", "~/models") };
        unsafe { env::set_var("MEMVID_OFFLINE", "true") };
        let signing = SigningKey::from_bytes(&[9u8; 32]);
        let encoded = BASE64_STANDARD.encode(signing.verifying_key().as_bytes());
        unsafe { env::set_var("MEMVID_TICKET_PUBKEY", encoded) };

        let tmp = tempfile::tempdir().expect("tmpdir");
        let tmp_path = tmp.path().to_path_buf();
        unsafe { env::set_var("HOME", &tmp_path) };
        #[cfg(windows)]
        unsafe {
            env::set_var("USERPROFILE", &tmp_path)
        };

        let config = CliConfig::load().expect("load");
        assert_eq!(config.api_key.as_deref(), Some("abc123"));
        assert_eq!(config.api_url, "https://staging.memvid.app");
        assert_eq!(config.cache_dir, tmp_path.join("memvid-cache"));
        assert_eq!(
            config.ticket_pubkey.expect("pubkey").as_bytes(),
            signing.verifying_key().as_bytes()
        );
        assert_eq!(config.models_dir, tmp_path.join("models"));
        assert!(config.offline);

        for (var, value) in previous_env {
            set_or_unset(var, value);
        }
    }

    #[test]
    fn rejects_empty_cache_dir() {
        let _guard = env_lock();

        let previous = env::var("MEMVID_CACHE_DIR").ok();
        unsafe { env::set_var("MEMVID_CACHE_DIR", " ") };
        let err = CliConfig::load().expect_err("should fail");
        assert!(err.to_string().contains("cannot be empty"));
        set_or_unset("MEMVID_CACHE_DIR", previous);
    }
}

/// Initialize tracing/logging based on verbosity level
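///
/// Verbosity levels map to filters (0 → warn, 1 → info, 2 → debug, 3+ → trace);
/// an explicit `RUST_LOG` environment filter takes precedence when set, and the
/// noisy llama/ggml targets are capped at `error` in either case.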
pub fn init_tracing(verbosity: u8) -> Result<()> {
    use std::io::IsTerminal;
    use tracing_subscriber::{filter::Directive, fmt, EnvFilter};

    let level = match verbosity {
        0 => "warn",
        1 => "info",
        2 => "debug",
        _ => "trace",
    };

    let mut env_filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(level));
    for directive_str in ["llama_cpp=error", "llama_cpp_sys=error", "ggml=error"] {
        if let Ok(directive) = directive_str.parse::<Directive>() {
            env_filter = env_filter.add_directive(directive);
        }
    }

    // Disable ANSI color codes when stderr is not a terminal (e.g., piped or
    // combined with `2>&1`). This prevents control characters from polluting
    // JSON output when combined with stdout.
    let use_ansi = std::io::stderr().is_terminal();

    fmt()
        .with_env_filter(env_filter)
        .with_writer(std::io::stderr)
        .with_target(false)
        .without_time()
        .with_ansi(use_ansi)
        .try_init()
        .map_err(|err| anyhow!(err))?;
    Ok(())
}

/// Resolve LLM context budget override from CLI or environment
pub fn resolve_llm_context_budget_override(cli_value: Option<usize>) -> Result<Option<usize>> {
    use anyhow::bail;

    if let Some(value) = cli_value {
        if value == 0 {
            bail!("--llm-context-depth must be a positive integer");
        }
        return Ok(Some(value));
    }

    let raw_env = match env::var("MEMVID_LLM_CONTEXT_BUDGET") {
        Ok(value) => value,
        Err(_) => return Ok(None),
    };

    let trimmed = raw_env.trim();
    if trimmed.is_empty() {
        return Ok(None);
    }

    let digits: String = trimmed
        .chars()
        .filter(|ch| !ch.is_ascii_whitespace() && *ch != '_')
        .collect();

    if digits.is_empty() {
        bail!("MEMVID_LLM_CONTEXT_BUDGET must be a positive integer value");
    }

    let value: usize = digits.parse().map_err(|err| {
        anyhow!(
            "MEMVID_LLM_CONTEXT_BUDGET value '{}' is not a valid number: {}",
            trimmed,
            err
        )
    })?;

    if value == 0 {
        bail!("MEMVID_LLM_CONTEXT_BUDGET must be a positive integer");
    }

    Ok(Some(value))
}
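
// Sketch tests for the CLI side of the budget resolution above; the env-var
// path is not exercised here since it touches process-global state.
#[cfg(test)]
mod context_budget_tests {
    use super::*;

    #[test]
    fn cli_value_wins_and_zero_is_rejected() {
        // A CLI value short-circuits before the environment is consulted.
        assert_eq!(
            resolve_llm_context_budget_override(Some(4096)).unwrap(),
            Some(4096)
        );
        assert!(resolve_llm_context_budget_override(Some(0)).is_err());
    }
}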

use crate::gemini_embeddings::GeminiEmbeddingProvider;
use crate::mistral_embeddings::MistralEmbeddingProvider;
use crate::nvidia_embeddings::NvidiaEmbeddingProvider;
use crate::openai_embeddings::OpenAIEmbeddingProvider;

/// Internal embedding backend - local fastembed or remote providers.
#[derive(Clone)]
enum EmbeddingBackend {
    FastEmbed(std::sync::Arc<std::sync::Mutex<fastembed::TextEmbedding>>),
    OpenAI(std::sync::Arc<OpenAIEmbeddingProvider>),
    Nvidia(std::sync::Arc<NvidiaEmbeddingProvider>),
    Gemini(std::sync::Arc<GeminiEmbeddingProvider>),
    Mistral(std::sync::Arc<MistralEmbeddingProvider>),
}

/// Embedding runtime wrapper supporting local and remote embeddings
#[derive(Clone)]
pub struct EmbeddingRuntime {
    backend: EmbeddingBackend,
    model: EmbeddingModelChoice,
    dimension: std::sync::Arc<AtomicUsize>,
}

impl EmbeddingRuntime {
    fn new_fastembed(
        backend: fastembed::TextEmbedding,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::FastEmbed(std::sync::Arc::new(std::sync::Mutex::new(
                backend,
            ))),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    fn new_openai(
        provider: OpenAIEmbeddingProvider,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::OpenAI(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    fn new_nvidia(provider: NvidiaEmbeddingProvider, model: EmbeddingModelChoice) -> Self {
        Self {
            backend: EmbeddingBackend::Nvidia(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(0)),
        }
    }

    fn new_gemini(
        provider: GeminiEmbeddingProvider,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::Gemini(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    fn new_mistral(
        provider: MistralEmbeddingProvider,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::Mistral(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    const MAX_OPENAI_EMBEDDING_TEXT_LEN: usize = 20_000;
    // NVIDIA Integrate embeddings enforce a 4096-token limit; use a tighter char cap as a guardrail.
    const MAX_NVIDIA_EMBEDDING_TEXT_LEN: usize = 12_000;

    // Gemini enforces an 8192-token limit; the char cap is a conservative proxy.
    const MAX_GEMINI_EMBEDDING_TEXT_LEN: usize = 20_000;
    // Mistral enforces an 8192-token limit; the char cap is a conservative proxy.
    const MAX_MISTRAL_EMBEDDING_TEXT_LEN: usize = 20_000;

    fn max_remote_embedding_chars(&self) -> usize {
        match &self.backend {
            EmbeddingBackend::OpenAI(_) => Self::MAX_OPENAI_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::Nvidia(_) => Self::MAX_NVIDIA_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::Gemini(_) => Self::MAX_GEMINI_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::Mistral(_) => Self::MAX_MISTRAL_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::FastEmbed(_) => usize::MAX,
        }
    }

    /// Truncate text for embedding to reduce the risk of provider token-limit errors.
    ///
    /// The cap is applied in bytes; the cut is pulled back to the nearest UTF-8
    /// character boundary so slicing cannot panic mid-character.
    fn truncate_for_embedding<'a>(
        text: &'a str,
        max_chars: usize,
    ) -> std::borrow::Cow<'a, str> {
        if text.len() <= max_chars {
            std::borrow::Cow::Borrowed(text)
        } else {
            // `&text[..max_chars]` would panic if the cap lands inside a
            // multi-byte character, so walk back to a valid boundary first.
            let mut end = max_chars;
            while !text.is_char_boundary(end) {
                end -= 1;
            }
            tracing::info!("Truncated embedding text from {} to {} chars", text.len(), end);
            std::borrow::Cow::Owned(text[..end].to_string())
        }
    }

    fn note_dimension(&self, observed: usize) -> Result<()> {
        if observed == 0 {
            return Err(anyhow!("embedding provider returned zero-length embedding"));
        }

        let current = self.dimension.load(Ordering::Relaxed);
        if current == 0 {
            self.dimension.store(observed, Ordering::Relaxed);
            return Ok(());
        }

        if current != observed {
            return Err(anyhow!(
                "embedding provider returned {observed}D vectors but runtime expects {current}D"
            ));
        }

        Ok(())
    }

    fn truncate_if_remote<'a>(&self, text: &'a str) -> std::borrow::Cow<'a, str> {
        match &self.backend {
            EmbeddingBackend::OpenAI(_)
            | EmbeddingBackend::Nvidia(_)
            | EmbeddingBackend::Gemini(_)
            | EmbeddingBackend::Mistral(_) => {
                Self::truncate_for_embedding(text, self.max_remote_embedding_chars())
            }
            EmbeddingBackend::FastEmbed(_) => std::borrow::Cow::Borrowed(text),
        }
    }

    pub fn embed_passage(&self, text: &str) -> Result<Vec<f32>> {
        let text = self.truncate_if_remote(text);
        let embedding = match &self.backend {
            EmbeddingBackend::FastEmbed(model) => {
                let mut guard = model
                    .lock()
                    .map_err(|_| anyhow!("fastembed runtime poisoned"))?;
                let outputs = guard
                    .embed(vec![text.into_owned()], None)
                    .map_err(|err| anyhow!("failed to compute embedding with fastembed: {err}"))?;
                outputs
                    .into_iter()
                    .next()
                    .ok_or_else(|| anyhow!("fastembed returned no embedding output"))?
            }
            EmbeddingBackend::OpenAI(provider) => {
                use memvid_core::EmbeddingProvider;
                provider
                    .embed_text(&text)
                    .map_err(|err| anyhow!("failed to compute embedding with OpenAI: {err}"))?
            }
            EmbeddingBackend::Nvidia(provider) => provider
                .embed_passage(&text)
                .map_err(|err| anyhow!("failed to compute embedding with NVIDIA: {err}"))?,
            EmbeddingBackend::Gemini(provider) => provider
                .embed_text(&text)
                .map_err(|err| anyhow!("failed to compute embedding with Gemini: {err}"))?,
            EmbeddingBackend::Mistral(provider) => provider
                .embed_text(&text)
                .map_err(|err| anyhow!("failed to compute embedding with Mistral: {err}"))?,
        };

        self.note_dimension(embedding.len())?;
        Ok(embedding)
    }

    pub fn embed_query(&self, text: &str) -> Result<Vec<f32>> {
        let text = self.truncate_if_remote(text);
        match &self.backend {
            EmbeddingBackend::Nvidia(provider) => {
                let embedding = provider
                    .embed_query(&text)
                    .map_err(|err| anyhow!("failed to compute embedding with NVIDIA: {err}"))?;
                self.note_dimension(embedding.len())?;
                Ok(embedding)
            }
            _ => self.embed_passage(&text),
        }
    }

    pub fn embed_batch_passages(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>> {
        if texts.is_empty() {
            return Ok(Vec::new());
        }

        let truncated: Vec<std::borrow::Cow<'_, str>> =
            texts.iter().map(|t| self.truncate_if_remote(t)).collect();
        let truncated_refs: Vec<&str> = truncated.iter().map(|c| c.as_ref()).collect();

        let embeddings = match &self.backend {
            EmbeddingBackend::FastEmbed(model) => {
                let mut guard = model
                    .lock()
                    .map_err(|_| anyhow!("fastembed runtime poisoned"))?;
                guard
                    .embed(
                        truncated_refs
                            .iter()
                            .map(|s| (*s).to_string())
                            .collect::<Vec<String>>(),
                        None,
                    )
                    .map_err(|err| anyhow!("failed to compute embeddings with fastembed: {err}"))?
            }
            EmbeddingBackend::OpenAI(provider) => {
                use memvid_core::EmbeddingProvider;
                provider
                    .embed_batch(&truncated_refs)
                    .map_err(|err| anyhow!("failed to compute embeddings with OpenAI: {err}"))?
            }
            EmbeddingBackend::Nvidia(provider) => provider
                .embed_passages(&truncated_refs)
                .map_err(|err| anyhow!("failed to compute embeddings with NVIDIA: {err}"))?,
            EmbeddingBackend::Gemini(provider) => provider
                .embed_batch(&truncated_refs)
                .map_err(|err| anyhow!("failed to compute embeddings with Gemini: {err}"))?,
            EmbeddingBackend::Mistral(provider) => provider
                .embed_batch(&truncated_refs)
                .map_err(|err| anyhow!("failed to compute embeddings with Mistral: {err}"))?,
        };

        if let Some(first) = embeddings.first() {
            self.note_dimension(first.len())?;
        }
        if let Some(expected) = embeddings.first().map(|e| e.len()) {
            if embeddings.iter().any(|e| e.len() != expected) {
                return Err(anyhow!("embedding provider returned mixed vector dimensions"));
            }
        }

        Ok(embeddings)
    }

    pub fn embed_batch_queries(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>> {
        if texts.is_empty() {
            return Ok(Vec::new());
        }

        let truncated: Vec<std::borrow::Cow<'_, str>> =
            texts.iter().map(|t| self.truncate_if_remote(t)).collect();
        let truncated_refs: Vec<&str> = truncated.iter().map(|c| c.as_ref()).collect();

        match &self.backend {
            EmbeddingBackend::Nvidia(provider) => {
                let embeddings = provider
                    .embed_queries(&truncated_refs)
                    .map_err(|err| anyhow!("failed to compute embeddings with NVIDIA: {err}"))?;

                if let Some(first) = embeddings.first() {
                    self.note_dimension(first.len())?;
                }
                if let Some(expected) = embeddings.first().map(|e| e.len()) {
                    if embeddings.iter().any(|e| e.len() != expected) {
                        return Err(anyhow!("embedding provider returned mixed vector dimensions"));
                    }
                }

                Ok(embeddings)
            }
            _ => self.embed_batch_passages(&truncated_refs),
        }
    }

    pub fn dimension(&self) -> usize {
        self.dimension.load(Ordering::Relaxed)
    }

    pub fn model_choice(&self) -> EmbeddingModelChoice {
        self.model
    }

    pub fn provider_kind(&self) -> &'static str {
        match &self.backend {
            EmbeddingBackend::FastEmbed(_) => "fastembed",
            EmbeddingBackend::OpenAI(_) => "openai",
            EmbeddingBackend::Nvidia(_) => "nvidia",
            EmbeddingBackend::Gemini(_) => "gemini",
            EmbeddingBackend::Mistral(_) => "mistral",
        }
    }

    pub fn provider_model_id(&self) -> String {
        match &self.backend {
            EmbeddingBackend::FastEmbed(_) => self.model.canonical_model_id().to_string(),
            EmbeddingBackend::OpenAI(provider) => {
                use memvid_core::EmbeddingProvider;
                provider.model().to_string()
            }
            EmbeddingBackend::Nvidia(provider) => provider.model().to_string(),
            EmbeddingBackend::Gemini(provider) => provider.model().to_string(),
            EmbeddingBackend::Mistral(provider) => provider.model().to_string(),
        }
    }
}
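
// Sketch tests for `truncate_for_embedding` above, including the multi-byte
// case that a naive byte slice would panic on.
#[cfg(test)]
mod truncation_tests {
    use super::*;

    #[test]
    fn short_text_is_borrowed() {
        let out = EmbeddingRuntime::truncate_for_embedding("hello", 10);
        assert!(matches!(out, std::borrow::Cow::Borrowed("hello")));
    }

    #[test]
    fn truncation_respects_char_boundaries() {
        // 'é' spans bytes 1..3, so a 2-byte cap lands mid-character and must
        // be pulled back to the boundary after 'a'.
        let out = EmbeddingRuntime::truncate_for_embedding("aéé", 2);
        assert_eq!(out.as_ref(), "a");
    }
}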

impl memvid_core::VecEmbedder for EmbeddingRuntime {
    fn embed_query(&self, text: &str) -> memvid_core::Result<Vec<f32>> {
        EmbeddingRuntime::embed_query(self, text).map_err(|err| {
            memvid_core::MemvidError::EmbeddingFailed {
                reason: err.to_string().into_boxed_str(),
            }
        })
    }

    fn embedding_dimension(&self) -> usize {
        self.dimension()
    }
}

/// Ensure fastembed cache directory exists
fn ensure_fastembed_cache(config: &CliConfig) -> Result<PathBuf> {
    use std::fs;

    let cache_dir = config.models_dir.clone();
    fs::create_dir_all(&cache_dir)?;
    Ok(cache_dir)
}

/// Get approximate model size in MB for user-friendly error messages
fn model_size_mb(model: EmbeddingModelChoice) -> usize {
    match model {
        EmbeddingModelChoice::BgeSmall => 33,
        EmbeddingModelChoice::BgeBase => 110,
        EmbeddingModelChoice::Nomic => 137,
        EmbeddingModelChoice::GteLarge => 327,
        // Remote/cloud models don't require local download
        EmbeddingModelChoice::OpenAILarge
        | EmbeddingModelChoice::OpenAISmall
        | EmbeddingModelChoice::OpenAIAda
        | EmbeddingModelChoice::Nvidia
        | EmbeddingModelChoice::Gemini
        | EmbeddingModelChoice::Mistral => 0,
    }
}

/// Instantiate an embedding runtime with the configured model
fn instantiate_embedding_runtime(config: &CliConfig) -> Result<EmbeddingRuntime> {
    use tracing::info;

    let embedding_model = config.embedding_model;

    if embedding_model.dimensions() > 0 {
        info!(
            "Loading embedding model: {} ({}D)",
            embedding_model.name(),
            embedding_model.dimensions()
        );
    } else {
        info!("Loading embedding model: {}", embedding_model.name());
    }

    if config.offline && embedding_model.is_remote() {
        anyhow::bail!(
            "remote embeddings are unavailable while offline; set MEMVID_OFFLINE=0 or use a local embedding model"
        );
    }

    // Check if OpenAI model
    if embedding_model.is_openai() {
        return instantiate_openai_runtime(embedding_model);
    }

    if embedding_model == EmbeddingModelChoice::Nvidia {
        return instantiate_nvidia_runtime(None);
    }

    if embedding_model == EmbeddingModelChoice::Gemini {
        return instantiate_gemini_runtime();
    }

    if embedding_model == EmbeddingModelChoice::Mistral {
        return instantiate_mistral_runtime();
    }

    // Local fastembed model
    instantiate_fastembed_runtime(config, embedding_model)
}

/// Instantiate OpenAI embedding runtime
fn instantiate_openai_runtime(embedding_model: EmbeddingModelChoice) -> Result<EmbeddingRuntime> {
    use anyhow::bail;
    use memvid_core::EmbeddingConfig;
    use tracing::info;

    let api_key = std::env::var("OPENAI_API_KEY").map_err(|_| {
        anyhow!("OPENAI_API_KEY environment variable is required for OpenAI embeddings")
    })?;

    if api_key.is_empty() {
        bail!("OPENAI_API_KEY cannot be empty");
    }

    let config = match embedding_model {
        EmbeddingModelChoice::OpenAILarge => EmbeddingConfig::openai_large(),
        EmbeddingModelChoice::OpenAISmall => EmbeddingConfig::openai_small(),
        EmbeddingModelChoice::OpenAIAda => EmbeddingConfig::openai_ada(),
        _ => unreachable!("instantiate_openai_runtime called with a non-OpenAI model"),
    };

    let provider = OpenAIEmbeddingProvider::new(api_key, config.clone())
        .map_err(|err| anyhow!("failed to create OpenAI embedding provider: {err}"))?;

    info!(
        "OpenAI embedding provider ready: model={}, dimension={}",
        config.model, config.dimension
    );

    Ok(EmbeddingRuntime::new_openai(
        provider,
        embedding_model,
        config.dimension,
    ))
}

fn normalize_nvidia_embedding_model_override(raw: &str) -> Option<String> {
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        return None;
    }

    let lowered = trimmed.to_ascii_lowercase();
    if lowered == "nvidia" || lowered == "nv" {
        return None;
    }

    let without_prefix = trimmed
        .strip_prefix("nvidia:")
        .or_else(|| trimmed.strip_prefix("nv:"))
        .unwrap_or(trimmed)
        .trim();

    if without_prefix.is_empty() {
        return None;
    }

    if without_prefix.eq_ignore_ascii_case("nv-embed-v1") {
        return Some("nvidia/nv-embed-v1".to_string());
    }

    if without_prefix.contains('/') {
        return Some(without_prefix.to_string());
    }

    Some(format!("nvidia/{without_prefix}"))
}
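
// Sketch tests for the override normalization above; "custom-model" and
// "org/custom-model" are hypothetical placeholder names.
#[cfg(test)]
mod nvidia_override_tests {
    use super::*;

    #[test]
    fn normalizes_nvidia_overrides() {
        // Bare provider aliases mean "no override".
        assert_eq!(normalize_nvidia_embedding_model_override("nvidia"), None);
        assert_eq!(normalize_nvidia_embedding_model_override("  "), None);
        // Prefixes are stripped and the canonical id restored.
        assert_eq!(
            normalize_nvidia_embedding_model_override("nv:nv-embed-v1").as_deref(),
            Some("nvidia/nv-embed-v1")
        );
        // Unqualified names get the "nvidia/" namespace; qualified ones pass through.
        assert_eq!(
            normalize_nvidia_embedding_model_override("custom-model").as_deref(),
            Some("nvidia/custom-model")
        );
        assert_eq!(
            normalize_nvidia_embedding_model_override("org/custom-model").as_deref(),
            Some("org/custom-model")
        );
    }
}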

/// Instantiate NVIDIA embedding runtime
fn instantiate_nvidia_runtime(model_override: Option<&str>) -> Result<EmbeddingRuntime> {
    use tracing::info;

    let normalized = model_override.and_then(normalize_nvidia_embedding_model_override);
    let provider = NvidiaEmbeddingProvider::from_env(normalized.as_deref())
        .map_err(|err| anyhow!("failed to create NVIDIA embedding provider: {err}"))?;

    info!(
        "NVIDIA embedding provider ready: model={}",
        provider.model()
    );

    Ok(EmbeddingRuntime::new_nvidia(
        provider,
        EmbeddingModelChoice::Nvidia,
    ))
}

/// Instantiate Gemini embedding runtime
fn instantiate_gemini_runtime() -> Result<EmbeddingRuntime> {
    use tracing::info;

    let provider = GeminiEmbeddingProvider::from_env()
        .map_err(|err| anyhow!("failed to create Gemini embedding provider: {err}"))?;

    let dimension = provider.dimension();
    info!(
        "Gemini embedding provider ready: model={}, dimension={}",
        provider.model(),
        dimension
    );

    Ok(EmbeddingRuntime::new_gemini(
        provider,
        EmbeddingModelChoice::Gemini,
        dimension,
    ))
}

/// Instantiate Mistral embedding runtime
fn instantiate_mistral_runtime() -> Result<EmbeddingRuntime> {
    use tracing::info;

    let provider = MistralEmbeddingProvider::from_env()
        .map_err(|err| anyhow!("failed to create Mistral embedding provider: {err}"))?;

    let dimension = provider.dimension();
    info!(
        "Mistral embedding provider ready: model={}, dimension={}",
        provider.model(),
        dimension
    );

    Ok(EmbeddingRuntime::new_mistral(
        provider,
        EmbeddingModelChoice::Mistral,
        dimension,
    ))
}

/// Instantiate fastembed (local) embedding runtime
fn instantiate_fastembed_runtime(
    config: &CliConfig,
    embedding_model: EmbeddingModelChoice,
) -> Result<EmbeddingRuntime> {
    use anyhow::bail;
    use fastembed::{InitOptions, TextEmbedding};
    use std::fs;

    let cache_dir = ensure_fastembed_cache(config)?;

    if config.offline {
        let mut entries = fs::read_dir(&cache_dir)?;
        if entries.next().is_none() {
            bail!(
                "semantic embeddings unavailable while offline; allow one connected run so fastembed can cache model weights"
            );
        }
    }

    let options = InitOptions::new(embedding_model.to_fastembed_model())
        .with_cache_dir(cache_dir)
        .with_show_download_progress(true);
    let mut model = TextEmbedding::try_new(options).map_err(|err| {
        // Provide platform-specific guidance for model download issues
        let platform_hint = if cfg!(target_os = "windows") {
            "\n\nWindows users: If model downloads fail, try:\n\
            1. Run as Administrator\n\
            2. Check your antivirus isn't blocking downloads\n\
            3. Use OpenAI embeddings instead: set OPENAI_API_KEY and use --embedding-model openai"
        } else if cfg!(target_os = "linux") {
            "\n\nLinux users: If model downloads fail, try:\n\
            1. Check disk space in ~/.memvid/models\n\
            2. Ensure you have network access to huggingface.co\n\
            3. Use OpenAI embeddings instead: export OPENAI_API_KEY=... and use --embedding-model openai"
        } else {
            "\n\nIf model downloads fail, try using OpenAI embeddings:\n\
            export OPENAI_API_KEY=your-key && memvid ... --embedding-model openai"
        };

        anyhow!(
            "Failed to initialize embedding model '{}': {err}\n\n\
            This typically means the model couldn't be downloaded or loaded.\n\
            Model size: ~{} MB{}\n\n\
            See: https://docs.memvid.com/embedding-models",
            embedding_model.name(),
            model_size_mb(embedding_model),
            platform_hint
        )
    })?;

    let probe = model
        .embed(vec!["memvid probe".to_string()], None)
        .map_err(|err| anyhow!("failed to determine embedding dimension: {err}"))?;
    let dimension = probe.first().map(|vec| vec.len()).unwrap_or(0);

    if dimension == 0 {
        bail!("fastembed reported zero-length embeddings");
    }

    // Verify dimension matches expected
    if dimension != embedding_model.dimensions() {
        tracing::warn!(
            "Embedding dimension mismatch: expected {}, got {}",
            embedding_model.dimensions(),
            dimension
        );
    }

    Ok(EmbeddingRuntime::new_fastembed(model, embedding_model, dimension))
}

/// Load embedding runtime (fails if unavailable)
pub fn load_embedding_runtime(config: &CliConfig) -> Result<EmbeddingRuntime> {
    use anyhow::bail;

    match instantiate_embedding_runtime(config) {
        Ok(runtime) => Ok(runtime),
        Err(err) => {
            if config.offline {
                bail!(
                    "semantic embeddings unavailable while offline; allow one connected run so fastembed can cache model weights ({err})"
                );
            }
            Err(err)
        }
    }
}

/// Try to load embedding runtime (returns None if unavailable)
pub fn try_load_embedding_runtime(config: &CliConfig) -> Option<EmbeddingRuntime> {
    use tracing::warn;

    match instantiate_embedding_runtime(config) {
        Ok(runtime) => Some(runtime),
        Err(err) => {
            warn!("semantic embeddings unavailable: {err}");
            None
        }
    }
}

/// Load embedding runtime with an optional model override.
/// If `model_override` is provided, it will be used instead of the config's embedding_model.
pub fn load_embedding_runtime_with_model(
    config: &CliConfig,
    model_override: Option<&str>,
) -> Result<EmbeddingRuntime> {
    use tracing::info;

    let mut raw_override: Option<&str> = None;
    let embedding_model = match model_override {
        Some(model_str) => {
            raw_override = Some(model_str);
            let parsed = model_str.parse::<EmbeddingModelChoice>()?;
            if parsed.dimensions() > 0 {
                info!(
                    "Using embedding model override: {} ({}D)",
                    parsed.name(),
                    parsed.dimensions()
                );
            } else {
                info!("Using embedding model override: {}", parsed.name());
            }
            parsed
        }
        None => config.embedding_model,
    };

    if embedding_model.dimensions() > 0 {
        info!(
            "Loading embedding model: {} ({}D)",
            embedding_model.name(),
            embedding_model.dimensions()
        );
    } else {
        info!("Loading embedding model: {}", embedding_model.name());
    }

    if config.offline && embedding_model.is_remote() {
        anyhow::bail!(
            "remote embeddings are unavailable while offline; set MEMVID_OFFLINE=0 or use a local embedding model"
        );
    }

    if embedding_model.is_openai() {
        return instantiate_openai_runtime(embedding_model);
    }

    if embedding_model == EmbeddingModelChoice::Nvidia {
        return instantiate_nvidia_runtime(raw_override);
    }

    if embedding_model == EmbeddingModelChoice::Gemini {
        return instantiate_gemini_runtime();
    }

    if embedding_model == EmbeddingModelChoice::Mistral {
        return instantiate_mistral_runtime();
    }

    instantiate_fastembed_runtime(config, embedding_model)
}

/// Try to load embedding runtime with model override (returns None if unavailable)
pub fn try_load_embedding_runtime_with_model(
    config: &CliConfig,
    model_override: Option<&str>,
) -> Option<EmbeddingRuntime> {
    use tracing::warn;

    match load_embedding_runtime_with_model(config, model_override) {
        Ok(runtime) => Some(runtime),
        Err(err) => {
            warn!("semantic embeddings unavailable: {err}");
            None
        }
    }
}

/// Load embedding runtime by auto-detecting the model from the MV2 vector dimension.
///
/// Priority:
/// 1. Explicit model override (--query-embedding-model flag)
/// 2. Auto-detect from the MV2 file's stored dimension
/// 3. Fall back to the config default
///
/// This allows users to omit --query-embedding-model when querying files
/// created with non-default embedding models (like OpenAI).
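///
/// A minimal usage sketch (values are hypothetical; marked `ignore` because it
/// needs a real config plus network or cached-model access):
/// ```ignore
/// let config = CliConfig::load()?;
/// // An explicit override always wins:
/// let rt = load_embedding_runtime_for_mv2(&config, Some("bge-base"), Some(768))?;
/// // No override: a 1536-D file auto-selects OpenAI (needs OPENAI_API_KEY):
/// let rt = load_embedding_runtime_for_mv2(&config, None, Some(1536))?;
/// ```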
pub fn load_embedding_runtime_for_mv2(
    config: &CliConfig,
    model_override: Option<&str>,
    mv2_dimension: Option<u32>,
) -> Result<EmbeddingRuntime> {
    use tracing::info;

    // Priority 1: Explicit override
    if let Some(model_str) = model_override {
        return load_embedding_runtime_with_model(config, Some(model_str));
    }

    // Priority 2: Auto-detect from MV2 dimension
    if let Some(dim) = mv2_dimension {
        if let Some(detected_model) = EmbeddingModelChoice::from_dimension(dim) {
            info!(
                "Auto-detected embedding model from MV2: {} ({}D)",
                detected_model.name(),
                dim
            );

            // For OpenAI models, check if API key is available
            if detected_model.is_openai() {
                if std::env::var("OPENAI_API_KEY").is_ok() {
                    return load_embedding_runtime_with_model(config, Some(detected_model.name()));
                } else {
                    // OpenAI detected but no API key - provide helpful error
                    return Err(anyhow!(
                        "MV2 file uses OpenAI embeddings ({}D) but OPENAI_API_KEY is not set.\n\n\
                        Options:\n\
                        1. Set OPENAI_API_KEY environment variable\n\
                        2. Use --query-embedding-model to specify a different model\n\
                        3. Use lexical-only search with --mode lex\n\n\
                        See: https://docs.memvid.com/embedding-models",
                        dim
                    ));
                }
            }

            return load_embedding_runtime_with_model(config, Some(detected_model.name()));
        }
    }

    // Priority 3: Fall back to config default
    load_embedding_runtime(config)
}

/// Try to load embedding runtime for MV2 with auto-detection (returns None if unavailable)
pub fn try_load_embedding_runtime_for_mv2(
    config: &CliConfig,
    model_override: Option<&str>,
    mv2_dimension: Option<u32>,
) -> Option<EmbeddingRuntime> {
    use tracing::warn;

    match load_embedding_runtime_for_mv2(config, model_override, mv2_dimension) {
        Ok(runtime) => Some(runtime),
        Err(err) => {
            warn!("semantic embeddings unavailable: {err}");
            None
        }
    }
}