memvid_cli/config.rs

//! CLI configuration and environment handling
//!
//! This module provides configuration loading from environment variables,
//! tracing initialization, and embedding runtime management for semantic search.

use std::env;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::atomic::{AtomicUsize, Ordering};

use anyhow::{anyhow, Result};
use ed25519_dalek::VerifyingKey;

const DEFAULT_API_URL: &str = "https://memvid.com";
const DEFAULT_CACHE_DIR: &str = "~/.cache/memvid";

/// Supported embedding models for semantic search
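///
/// A minimal usage sketch (`ignore`d; illustrative only, not run as a doctest):
///
/// ```ignore
/// assert_eq!(EmbeddingModelChoice::BgeSmall.name(), "bge-small");
/// assert_eq!(EmbeddingModelChoice::BgeSmall.dimensions(), 384);
/// assert!(EmbeddingModelChoice::OpenAILarge.is_remote());
/// ```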
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum EmbeddingModelChoice {
    /// BGE-small-en-v1.5: Fast, 384-dim, ~78% accuracy (default)
    #[default]
    BgeSmall,
    /// BGE-base-en-v1.5: Balanced, 768-dim, ~85% accuracy
    BgeBase,
    /// Nomic-embed-text-v1.5: 768-dim, ~86% accuracy
    Nomic,
    /// GTE-large-en-v1.5: Best semantic depth, 1024-dim
    GteLarge,
    /// OpenAI text-embedding-3-large: Highest quality, 3072-dim (requires OPENAI_API_KEY)
    OpenAILarge,
    /// OpenAI text-embedding-3-small: Good quality, 1536-dim (requires OPENAI_API_KEY)
    OpenAISmall,
    /// OpenAI text-embedding-ada-002: Legacy model, 1536-dim (requires OPENAI_API_KEY)
    OpenAIAda,
    /// NVIDIA nv-embed-v1: High quality, remote embeddings (requires NVIDIA_API_KEY)
    Nvidia,
    /// Gemini text-embedding-004: Google AI embeddings, 768-dim (requires GOOGLE_API_KEY or GEMINI_API_KEY)
    Gemini,
    /// Mistral mistral-embed: Mistral AI embeddings, 1024-dim (requires MISTRAL_API_KEY)
    Mistral,
}

impl EmbeddingModelChoice {
    /// Check if this is an OpenAI model (requires OPENAI_API_KEY)
    pub fn is_openai(&self) -> bool {
        matches!(
            self,
            EmbeddingModelChoice::OpenAILarge
                | EmbeddingModelChoice::OpenAISmall
                | EmbeddingModelChoice::OpenAIAda
        )
    }

    /// Check if this is a remote/cloud model (not local fastembed)
    pub fn is_remote(&self) -> bool {
        matches!(
            self,
            EmbeddingModelChoice::OpenAILarge
                | EmbeddingModelChoice::OpenAISmall
                | EmbeddingModelChoice::OpenAIAda
                | EmbeddingModelChoice::Nvidia
                | EmbeddingModelChoice::Gemini
                | EmbeddingModelChoice::Mistral
        )
    }

    /// Get the fastembed EmbeddingModel enum value (only for local models)
    ///
    /// # Panics
    /// Panics if called on a remote model. Use `is_remote()` to check first.
    #[cfg(feature = "local-embeddings")]
    pub fn to_fastembed_model(&self) -> fastembed::EmbeddingModel {
        match self {
            EmbeddingModelChoice::BgeSmall => fastembed::EmbeddingModel::BGESmallENV15,
            EmbeddingModelChoice::BgeBase => fastembed::EmbeddingModel::BGEBaseENV15,
            EmbeddingModelChoice::Nomic => fastembed::EmbeddingModel::NomicEmbedTextV15,
            EmbeddingModelChoice::GteLarge => fastembed::EmbeddingModel::GTELargeENV15,
            EmbeddingModelChoice::OpenAILarge
            | EmbeddingModelChoice::OpenAISmall
            | EmbeddingModelChoice::OpenAIAda => {
                panic!("OpenAI models don't use fastembed. Check is_remote() first.")
            }
            EmbeddingModelChoice::Nvidia => {
                panic!("NVIDIA embeddings don't use fastembed. Check is_remote() first.")
            }
            EmbeddingModelChoice::Gemini => {
                panic!("Gemini embeddings don't use fastembed. Check is_remote() first.")
            }
            EmbeddingModelChoice::Mistral => {
                panic!("Mistral embeddings don't use fastembed. Check is_remote() first.")
            }
        }
    }

    /// Get human-readable model name
    pub fn name(&self) -> &'static str {
        match self {
            EmbeddingModelChoice::BgeSmall => "bge-small",
            EmbeddingModelChoice::BgeBase => "bge-base",
            EmbeddingModelChoice::Nomic => "nomic",
            EmbeddingModelChoice::GteLarge => "gte-large",
            EmbeddingModelChoice::OpenAILarge => "openai-large",
            EmbeddingModelChoice::OpenAISmall => "openai-small",
            EmbeddingModelChoice::OpenAIAda => "openai-ada",
            EmbeddingModelChoice::Nvidia => "nvidia",
            EmbeddingModelChoice::Gemini => "gemini",
            EmbeddingModelChoice::Mistral => "mistral",
        }
    }

    /// Get the canonical provider model identifier used for persisted metadata.
    ///
    /// This is intended to match upstream provider IDs (OpenAI) and HuggingFace-style IDs
    /// (fastembed/ONNX) so that memories can record an embedding "identity" that other
    /// runtimes can select deterministically.
    pub fn canonical_model_id(&self) -> &'static str {
        match self {
            EmbeddingModelChoice::BgeSmall => "BAAI/bge-small-en-v1.5",
            EmbeddingModelChoice::BgeBase => "BAAI/bge-base-en-v1.5",
            EmbeddingModelChoice::Nomic => "nomic-embed-text-v1.5",
            EmbeddingModelChoice::GteLarge => "thenlper/gte-large",
            EmbeddingModelChoice::OpenAILarge => "text-embedding-3-large",
            EmbeddingModelChoice::OpenAISmall => "text-embedding-3-small",
            EmbeddingModelChoice::OpenAIAda => "text-embedding-ada-002",
            EmbeddingModelChoice::Nvidia => "nvidia/nv-embed-v1",
            EmbeddingModelChoice::Gemini => "text-embedding-004",
            EmbeddingModelChoice::Mistral => "mistral-embed",
        }
    }

    /// Get embedding dimensions (0 when the dimension is only known at runtime)
    pub fn dimensions(&self) -> usize {
        match self {
            EmbeddingModelChoice::BgeSmall => 384,
            EmbeddingModelChoice::BgeBase => 768,
            EmbeddingModelChoice::Nomic => 768,
            EmbeddingModelChoice::GteLarge => 1024,
            EmbeddingModelChoice::OpenAILarge => 3072,
            EmbeddingModelChoice::OpenAISmall => 1536,
            EmbeddingModelChoice::OpenAIAda => 1536,
            // Remote model; infer from the first embedding response.
            EmbeddingModelChoice::Nvidia => 0,
            EmbeddingModelChoice::Gemini => 768,
            EmbeddingModelChoice::Mistral => 1024,
        }
    }
}

impl FromStr for EmbeddingModelChoice {
    type Err = anyhow::Error;

    fn from_str(s: &str) -> Result<Self> {
        let lowered = s.trim().to_ascii_lowercase();
        match lowered.as_str() {
            "bge-small" | "bge_small" | "bgesmall" | "small" => Ok(EmbeddingModelChoice::BgeSmall),
            "baai/bge-small-en-v1.5" => Ok(EmbeddingModelChoice::BgeSmall),
            "bge-base" | "bge_base" | "bgebase" | "base" => Ok(EmbeddingModelChoice::BgeBase),
            "baai/bge-base-en-v1.5" => Ok(EmbeddingModelChoice::BgeBase),
            "nomic" | "nomic-embed" | "nomic_embed" => Ok(EmbeddingModelChoice::Nomic),
            "nomic-embed-text-v1.5" => Ok(EmbeddingModelChoice::Nomic),
            "gte-large" | "gte_large" | "gtelarge" | "gte" => Ok(EmbeddingModelChoice::GteLarge),
            "thenlper/gte-large" => Ok(EmbeddingModelChoice::GteLarge),
            // OpenAI models - default "openai" maps to "openai-large" for highest quality
            "openai" | "openai-large" | "openai_large" | "text-embedding-3-large" => {
                Ok(EmbeddingModelChoice::OpenAILarge)
            }
            "openai-small" | "openai_small" | "text-embedding-3-small" => {
                Ok(EmbeddingModelChoice::OpenAISmall)
            }
            "openai-ada" | "openai_ada" | "text-embedding-ada-002" | "ada" => {
                Ok(EmbeddingModelChoice::OpenAIAda)
            }
            "nvidia" | "nv" | "nv-embed-v1" | "nvidia/nv-embed-v1" => Ok(EmbeddingModelChoice::Nvidia),
            _ if lowered.starts_with("nvidia/") || lowered.starts_with("nvidia:") || lowered.starts_with("nv:") => {
                Ok(EmbeddingModelChoice::Nvidia)
            }
            // Gemini embeddings
            "gemini" | "gemini-embed" | "text-embedding-004" | "gemini-embedding-001" => {
                Ok(EmbeddingModelChoice::Gemini)
            }
            _ if lowered.starts_with("gemini/") || lowered.starts_with("gemini:") || lowered.starts_with("google:") => {
                Ok(EmbeddingModelChoice::Gemini)
            }
            // Mistral embeddings
            "mistral" | "mistral-embed" => Ok(EmbeddingModelChoice::Mistral),
            _ if lowered.starts_with("mistral/") || lowered.starts_with("mistral:") => {
                Ok(EmbeddingModelChoice::Mistral)
            }
            _ => Err(anyhow!(
                "unknown embedding model '{}'. Valid options: bge-small, bge-base, nomic, gte-large, openai, openai-small, openai-ada, nvidia, gemini, mistral",
                s
            )),
        }
    }
}

impl std::fmt::Display for EmbeddingModelChoice {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.name())
    }
}

impl EmbeddingModelChoice {
    /// Infer the best embedding model from the vector dimension stored in an MV2 file.
    ///
    /// This enables auto-detection: users don't need to specify --query-embedding-model
    /// if the MV2 file has vectors. The dimension narrows the choice to a model family;
    /// where several models share a dimension, the mapping below picks a representative.
    ///
    /// # Dimension Mapping
    /// - 384  → BGE-small (default local model)
    /// - 768  → BGE-base (Nomic and Gemini share this dimension)
    /// - 1024 → GTE-large (Mistral shares this dimension)
    /// - 1536 → OpenAI small (Ada shares this dimension)
    /// - 3072 → OpenAI large
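    ///
    /// A minimal sketch (`ignore`d; illustrative only):
    ///
    /// ```ignore
    /// assert_eq!(EmbeddingModelChoice::from_dimension(384), Some(EmbeddingModelChoice::BgeSmall));
    /// assert_eq!(EmbeddingModelChoice::from_dimension(0), None); // file has no vectors
    /// ```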
    pub fn from_dimension(dim: u32) -> Option<Self> {
        match dim {
            384 => Some(EmbeddingModelChoice::BgeSmall),
            768 => Some(EmbeddingModelChoice::BgeBase), // could also be Nomic or Gemini
            1024 => Some(EmbeddingModelChoice::GteLarge), // could also be Mistral
            1536 => Some(EmbeddingModelChoice::OpenAISmall), // could also be Ada
            3072 => Some(EmbeddingModelChoice::OpenAILarge),
            0 => None, // No vectors in file
            _ => {
                tracing::warn!("Unknown embedding dimension {}, using default model", dim);
                None
            }
        }
    }
}

/// CLI configuration loaded from environment variables and config file
#[derive(Debug, Clone)]
pub struct CliConfig {
    pub api_key: Option<String>,
    pub api_url: String,
    /// Default memory ID for dashboard sync (from config file)
    pub memory_id: Option<String>,
    pub cache_dir: PathBuf,
    pub ticket_pubkey: Option<VerifyingKey>,
    pub models_dir: PathBuf,
    pub offline: bool,
    /// Embedding model for semantic search (can be overridden by CLI flag)
    pub embedding_model: EmbeddingModelChoice,
}

// Note: ticket_pubkey is omitted from the comparison.
impl PartialEq for CliConfig {
    fn eq(&self, other: &Self) -> bool {
        self.api_key == other.api_key
            && self.api_url == other.api_url
            && self.memory_id == other.memory_id
            && self.cache_dir == other.cache_dir
            && self.models_dir == other.models_dir
            && self.offline == other.offline
            && self.embedding_model == other.embedding_model
    }
}

impl Eq for CliConfig {}

impl CliConfig {
    pub fn load() -> Result<Self> {
        // Load persistent config file (if exists) for fallback values
        let persistent_config = crate::commands::config::PersistentConfig::load().ok();

        // API Key: env var takes precedence, then config file
        let api_key = env::var("MEMVID_API_KEY")
            .ok()
            .and_then(|value| {
                let trimmed = value.trim().to_string();
                (!trimmed.is_empty()).then_some(trimmed)
            })
            .or_else(|| persistent_config.as_ref().and_then(|c| c.api_key.clone()));

        // API URL: env var takes precedence, then config file, then default
        let api_url = env::var("MEMVID_API_URL")
            .ok()
            .or_else(|| persistent_config.as_ref().and_then(|c| c.api_url.clone()))
            .unwrap_or_else(|| DEFAULT_API_URL.to_string());

        // Memory ID: env var takes precedence, then config file (memory.default or legacy memory_id)
        let memory_id = env::var("MEMVID_MEMORY_ID")
            .ok()
            .and_then(|value| {
                let trimmed = value.trim().to_string();
                (!trimmed.is_empty()).then_some(trimmed)
            })
            .or_else(|| persistent_config.as_ref().and_then(|c| c.default_memory_id()));

        let cache_dir_raw =
            env::var("MEMVID_CACHE_DIR").unwrap_or_else(|_| DEFAULT_CACHE_DIR.to_string());
        let cache_dir = expand_path(&cache_dir_raw)?;

        let models_dir_raw =
            env::var("MEMVID_MODELS_DIR").unwrap_or_else(|_| "~/.memvid/models".to_string());
        let models_dir = expand_path(&models_dir_raw)?;

        // Default public key for memvid.com dashboard ticket verification.
        // This allows users to use --memory-id without setting MEMVID_TICKET_PUBKEY.
        // Must match memvid-core's MEMVID_TICKET_PUBKEY constant.
        const DEFAULT_TICKET_PUBKEY: &str = "DFKNhP/yO5i1b9aKL+aHeBaGunz9sMfOF736fzYws4Q=";

        let ticket_pubkey_str = env::var("MEMVID_TICKET_PUBKEY")
            .ok()
            .and_then(|value| {
                let trimmed = value.trim();
                if trimmed.is_empty() {
                    None
                } else {
                    Some(trimmed.to_string())
                }
            })
            .unwrap_or_else(|| DEFAULT_TICKET_PUBKEY.to_string());

        let ticket_pubkey = Some(memvid_core::parse_ed25519_public_key_base64(&ticket_pubkey_str)?);

        let offline = env::var("MEMVID_OFFLINE")
            .ok()
            .map(|value| matches!(value.trim().to_ascii_lowercase().as_str(), "1" | "true" | "yes"))
            .unwrap_or(false);

        // Load embedding model from env var, default to BGE-small
        let embedding_model = env::var("MEMVID_EMBEDDING_MODEL")
            .ok()
            .and_then(|value| {
                let trimmed = value.trim();
                if trimmed.is_empty() {
                    None
                } else {
                    EmbeddingModelChoice::from_str(trimmed).ok()
                }
            })
            .unwrap_or_default();

        Ok(Self {
            api_key,
            api_url,
            memory_id,
            cache_dir,
            ticket_pubkey,
            models_dir,
            offline,
            embedding_model,
        })
    }

    /// Create a new config with a different embedding model
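    ///
    /// A minimal sketch (`ignore`d; illustrative only):
    ///
    /// ```ignore
    /// let config = CliConfig::load()?.with_embedding_model(EmbeddingModelChoice::Nomic);
    /// ```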
    pub fn with_embedding_model(&self, model: EmbeddingModelChoice) -> Self {
        Self {
            embedding_model: model,
            ..self.clone()
        }
    }
}

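/// Expand a leading `~` to the home directory and resolve relative paths
/// against the current working directory. A brief sketch of the behavior
/// (`ignore`d; illustrative only):
///
/// ```ignore
/// // With HOME=/home/u, "~/cache" expands to "/home/u/cache".
/// assert_eq!(expand_path("~/cache")?, PathBuf::from("/home/u/cache"));
/// ```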
fn expand_path(value: &str) -> Result<PathBuf> {
    if value.trim().is_empty() {
        return Err(anyhow!("cache directory cannot be empty"));
    }

    let expanded = if let Some(stripped) = value.strip_prefix("~/") {
        home_dir()?.join(stripped)
    } else if let Some(stripped) = value.strip_prefix("~\\") {
        // Support Windows-style "~\" prefix.
        home_dir()?.join(stripped)
    } else if value == "~" {
        home_dir()?
    } else {
        PathBuf::from(value)
    };

    if expanded.is_absolute() {
        Ok(expanded)
    } else {
        Ok(env::current_dir()?.join(expanded))
    }
}

fn home_dir() -> Result<PathBuf> {
    if let Some(path) = env::var_os("HOME") {
        if !path.is_empty() {
            return Ok(PathBuf::from(path));
        }
    }

    #[cfg(windows)]
    {
        if let Some(path) = env::var_os("USERPROFILE") {
            if !path.is_empty() {
                return Ok(PathBuf::from(path));
            }
        }
        if let (Some(drive), Some(path)) = (env::var_os("HOMEDRIVE"), env::var_os("HOMEPATH")) {
            if !drive.is_empty() && !path.is_empty() {
                return Ok(PathBuf::from(format!(
                    "{}{}",
                    drive.to_string_lossy(),
                    path.to_string_lossy()
                )));
            }
        }
    }

    Err(anyhow!("unable to resolve home directory"))
}

#[cfg(test)]
mod tests {
    use super::*;
    use base64::engine::general_purpose::STANDARD as BASE64_STANDARD;
    use base64::Engine;
    use ed25519_dalek::SigningKey;
    use std::sync::{Mutex, OnceLock};

    fn env_lock() -> std::sync::MutexGuard<'static, ()> {
        static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
        LOCK.get_or_init(|| Mutex::new(())).lock().unwrap()
    }

    fn set_or_unset(var: &str, value: Option<String>) {
        match value {
            Some(v) => unsafe { env::set_var(var, v) },
            None => unsafe { env::remove_var(var) },
        }
    }

    #[test]
    fn defaults_expand_using_home_directory() {
        let _guard = env_lock();

        let previous_home = env::var("HOME").ok();
        #[cfg(windows)]
        let previous_userprofile = env::var("USERPROFILE").ok();

        for var in [
            "MEMVID_API_KEY",
            "MEMVID_API_URL",
            "MEMVID_CACHE_DIR",
            "MEMVID_TICKET_PUBKEY",
            "MEMVID_MODELS_DIR",
            "MEMVID_OFFLINE",
        ] {
            unsafe { env::remove_var(var) };
        }

        let tmp = tempfile::tempdir().expect("tmpdir");
        let tmp_path = tmp.path().to_path_buf();
        unsafe { env::set_var("HOME", &tmp_path) };
        #[cfg(windows)]
        unsafe {
            env::set_var("USERPROFILE", &tmp_path)
        };

        let config = CliConfig::load().expect("load");
        assert_eq!(config.api_key, None);
        assert_eq!(config.api_url, "https://memvid.com");
        assert_eq!(config.cache_dir, tmp_path.join(".cache/memvid"));
        // ticket_pubkey falls back to the built-in default, so it should be Some
        assert!(config.ticket_pubkey.is_some());
        assert_eq!(config.models_dir, tmp_path.join(".memvid/models"));
        assert!(!config.offline);

        set_or_unset("HOME", previous_home);
        #[cfg(windows)]
        {
            set_or_unset("USERPROFILE", previous_userprofile);
        }
    }

    #[test]
    fn env_overrides_are_respected() {
        let _guard = env_lock();

        let previous_env: Vec<(&'static str, Option<String>)> = [
            "MEMVID_API_KEY",
            "MEMVID_API_URL",
            "MEMVID_CACHE_DIR",
            "MEMVID_TICKET_PUBKEY",
            "MEMVID_MODELS_DIR",
            "MEMVID_OFFLINE",
        ]
        .into_iter()
        .map(|var| (var, env::var(var).ok()))
        .collect();

        unsafe { env::set_var("MEMVID_API_KEY", "abc123") };
        unsafe { env::set_var("MEMVID_API_URL", "https://staging.memvid.app") };
        unsafe { env::set_var("MEMVID_CACHE_DIR", "~/memvid-cache") };
        unsafe { env::set_var("MEMVID_MODELS_DIR", "~/models") };
        unsafe { env::set_var("MEMVID_OFFLINE", "true") };
        let signing = SigningKey::from_bytes(&[9u8; 32]);
        let encoded = BASE64_STANDARD.encode(signing.verifying_key().as_bytes());
        unsafe { env::set_var("MEMVID_TICKET_PUBKEY", encoded) };

        let tmp = tempfile::tempdir().expect("tmpdir");
        let tmp_path = tmp.path().to_path_buf();
        unsafe { env::set_var("HOME", &tmp_path) };
        #[cfg(windows)]
        unsafe {
            env::set_var("USERPROFILE", &tmp_path)
        };

        let config = CliConfig::load().expect("load");
        assert_eq!(config.api_key.as_deref(), Some("abc123"));
        assert_eq!(config.api_url, "https://staging.memvid.app");
        assert_eq!(config.cache_dir, tmp_path.join("memvid-cache"));
        assert_eq!(
            config.ticket_pubkey.expect("pubkey").as_bytes(),
            signing.verifying_key().as_bytes()
        );
        assert_eq!(config.models_dir, tmp_path.join("models"));
        assert!(config.offline);

        for (var, value) in previous_env {
            set_or_unset(var, value);
        }
    }

    #[test]
    fn rejects_empty_cache_dir() {
        let _guard = env_lock();

        let previous = env::var("MEMVID_CACHE_DIR").ok();
        unsafe { env::set_var("MEMVID_CACHE_DIR", " ") };
        let err = CliConfig::load().expect_err("should fail");
        assert!(err.to_string().contains("cache directory"));
        set_or_unset("MEMVID_CACHE_DIR", previous);
    }
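
    // Illustrative additions: these exercise behavior already implemented in
    // this module (alias parsing and UTF-8-safe truncation); no new assumptions.
    #[test]
    fn parses_model_aliases() {
        assert_eq!(
            "BAAI/bge-small-en-v1.5".parse::<EmbeddingModelChoice>().unwrap(),
            EmbeddingModelChoice::BgeSmall
        );
        assert_eq!(
            "openai".parse::<EmbeddingModelChoice>().unwrap(),
            EmbeddingModelChoice::OpenAILarge
        );
        assert!("no-such-model".parse::<EmbeddingModelChoice>().is_err());
    }

    #[test]
    fn truncation_respects_utf8_boundaries() {
        // "aéé" is 5 bytes; a 4-byte cap must back up to the boundary after "aé".
        let out = EmbeddingRuntime::truncate_for_embedding("aéé", 4);
        assert_eq!(out.as_ref(), "aé");
    }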
}

/// Initialize tracing/logging based on verbosity level
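///
/// A minimal sketch (`ignore`d; `verbose_flag_count` is a hypothetical name):
///
/// ```ignore
/// // 0 → warn, 1 → info, 2 → debug, 3+ → trace
/// init_tracing(verbose_flag_count)?;
/// ```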
pub fn init_tracing(verbosity: u8) -> Result<()> {
    use std::io::IsTerminal;
    use tracing_subscriber::{filter::Directive, fmt, EnvFilter};

    let level = match verbosity {
        0 => "warn",
        1 => "info",
        2 => "debug",
        _ => "trace",
    };

    let mut env_filter =
        EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new(level));
    for directive_str in ["llama_cpp=error", "llama_cpp_sys=error", "ggml=error"] {
        if let Ok(directive) = directive_str.parse::<Directive>() {
            env_filter = env_filter.add_directive(directive);
        }
    }

    // Disable ANSI color codes when stderr is not a terminal (e.g., piped or
    // combined with `2>&1`). This prevents control characters from polluting
    // JSON output when combined with stdout.
    let use_ansi = std::io::stderr().is_terminal();

    fmt()
        .with_env_filter(env_filter)
        .with_writer(std::io::stderr)
        .with_target(false)
        .without_time()
        .with_ansi(use_ansi)
        .try_init()
        .map_err(|err| anyhow!(err))?;
    Ok(())
}

/// Resolve LLM context budget override from CLI or environment
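///
/// A minimal sketch (`ignore`d; illustrative only):
///
/// ```ignore
/// // Underscore separators are accepted: "32_000" parses as 32000.
/// std::env::set_var("MEMVID_LLM_CONTEXT_BUDGET", "32_000");
/// assert_eq!(resolve_llm_context_budget_override(None)?, Some(32_000));
/// ```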
pub fn resolve_llm_context_budget_override(cli_value: Option<usize>) -> Result<Option<usize>> {
    use anyhow::bail;

    if let Some(value) = cli_value {
        if value == 0 {
            bail!("--llm-context-depth must be a positive integer");
        }
        return Ok(Some(value));
    }

    let raw_env = match env::var("MEMVID_LLM_CONTEXT_BUDGET") {
        Ok(value) => value,
        Err(_) => return Ok(None),
    };

    let trimmed = raw_env.trim();
    if trimmed.is_empty() {
        return Ok(None);
    }

    let digits: String = trimmed
        .chars()
        .filter(|ch| !ch.is_ascii_whitespace() && *ch != '_')
        .collect();

    if digits.is_empty() {
        bail!("MEMVID_LLM_CONTEXT_BUDGET must be a positive integer value");
    }

    let value: usize = digits.parse().map_err(|err| {
        anyhow!(
            "MEMVID_LLM_CONTEXT_BUDGET value '{}' is not a valid number: {}",
            trimmed,
            err
        )
    })?;

    if value == 0 {
        bail!("MEMVID_LLM_CONTEXT_BUDGET must be a positive integer");
    }

    Ok(Some(value))
}

use crate::gemini_embeddings::GeminiEmbeddingProvider;
use crate::mistral_embeddings::MistralEmbeddingProvider;
use crate::nvidia_embeddings::NvidiaEmbeddingProvider;
use crate::openai_embeddings::OpenAIEmbeddingProvider;

/// Internal embedding backend - local fastembed or remote providers.
#[derive(Clone)]
enum EmbeddingBackend {
    #[cfg(feature = "local-embeddings")]
    FastEmbed(std::sync::Arc<std::sync::Mutex<fastembed::TextEmbedding>>),
    OpenAI(std::sync::Arc<OpenAIEmbeddingProvider>),
    Nvidia(std::sync::Arc<NvidiaEmbeddingProvider>),
    Gemini(std::sync::Arc<GeminiEmbeddingProvider>),
    Mistral(std::sync::Arc<MistralEmbeddingProvider>),
}

/// Embedding runtime wrapper supporting local and remote embeddings
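///
/// A minimal usage sketch (`ignore`d; illustrative only):
///
/// ```ignore
/// let runtime = load_embedding_runtime(&config)?;
/// let vector = runtime.embed_passage("hello world")?;
/// assert_eq!(vector.len(), runtime.dimension());
/// ```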
#[derive(Clone)]
pub struct EmbeddingRuntime {
    backend: EmbeddingBackend,
    model: EmbeddingModelChoice,
    dimension: std::sync::Arc<AtomicUsize>,
}

impl EmbeddingRuntime {
    #[cfg(feature = "local-embeddings")]
    fn new_fastembed(
        backend: fastembed::TextEmbedding,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::FastEmbed(std::sync::Arc::new(std::sync::Mutex::new(
                backend,
            ))),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    fn new_openai(
        provider: OpenAIEmbeddingProvider,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::OpenAI(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    fn new_nvidia(provider: NvidiaEmbeddingProvider, model: EmbeddingModelChoice) -> Self {
        Self {
            backend: EmbeddingBackend::Nvidia(std::sync::Arc::new(provider)),
            model,
            // Dimension is unknown until the first response; see note_dimension().
            dimension: std::sync::Arc::new(AtomicUsize::new(0)),
        }
    }

    fn new_gemini(
        provider: GeminiEmbeddingProvider,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::Gemini(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    fn new_mistral(
        provider: MistralEmbeddingProvider,
        model: EmbeddingModelChoice,
        dimension: usize,
    ) -> Self {
        Self {
            backend: EmbeddingBackend::Mistral(std::sync::Arc::new(provider)),
            model,
            dimension: std::sync::Arc::new(AtomicUsize::new(dimension)),
        }
    }

    const MAX_OPENAI_EMBEDDING_TEXT_LEN: usize = 20_000;
    // NVIDIA Integrate embeddings enforce a 4096-token limit; use a tighter byte cap as a guardrail.
    const MAX_NVIDIA_EMBEDDING_TEXT_LEN: usize = 12_000;

    // Gemini enforces an 8192-token input limit; this byte cap is a conservative estimate.
    const MAX_GEMINI_EMBEDDING_TEXT_LEN: usize = 20_000;
    // Mistral enforces an 8192-token input limit; this byte cap is a conservative estimate.
    const MAX_MISTRAL_EMBEDDING_TEXT_LEN: usize = 20_000;

    fn max_remote_embedding_chars(&self) -> usize {
        match &self.backend {
            EmbeddingBackend::OpenAI(_) => Self::MAX_OPENAI_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::Nvidia(_) => Self::MAX_NVIDIA_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::Gemini(_) => Self::MAX_GEMINI_EMBEDDING_TEXT_LEN,
            EmbeddingBackend::Mistral(_) => Self::MAX_MISTRAL_EMBEDDING_TEXT_LEN,
            #[cfg(feature = "local-embeddings")]
            EmbeddingBackend::FastEmbed(_) => usize::MAX,
        }
    }

    /// Truncate text for embedding to reduce the risk of provider token-limit errors.
    /// `max_chars` is a byte budget; the cut always lands on a UTF-8 char boundary.
    fn truncate_for_embedding<'a>(text: &'a str, max_chars: usize) -> std::borrow::Cow<'a, str> {
        if text.len() <= max_chars {
            std::borrow::Cow::Borrowed(text)
        } else {
            // Walk back to the nearest char boundary; slicing at an arbitrary
            // byte index would panic on multi-byte UTF-8 sequences.
            let mut end = max_chars;
            while end > 0 && !text.is_char_boundary(end) {
                end -= 1;
            }
            tracing::info!("Truncated embedding text from {} to {} bytes", text.len(), end);
            std::borrow::Cow::Owned(text[..end].to_string())
        }
    }

    fn note_dimension(&self, observed: usize) -> Result<()> {
        if observed == 0 {
            return Err(anyhow!("embedding provider returned zero-length embedding"));
        }

        let current = self.dimension.load(Ordering::Relaxed);
        if current == 0 {
            self.dimension.store(observed, Ordering::Relaxed);
            return Ok(());
        }

        if current != observed {
            return Err(anyhow!(
                "embedding provider returned {observed}D vectors but runtime expects {current}D"
            ));
        }

        Ok(())
    }

    fn truncate_if_remote<'a>(&self, text: &'a str) -> std::borrow::Cow<'a, str> {
        match &self.backend {
            EmbeddingBackend::OpenAI(_)
            | EmbeddingBackend::Nvidia(_)
            | EmbeddingBackend::Gemini(_)
            | EmbeddingBackend::Mistral(_) => {
                Self::truncate_for_embedding(text, self.max_remote_embedding_chars())
            }
            #[cfg(feature = "local-embeddings")]
            EmbeddingBackend::FastEmbed(_) => std::borrow::Cow::Borrowed(text),
        }
    }

    pub fn embed_passage(&self, text: &str) -> Result<Vec<f32>> {
        let text = self.truncate_if_remote(text);
        let embedding = match &self.backend {
            #[cfg(feature = "local-embeddings")]
            EmbeddingBackend::FastEmbed(model) => {
                let mut guard = model
                    .lock()
                    .map_err(|_| anyhow!("fastembed runtime poisoned"))?;
                let outputs = guard
                    .embed(vec![text.into_owned()], None)
                    .map_err(|err| anyhow!("failed to compute embedding with fastembed: {err}"))?;
                outputs
                    .into_iter()
                    .next()
                    .ok_or_else(|| anyhow!("fastembed returned no embedding output"))?
            }
            EmbeddingBackend::OpenAI(provider) => {
                use memvid_core::EmbeddingProvider;
                provider
                    .embed_text(&text)
                    .map_err(|err| anyhow!("failed to compute embedding with OpenAI: {err}"))?
            }
            EmbeddingBackend::Nvidia(provider) => provider
                .embed_passage(&text)
                .map_err(|err| anyhow!("failed to compute embedding with NVIDIA: {err}"))?,
            EmbeddingBackend::Gemini(provider) => provider
                .embed_text(&text)
                .map_err(|err| anyhow!("failed to compute embedding with Gemini: {err}"))?,
            EmbeddingBackend::Mistral(provider) => provider
                .embed_text(&text)
                .map_err(|err| anyhow!("failed to compute embedding with Mistral: {err}"))?,
        };

        self.note_dimension(embedding.len())?;
        Ok(embedding)
    }

    pub fn embed_query(&self, text: &str) -> Result<Vec<f32>> {
        let text = self.truncate_if_remote(text);
        match &self.backend {
            EmbeddingBackend::Nvidia(provider) => {
                let embedding = provider
                    .embed_query(&text)
                    .map_err(|err| anyhow!("failed to compute embedding with NVIDIA: {err}"))?;
                self.note_dimension(embedding.len())?;
                Ok(embedding)
            }
            _ => self.embed_passage(&text),
        }
    }

    pub fn embed_batch_passages(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>> {
        if texts.is_empty() {
            return Ok(Vec::new());
        }

        let truncated: Vec<std::borrow::Cow<'_, str>> =
            texts.iter().map(|t| self.truncate_if_remote(t)).collect();
        let truncated_refs: Vec<&str> = truncated.iter().map(|c| c.as_ref()).collect();

        let embeddings = match &self.backend {
            #[cfg(feature = "local-embeddings")]
            EmbeddingBackend::FastEmbed(model) => {
                let mut guard = model
                    .lock()
                    .map_err(|_| anyhow!("fastembed runtime poisoned"))?;
                guard
                    .embed(
                        truncated_refs
                            .iter()
                            .map(|s| (*s).to_string())
                            .collect::<Vec<String>>(),
                        None,
                    )
                    .map_err(|err| anyhow!("failed to compute embeddings with fastembed: {err}"))?
            }
            EmbeddingBackend::OpenAI(provider) => {
                use memvid_core::EmbeddingProvider;
                provider
                    .embed_batch(&truncated_refs)
                    .map_err(|err| anyhow!("failed to compute embeddings with OpenAI: {err}"))?
            }
            EmbeddingBackend::Nvidia(provider) => provider
                .embed_passages(&truncated_refs)
                .map_err(|err| anyhow!("failed to compute embeddings with NVIDIA: {err}"))?,
            EmbeddingBackend::Gemini(provider) => provider
                .embed_batch(&truncated_refs)
                .map_err(|err| anyhow!("failed to compute embeddings with Gemini: {err}"))?,
            EmbeddingBackend::Mistral(provider) => provider
                .embed_batch(&truncated_refs)
                .map_err(|err| anyhow!("failed to compute embeddings with Mistral: {err}"))?,
        };

        if let Some(first) = embeddings.first() {
            self.note_dimension(first.len())?;
            if embeddings.iter().any(|e| e.len() != first.len()) {
                return Err(anyhow!("embedding provider returned mixed vector dimensions"));
            }
        }

        Ok(embeddings)
    }

    pub fn embed_batch_queries(&self, texts: &[&str]) -> Result<Vec<Vec<f32>>> {
        if texts.is_empty() {
            return Ok(Vec::new());
        }

        let truncated: Vec<std::borrow::Cow<'_, str>> =
            texts.iter().map(|t| self.truncate_if_remote(t)).collect();
        let truncated_refs: Vec<&str> = truncated.iter().map(|c| c.as_ref()).collect();

        match &self.backend {
            EmbeddingBackend::Nvidia(provider) => {
                let embeddings = provider
                    .embed_queries(&truncated_refs)
                    .map_err(|err| anyhow!("failed to compute embeddings with NVIDIA: {err}"))?;

                if let Some(first) = embeddings.first() {
                    self.note_dimension(first.len())?;
                    if embeddings.iter().any(|e| e.len() != first.len()) {
                        return Err(anyhow!("embedding provider returned mixed vector dimensions"));
                    }
                }

                Ok(embeddings)
            }
            // Passing already-truncated refs through is harmless: truncation is idempotent.
            _ => self.embed_batch_passages(&truncated_refs),
        }
    }

    pub fn dimension(&self) -> usize {
        self.dimension.load(Ordering::Relaxed)
    }

    pub fn model_choice(&self) -> EmbeddingModelChoice {
        self.model
    }

    pub fn provider_kind(&self) -> &'static str {
        match &self.backend {
            #[cfg(feature = "local-embeddings")]
            EmbeddingBackend::FastEmbed(_) => "fastembed",
            EmbeddingBackend::OpenAI(_) => "openai",
            EmbeddingBackend::Nvidia(_) => "nvidia",
            EmbeddingBackend::Gemini(_) => "gemini",
            EmbeddingBackend::Mistral(_) => "mistral",
        }
    }

    pub fn provider_model_id(&self) -> String {
        match &self.backend {
            #[cfg(feature = "local-embeddings")]
            EmbeddingBackend::FastEmbed(_) => self.model.canonical_model_id().to_string(),
            EmbeddingBackend::OpenAI(provider) => {
                use memvid_core::EmbeddingProvider;
                provider.model().to_string()
            }
            EmbeddingBackend::Nvidia(provider) => provider.model().to_string(),
            EmbeddingBackend::Gemini(provider) => provider.model().to_string(),
            EmbeddingBackend::Mistral(provider) => provider.model().to_string(),
        }
    }
}

impl memvid_core::VecEmbedder for EmbeddingRuntime {
    fn embed_query(&self, text: &str) -> memvid_core::Result<Vec<f32>> {
        EmbeddingRuntime::embed_query(self, text).map_err(|err| {
            memvid_core::MemvidError::EmbeddingFailed {
                reason: err.to_string().into_boxed_str(),
            }
        })
    }

    fn embedding_dimension(&self) -> usize {
        self.dimension()
    }
}

/// Ensure fastembed cache directory exists
#[cfg(feature = "local-embeddings")]
fn ensure_fastembed_cache(config: &CliConfig) -> Result<PathBuf> {
    use std::fs;

    let cache_dir = config.models_dir.clone();
    fs::create_dir_all(&cache_dir)?;
    Ok(cache_dir)
}

/// Get approximate model size in MB for user-friendly error messages
fn model_size_mb(model: EmbeddingModelChoice) -> usize {
    match model {
        EmbeddingModelChoice::BgeSmall => 33,
        EmbeddingModelChoice::BgeBase => 110,
        EmbeddingModelChoice::Nomic => 137,
        EmbeddingModelChoice::GteLarge => 327,
        // Remote/cloud models don't require local download
        EmbeddingModelChoice::OpenAILarge
        | EmbeddingModelChoice::OpenAISmall
        | EmbeddingModelChoice::OpenAIAda
        | EmbeddingModelChoice::Nvidia
        | EmbeddingModelChoice::Gemini
        | EmbeddingModelChoice::Mistral => 0,
    }
}

/// Instantiate an embedding runtime with the configured model
fn instantiate_embedding_runtime(config: &CliConfig) -> Result<EmbeddingRuntime> {
    use tracing::info;

    let embedding_model = config.embedding_model;

    if embedding_model.dimensions() > 0 {
        info!(
            "Loading embedding model: {} ({}D)",
            embedding_model.name(),
            embedding_model.dimensions()
        );
    } else {
        info!("Loading embedding model: {}", embedding_model.name());
    }

    if config.offline && embedding_model.is_remote() {
        anyhow::bail!(
            "remote embeddings are unavailable while offline; set MEMVID_OFFLINE=0 or use a local embedding model"
        );
    }

    // Check if OpenAI model
    if embedding_model.is_openai() {
        return instantiate_openai_runtime(embedding_model);
    }

    if embedding_model == EmbeddingModelChoice::Nvidia {
        return instantiate_nvidia_runtime(None);
    }

    if embedding_model == EmbeddingModelChoice::Gemini {
        return instantiate_gemini_runtime();
    }

    if embedding_model == EmbeddingModelChoice::Mistral {
        return instantiate_mistral_runtime();
    }

    // Local fastembed model
    #[cfg(feature = "local-embeddings")]
    {
        return instantiate_fastembed_runtime(config, embedding_model);
    }

    #[cfg(not(feature = "local-embeddings"))]
    {
        anyhow::bail!(
            "Local embeddings are not available on this platform. \
            Please use a remote embedding provider:\n\
            - Set OPENAI_API_KEY and use --embedding-model openai-large\n\
            - Set GEMINI_API_KEY and use --embedding-model gemini\n\
            - Set MISTRAL_API_KEY and use --embedding-model mistral\n\
            - Set NVIDIA_API_KEY and use --embedding-model nvidia"
        );
    }
}

/// Instantiate OpenAI embedding runtime
fn instantiate_openai_runtime(embedding_model: EmbeddingModelChoice) -> Result<EmbeddingRuntime> {
    use anyhow::bail;
    use memvid_core::EmbeddingConfig;
    use tracing::info;

    let api_key = std::env::var("OPENAI_API_KEY").map_err(|_| {
        anyhow!("OPENAI_API_KEY environment variable is required for OpenAI embeddings")
    })?;

    if api_key.is_empty() {
        bail!("OPENAI_API_KEY cannot be empty");
    }

    let config = match embedding_model {
        EmbeddingModelChoice::OpenAILarge => EmbeddingConfig::openai_large(),
        EmbeddingModelChoice::OpenAISmall => EmbeddingConfig::openai_small(),
        EmbeddingModelChoice::OpenAIAda => EmbeddingConfig::openai_ada(),
        _ => unreachable!("instantiate_openai_runtime called with a non-OpenAI model"),
    };

    let provider = OpenAIEmbeddingProvider::new(api_key, config.clone())
        .map_err(|err| anyhow!("failed to create OpenAI embedding provider: {err}"))?;

    info!(
        "OpenAI embedding provider ready: model={}, dimension={}",
        config.model, config.dimension
    );

    Ok(EmbeddingRuntime::new_openai(
        provider,
        embedding_model,
        config.dimension,
    ))
}

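/// Normalize a user-supplied NVIDIA model override (for example "nv:nv-embed-v1"
/// or "nvidia:custom") into a catalog-style ID, returning `None` for bare aliases
/// so the provider default applies. A brief sketch (`ignore`d; illustrative only):
///
/// ```ignore
/// assert_eq!(normalize_nvidia_embedding_model_override("nv"), None);
/// assert_eq!(
///     normalize_nvidia_embedding_model_override("nv:nv-embed-v1").as_deref(),
///     Some("nvidia/nv-embed-v1")
/// );
/// ```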
fn normalize_nvidia_embedding_model_override(raw: &str) -> Option<String> {
    let trimmed = raw.trim();
    if trimmed.is_empty() {
        return None;
    }

    let lowered = trimmed.to_ascii_lowercase();
    if lowered == "nvidia" || lowered == "nv" {
        return None;
    }

    let without_prefix = trimmed
        .strip_prefix("nvidia:")
        .or_else(|| trimmed.strip_prefix("nv:"))
        .unwrap_or(trimmed)
        .trim();

    if without_prefix.is_empty() {
        return None;
    }

    if without_prefix.eq_ignore_ascii_case("nv-embed-v1") {
        return Some("nvidia/nv-embed-v1".to_string());
    }

    if without_prefix.contains('/') {
        return Some(without_prefix.to_string());
    }

    Some(format!("nvidia/{without_prefix}"))
}

/// Instantiate NVIDIA embedding runtime
fn instantiate_nvidia_runtime(model_override: Option<&str>) -> Result<EmbeddingRuntime> {
    use tracing::info;

    let normalized = model_override.and_then(normalize_nvidia_embedding_model_override);
    let provider = NvidiaEmbeddingProvider::from_env(normalized.as_deref())
        .map_err(|err| anyhow!("failed to create NVIDIA embedding provider: {err}"))?;

    info!(
        "NVIDIA embedding provider ready: model={}",
        provider.model()
    );

    Ok(EmbeddingRuntime::new_nvidia(
        provider,
        EmbeddingModelChoice::Nvidia,
    ))
}

/// Instantiate Gemini embedding runtime
fn instantiate_gemini_runtime() -> Result<EmbeddingRuntime> {
    use tracing::info;

    let provider = GeminiEmbeddingProvider::from_env()
        .map_err(|err| anyhow!("failed to create Gemini embedding provider: {err}"))?;

    let dimension = provider.dimension();
    info!(
        "Gemini embedding provider ready: model={}, dimension={}",
        provider.model(),
        dimension
    );

    Ok(EmbeddingRuntime::new_gemini(
        provider,
        EmbeddingModelChoice::Gemini,
        dimension,
    ))
}

/// Instantiate Mistral embedding runtime
fn instantiate_mistral_runtime() -> Result<EmbeddingRuntime> {
    use tracing::info;

    let provider = MistralEmbeddingProvider::from_env()
        .map_err(|err| anyhow!("failed to create Mistral embedding provider: {err}"))?;

    let dimension = provider.dimension();
    info!(
        "Mistral embedding provider ready: model={}, dimension={}",
        provider.model(),
        dimension
    );

    Ok(EmbeddingRuntime::new_mistral(
        provider,
        EmbeddingModelChoice::Mistral,
        dimension,
    ))
}

/// Instantiate fastembed (local) embedding runtime
#[cfg(feature = "local-embeddings")]
fn instantiate_fastembed_runtime(
    config: &CliConfig,
    embedding_model: EmbeddingModelChoice,
) -> Result<EmbeddingRuntime> {
    use anyhow::bail;
    use fastembed::{InitOptions, TextEmbedding};
    use std::fs;

    let cache_dir = ensure_fastembed_cache(config)?;

    if config.offline {
        let mut entries = fs::read_dir(&cache_dir)?;
        if entries.next().is_none() {
            bail!(
                "semantic embeddings unavailable while offline; allow one connected run so fastembed can cache model weights"
            );
        }
    }

    let options = InitOptions::new(embedding_model.to_fastembed_model())
        .with_cache_dir(cache_dir)
        .with_show_download_progress(true);
    let mut model = TextEmbedding::try_new(options).map_err(|err| {
        // Provide platform-specific guidance for model download issues
        let platform_hint = if cfg!(target_os = "windows") {
            "\n\nWindows users: If model downloads fail, try:\n\
            1. Run as Administrator\n\
            2. Check your antivirus isn't blocking downloads\n\
            3. Use OpenAI embeddings instead: set OPENAI_API_KEY and use --embedding-model openai"
        } else if cfg!(target_os = "linux") {
            "\n\nLinux users: If model downloads fail, try:\n\
            1. Check disk space in ~/.memvid/models\n\
            2. Ensure you have network access to huggingface.co\n\
            3. Use OpenAI embeddings instead: export OPENAI_API_KEY=... and use --embedding-model openai"
        } else {
            "\n\nIf model downloads fail, try using OpenAI embeddings:\n\
            export OPENAI_API_KEY=your-key && memvid ... --embedding-model openai"
        };

        anyhow!(
            "Failed to initialize embedding model '{}': {err}\n\n\
            This typically means the model couldn't be downloaded or loaded.\n\
            Model size: ~{} MB{}\n\n\
            See: https://docs.memvid.com/embedding-models",
            embedding_model.name(),
            model_size_mb(embedding_model),
            platform_hint
        )
    })?;

    let probe = model
        .embed(vec!["memvid probe".to_string()], None)
        .map_err(|err| anyhow!("failed to determine embedding dimension: {err}"))?;
    let dimension = probe.first().map(|vec| vec.len()).unwrap_or(0);

    if dimension == 0 {
        bail!("fastembed reported zero-length embeddings");
    }

    // Verify dimension matches expected
    if dimension != embedding_model.dimensions() {
        tracing::warn!(
            "Embedding dimension mismatch: expected {}, got {}",
            embedding_model.dimensions(),
            dimension
        );
    }

    Ok(EmbeddingRuntime::new_fastembed(model, embedding_model, dimension))
}

/// Load embedding runtime (fails if unavailable)
pub fn load_embedding_runtime(config: &CliConfig) -> Result<EmbeddingRuntime> {
    use anyhow::bail;

    match instantiate_embedding_runtime(config) {
        Ok(runtime) => Ok(runtime),
        Err(err) => {
            if config.offline {
                bail!(
                    "semantic embeddings unavailable while offline; allow one connected run so fastembed can cache model weights ({err})"
                );
            }
            Err(err)
        }
    }
}

/// Try to load embedding runtime (returns None if unavailable)
pub fn try_load_embedding_runtime(config: &CliConfig) -> Option<EmbeddingRuntime> {
    use tracing::warn;

    match instantiate_embedding_runtime(config) {
        Ok(runtime) => Some(runtime),
        Err(err) => {
            warn!("semantic embeddings unavailable: {err}");
            None
        }
    }
}

/// Load embedding runtime with an optional model override.
/// If `model_override` is provided, it will be used instead of the config's embedding_model.
pub fn load_embedding_runtime_with_model(
    config: &CliConfig,
    model_override: Option<&str>,
) -> Result<EmbeddingRuntime> {
    use tracing::info;

    let mut raw_override: Option<&str> = None;
    let embedding_model = match model_override {
        Some(model_str) => {
            raw_override = Some(model_str);
            let parsed = model_str.parse::<EmbeddingModelChoice>()?;
            if parsed.dimensions() > 0 {
                info!(
                    "Using embedding model override: {} ({}D)",
                    parsed.name(),
                    parsed.dimensions()
                );
            } else {
                info!("Using embedding model override: {}", parsed.name());
            }
            parsed
        }
        None => config.embedding_model,
    };

    if embedding_model.dimensions() > 0 {
        info!(
            "Loading embedding model: {} ({}D)",
            embedding_model.name(),
            embedding_model.dimensions()
        );
    } else {
        info!("Loading embedding model: {}", embedding_model.name());
    }

    if config.offline && embedding_model.is_remote() {
        anyhow::bail!(
            "remote embeddings are unavailable while offline; set MEMVID_OFFLINE=0 or use a local embedding model"
        );
    }

    if embedding_model.is_openai() {
        return instantiate_openai_runtime(embedding_model);
    }

    if embedding_model == EmbeddingModelChoice::Nvidia {
        return instantiate_nvidia_runtime(raw_override);
    }

    if embedding_model == EmbeddingModelChoice::Gemini {
        return instantiate_gemini_runtime();
    }

    if embedding_model == EmbeddingModelChoice::Mistral {
        return instantiate_mistral_runtime();
    }

    #[cfg(feature = "local-embeddings")]
    {
        return instantiate_fastembed_runtime(config, embedding_model);
    }

    #[cfg(not(feature = "local-embeddings"))]
    {
        anyhow::bail!(
            "Local embeddings are not available on this platform. \
            Please use a remote embedding provider."
        );
    }
}

/// Try to load embedding runtime with model override (returns None if unavailable)
pub fn try_load_embedding_runtime_with_model(
    config: &CliConfig,
    model_override: Option<&str>,
) -> Option<EmbeddingRuntime> {
    use tracing::warn;

    match load_embedding_runtime_with_model(config, model_override) {
        Ok(runtime) => Some(runtime),
        Err(err) => {
            warn!("semantic embeddings unavailable: {err}");
            None
        }
    }
}

/// Load embedding runtime by auto-detecting from MV2 vector dimension.
///
/// Priority:
/// 1. Explicit model override (--query-embedding-model flag)
/// 2. Auto-detect from MV2 file's stored dimension
/// 3. Fall back to config default
///
/// This allows users to omit --query-embedding-model when querying files
/// created with non-default embedding models (like OpenAI).
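///
/// A minimal sketch (`ignore`d; illustrative only, and this path needs OPENAI_API_KEY set):
///
/// ```ignore
/// // 1536D stored vectors auto-detect as OpenAI small.
/// let rt = load_embedding_runtime_for_mv2(&config, None, Some(1536))?;
/// assert_eq!(rt.model_choice(), EmbeddingModelChoice::OpenAISmall);
/// ```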
pub fn load_embedding_runtime_for_mv2(
    config: &CliConfig,
    model_override: Option<&str>,
    mv2_dimension: Option<u32>,
) -> Result<EmbeddingRuntime> {
    use tracing::info;

    // Priority 1: Explicit override
    if let Some(model_str) = model_override {
        return load_embedding_runtime_with_model(config, Some(model_str));
    }

    // Priority 2: Auto-detect from MV2 dimension
    if let Some(dim) = mv2_dimension {
        if let Some(detected_model) = EmbeddingModelChoice::from_dimension(dim) {
            info!(
                "Auto-detected embedding model from MV2: {} ({}D)",
                detected_model.name(),
                dim
            );

            // For OpenAI models, check if API key is available
            if detected_model.is_openai() {
                if std::env::var("OPENAI_API_KEY").is_ok() {
                    return load_embedding_runtime_with_model(config, Some(detected_model.name()));
                } else {
                    // OpenAI detected but no API key - provide helpful error
                    return Err(anyhow!(
                        "MV2 file uses OpenAI embeddings ({}D) but OPENAI_API_KEY is not set.\n\n\
                        Options:\n\
                        1. Set OPENAI_API_KEY environment variable\n\
                        2. Use --query-embedding-model to specify a different model\n\
                        3. Use lexical-only search with --mode lex\n\n\
                        See: https://docs.memvid.com/embedding-models",
                        dim
                    ));
                }
            }

            return load_embedding_runtime_with_model(config, Some(detected_model.name()));
        }
    }

    // Priority 3: Fall back to config default
    load_embedding_runtime(config)
}

/// Try to load embedding runtime for MV2 with auto-detection (returns None if unavailable)
pub fn try_load_embedding_runtime_for_mv2(
    config: &CliConfig,
    model_override: Option<&str>,
    mv2_dimension: Option<u32>,
) -> Option<EmbeddingRuntime> {
    use tracing::warn;

    match load_embedding_runtime_for_mv2(config, model_override, mv2_dimension) {
        Ok(runtime) => Some(runtime),
        Err(err) => {
            warn!("semantic embeddings unavailable: {err}");
            None
        }
    }
}