//! memory_mcp/index/usearch.rs — usearch-backed vector index store.
1use std::{
2    borrow::Cow,
3    collections::HashMap,
4    path::Path,
5    sync::{
6        atomic::{AtomicUsize, Ordering},
7        Mutex, RwLock,
8    },
9};
10
11use usearch::{Index, IndexOptions, MetricKind, ScalarKind};
12
13use crate::{
14    error::MemoryError,
15    types::{validate_name, Scope, ScopeFilter},
16};
17
18// ---------------------------------------------------------------------------
19// RawIndex — private trait abstracting the usearch Index operations
20// ---------------------------------------------------------------------------
21
/// Initial slot reservation for a freshly created index. Growth beyond this
/// is handled on demand by `VectorIndex::grow_if_needed_inner`, so this only
/// bounds the first allocation, not the index's total size.
const INITIAL_INDEX_CAPACITY: usize = 1024;
23
/// Raw nearest-neighbour results: two parallel vectors paired by position
/// (`keys[i]` matched at `distances[i]`).
struct RawSearchResults {
    // usearch u64 keys of the matched vectors.
    keys: Vec<u64>,
    // Distances parallel to `keys`; presumably ascending (nearest first),
    // as returned by the backend — TODO confirm against usearch docs.
    distances: Vec<f32>,
}
28
/// Boxed, type-erased error returned by `RawIndex` implementations.
/// `Send + Sync` so indexes can be shared across threads.
type RawIndexError = Box<dyn std::error::Error + Send + Sync>;
30
/// Private trait over the raw vector index operations. Production code uses
/// `UsearchRawIndex`; tests can substitute `FailingRawIndex`.
///
/// Uses `RawIndexError` (not `MemoryError`) so that the error conversion
/// boundary between backend and `VectorIndex` is real and testable.
trait RawIndex: Send + Sync + Sized {
    /// Create an empty index for vectors of `dimensions` components.
    fn create(dimensions: usize) -> Result<Self, RawIndexError>;
    /// Insert `vector` under `key`. Callers ensure capacity via `reserve`.
    fn add(&self, key: u64, vector: &[f32]) -> Result<(), RawIndexError>;
    /// Remove the vector stored under `key`.
    fn remove(&self, key: u64) -> Result<(), RawIndexError>;
    /// Return up to `count` nearest neighbours of `query`.
    fn search(&self, query: &[f32], count: usize) -> Result<RawSearchResults, RawIndexError>;
    /// Persist the index to the file at `path`.
    fn save(&self, path: &str) -> Result<(), RawIndexError>;
    /// Grow the index to hold at least `capacity` vectors.
    fn reserve(&self, capacity: usize) -> Result<(), RawIndexError>;
    /// Number of vectors currently stored.
    fn size(&self) -> usize;
    /// Number of vector slots currently reserved.
    fn capacity(&self) -> usize;
}
46
47// ---------------------------------------------------------------------------
48// UsearchRawIndex — production RawIndex backed by usearch::Index
49// ---------------------------------------------------------------------------
50
/// Production `RawIndex` backed by a real `usearch::Index`.
struct UsearchRawIndex {
    inner: Index,
}
54
55impl RawIndex for UsearchRawIndex {
56    fn create(dimensions: usize) -> Result<Self, RawIndexError> {
57        let options = IndexOptions {
58            dimensions,
59            metric: MetricKind::Cos,
60            quantization: ScalarKind::F32,
61            ..Default::default()
62        };
63        let inner = Index::new(&options)?;
64        inner.reserve(INITIAL_INDEX_CAPACITY)?;
65        Ok(Self { inner })
66    }
67
68    fn add(&self, key: u64, vector: &[f32]) -> Result<(), RawIndexError> {
69        self.inner.add(key, vector).map_err(|e| e.into())
70    }
71
72    fn remove(&self, key: u64) -> Result<(), RawIndexError> {
73        self.inner.remove(key).map(|_| ()).map_err(|e| e.into())
74    }
75
76    fn search(&self, query: &[f32], count: usize) -> Result<RawSearchResults, RawIndexError> {
77        let m = self.inner.search(query, count)?;
78        Ok(RawSearchResults {
79            keys: m.keys,
80            distances: m.distances,
81        })
82    }
83
84    fn save(&self, path: &str) -> Result<(), RawIndexError> {
85        self.inner.save(path).map_err(|e| e.into())
86    }
87
88    fn reserve(&self, capacity: usize) -> Result<(), RawIndexError> {
89        self.inner.reserve(capacity).map_err(|e| e.into())
90    }
91
92    fn size(&self) -> usize {
93        self.inner.size()
94    }
95
96    fn capacity(&self) -> usize {
97        self.inner.capacity()
98    }
99}
100
101// ---------------------------------------------------------------------------
102// VectorIndex — generic over RawIndex
103// ---------------------------------------------------------------------------
104
/// Internal state kept behind the mutex.
struct VectorState<R: RawIndex> {
    index: R,
    /// Maps usearch u64 keys → memory name strings.
    key_map: HashMap<u64, String>,
    /// Reverse map: memory name strings → usearch u64 keys (derived from key_map).
    /// Normally the exact inverse of `key_map`; during an upsert a name may
    /// transiently point at a newer key (see `remove` / `rollback_add`).
    name_map: HashMap<String, u64>,
    /// Monotonic counter used to assign unique vector keys. Keys are never
    /// reused, even after removals, so stale keys cannot alias new entries.
    next_key: u64,
    /// Commit SHA at the time this index was last saved/loaded.
    commit_sha: Option<String>,
}
117
/// Wraps a `RawIndex` implementation and a key-map behind a single `std::sync::Mutex`.
///
/// `VectorIndex<UsearchRawIndex>` is the production path. In tests,
/// `VectorIndex<FailingRawIndex>` enables failure injection.
struct VectorIndex<R: RawIndex = UsearchRawIndex> {
    state: Mutex<VectorState<R>>,
    /// Cached `key_map.len()`, refreshed after every mutation so that
    /// `key_count()` can answer without acquiring the state mutex.
    entry_count: AtomicUsize,
}
126
impl VectorIndex<UsearchRawIndex> {
    /// Load an existing index from `path`. Also reads `<path>.keys.json`.
    ///
    /// The sidecar JSON carries the key↔name map, the `next_key` counter and
    /// the commit SHA. Both the current object format
    /// (`{key_map, next_key, commit_sha}`) and the legacy bare-`HashMap`
    /// format are accepted; a missing sidecar yields an empty map.
    ///
    /// # Errors
    /// `MemoryError::InvalidInput` for a non-UTF-8 path; `MemoryError::Index`
    /// for backend init/load or key-map deserialisation failures.
    fn load(path: &Path) -> Result<Self, MemoryError> {
        let path_str = path.to_str().ok_or_else(|| MemoryError::InvalidInput {
            reason: "non-UTF-8 index path".to_string(),
        })?;

        // We need to know dimensions to create the IndexOptions for load.
        // usearch::Index::load() restores dimensions from the file, so we
        // use placeholder options here — they are overwritten on load.
        let options = IndexOptions {
            dimensions: 1, // overwritten by load()
            metric: MetricKind::Cos,
            quantization: ScalarKind::F32,
            ..Default::default()
        };
        let inner = Index::new(&options)
            .map_err(|e| MemoryError::Index(format!("init for load: {}", e)))?;
        inner
            .load(path_str)
            .map_err(|e| MemoryError::Index(format!("load: {}", e)))?;

        // Load the key map and counter.
        let keys_path = format!("{}.keys.json", path_str);
        let (key_map, next_key, commit_sha): (HashMap<u64, String>, u64, Option<String>) =
            if std::path::Path::new(&keys_path).exists() {
                let json = std::fs::read_to_string(&keys_path)?;
                // Support both old format (bare HashMap) and new format ({key_map, next_key}).
                let value: serde_json::Value = serde_json::from_str(&json)
                    .map_err(|e| MemoryError::Index(format!("keymap deserialise: {}", e)))?;
                if value.is_object() && value.get("key_map").is_some() {
                    let km: HashMap<u64, String> = serde_json::from_value(value["key_map"].clone())
                        .map_err(|e| MemoryError::Index(format!("keymap deserialise: {}", e)))?;
                    // If next_key is absent (early new-format files), resume
                    // from one past the largest stored key.
                    let nk: u64 = value["next_key"]
                        .as_u64()
                        .unwrap_or_else(|| km.keys().max().map(|k| k + 1).unwrap_or(0));
                    let sha: Option<String> = value
                        .get("commit_sha")
                        .and_then(|v| v.as_str())
                        .map(|s| s.to_string());
                    (km, nk, sha)
                } else {
                    // Legacy format: bare HashMap.
                    let km: HashMap<u64, String> = serde_json::from_value(value)
                        .map_err(|e| MemoryError::Index(format!("keymap deserialise: {}", e)))?;
                    let nk = km.keys().max().map(|k| k + 1).unwrap_or(0);
                    (km, nk, None)
                }
            } else {
                (HashMap::new(), 0, None)
            };

        // Rebuild the reverse map. Duplicate names collapse (last entry wins);
        // the size check below surfaces that as a warning rather than an error.
        let name_map: HashMap<String, u64> = key_map.iter().map(|(&k, v)| (v.clone(), k)).collect();
        if key_map.len() != name_map.len() {
            tracing::warn!(
                key_map_len = key_map.len(),
                name_map_len = name_map.len(),
                "key_map and name_map have different sizes; index may contain duplicate names"
            );
        }

        let count = key_map.len();
        Ok(Self {
            state: Mutex::new(VectorState {
                index: UsearchRawIndex { inner },
                key_map,
                name_map,
                next_key,
                commit_sha,
            }),
            entry_count: AtomicUsize::new(count),
        })
    }
}
201
202/// Convert a `RawIndexError` to `MemoryError::Index`, preserving the message
203/// but stripping backend-specific type identity.
204fn raw_err(e: RawIndexError) -> MemoryError {
205    MemoryError::Index(e.to_string())
206}
207
impl<R: RawIndex> VectorIndex<R> {
    /// Create an empty index for vectors of `dimensions` components.
    fn new(dimensions: usize) -> Result<Self, MemoryError> {
        let index = R::create(dimensions).map_err(raw_err)?;
        Ok(Self {
            state: Mutex::new(VectorState {
                index,
                key_map: HashMap::new(),
                name_map: HashMap::new(),
                next_key: 0,
                commit_sha: None,
            }),
            entry_count: AtomicUsize::new(0),
        })
    }

    /// Ensure the raw index can absorb `additional` more vectors.
    /// Must be called with the state lock already held by the caller.
    fn grow_if_needed_inner(state: &VectorState<R>, additional: usize) -> Result<(), MemoryError> {
        let current_capacity = state.index.capacity();
        let current_size = state.index.size();
        if current_size + additional > current_capacity {
            // Grow geometrically so a long run of adds amortises reserve cost.
            let new_capacity = (current_capacity + additional).max(current_capacity * 2);
            state.index.reserve(new_capacity).map_err(raw_err)?;
        }
        Ok(())
    }

    /// Find the vector key associated with a qualified memory name.
    fn find_key_by_name(&self, name: &str) -> Option<u64> {
        let state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        state.name_map.get(name).copied()
    }

    /// Atomically allocate the next key and add the vector in one lock acquisition.
    /// Returns the assigned key on success. On failure the counter is not advanced.
    fn add_with_next_key(&self, vector: &[f32], name: String) -> Result<u64, MemoryError> {
        let mut state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        Self::grow_if_needed_inner(&state, 1)?;
        let key = state.next_key;
        // Raw-index add happens before any map/counter mutation, so an error
        // here leaves the in-memory state untouched.
        state.index.add(key, vector).map_err(raw_err)?;
        state.name_map.insert(name.clone(), key);
        state.key_map.insert(key, name);
        state.next_key = state
            .next_key
            .checked_add(1)
            .expect("vector key space exhausted");
        // Refresh the lock-free counter while still holding the lock.
        self.entry_count
            .store(state.key_map.len(), Ordering::Relaxed);
        Ok(key)
    }

    /// Search for the `limit` nearest neighbours of `query`.
    ///
    /// Returns `(key, name, distance)` triples sorted by ascending distance.
    /// Keys with no key_map entry (e.g. removed concurrently) are dropped.
    fn search(&self, query: &[f32], limit: usize) -> Result<Vec<(u64, String, f32)>, MemoryError> {
        let state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        let raw = state.index.search(query, limit).map_err(raw_err)?;

        let results = raw
            .keys
            .into_iter()
            .zip(raw.distances)
            .filter_map(|(key, dist)| {
                state
                    .key_map
                    .get(&key)
                    .map(|name| (key, name.clone(), dist))
            })
            .collect();
        Ok(results)
    }

    /// Remove a vector by key, keeping key_map/name_map/entry_count in sync.
    fn remove(&self, key: u64) -> Result<(), MemoryError> {
        let mut state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        state.index.remove(key).map_err(raw_err)?;
        if let Some(name) = state.key_map.remove(&key) {
            // Only remove from name_map if it still points to this key.
            // An upsert may have already updated name_map to point to a newer key.
            if state.name_map.get(&name).copied() == Some(key) {
                state.name_map.remove(&name);
            }
            self.entry_count
                .store(state.key_map.len(), Ordering::Relaxed);
        }
        Ok(())
    }

    /// Atomically roll back a failed `add_with_next_key` call.
    ///
    /// Removes `new_key` from the raw index and key_map, then restores
    /// `name_map` to point to `old_key` (or removes the name entry if there
    /// was no prior entry).  All mutations happen under a single lock
    /// acquisition, preventing a partial rollback from leaving inconsistent
    /// state.
    fn rollback_add(&self, new_key: u64, old_key: Option<u64>, name: &str) {
        let mut state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        // Remove the new entry from raw index (best-effort; log on failure).
        if let Err(e) = state.index.remove(new_key) {
            tracing::warn!(error = %e, "rollback: raw index remove failed");
        }
        // Remove the new key from key_map.
        state.key_map.remove(&new_key);
        // Restore name_map to point to the old key (or remove if no old key).
        if let Some(old) = old_key {
            state.name_map.insert(name.to_owned(), old);
        } else {
            state.name_map.remove(name);
        }
        self.entry_count
            .store(state.key_map.len(), Ordering::Relaxed);
    }

    /// Return the number of entries currently in the key map.
    /// Reads the cached atomic, so no lock is taken.
    fn key_count(&self) -> usize {
        self.entry_count.load(Ordering::Relaxed)
    }

    /// Return the commit SHA stored in the index metadata (if any).
    fn commit_sha(&self) -> Option<String> {
        let state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        state.commit_sha.clone()
    }

    /// Set the commit SHA in the index metadata.
    fn set_commit_sha(&self, sha: Option<&str>) {
        let mut state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        state.commit_sha = sha.map(|s| s.to_owned());
    }

    /// Persist the index to `path`. Also writes `<path>.keys.json`.
    ///
    /// The sidecar JSON uses the `{key_map, next_key, commit_sha}` format
    /// that `load` reads back (new format).
    fn save(&self, path: &Path) -> Result<(), MemoryError> {
        let path_str = path.to_str().ok_or_else(|| MemoryError::InvalidInput {
            reason: "non-UTF-8 index path".to_string(),
        })?;

        let state = self
            .state
            .lock()
            .expect("lock poisoned — prior panic corrupted state");
        state.index.save(path_str).map_err(raw_err)?;

        // Persist the key map and counter alongside the index.
        let keys_path = format!("{}.keys.json", path_str);
        let payload = serde_json::json!({
            "key_map": &state.key_map,
            "next_key": state.next_key,
            "commit_sha": state.commit_sha,
        });
        let json = serde_json::to_string(&payload)
            .map_err(|e| MemoryError::Index(format!("keymap serialise: {}", e)))?;
        std::fs::write(&keys_path, json)?;

        Ok(())
    }
}
383
384// ---------------------------------------------------------------------------
385// UsearchStore — implements VectorStore backed by VectorIndex<UsearchRawIndex>
386// ---------------------------------------------------------------------------
387
/// Manages multiple `VectorIndex` instances — one per scope (global, each
/// project) plus a combined "all" index. Every memory exists in exactly two
/// indexes: its scope-specific index + the "all" index.
///
/// `UsearchStore` is `Send + Sync` because all inner state is protected by
/// `RwLock` / `Mutex`.
#[non_exhaustive]
pub struct UsearchStore {
    // Thin wrapper over the generic inner type, pinned to the production backend.
    inner: UsearchStoreInner<UsearchRawIndex>,
}
398
/// Generic inner implementation, separated so tests can substitute `R`.
struct UsearchStoreInner<R: RawIndex> {
    /// Per-scope indexes (global + each project). Project indexes are created
    /// lazily on first `add` for that scope.
    scopes: RwLock<HashMap<Scope, VectorIndex<R>>>,
    /// Combined index containing all vectors.
    all: VectorIndex<R>,
    /// Embedding dimensions (needed to create new scope indexes).
    dimensions: usize,
}
408
409// Locking order: `scopes` (RwLock) is always acquired before any
410// `VectorIndex::state` (Mutex). Never hold a VectorIndex Mutex while
411// acquiring `scopes`. The `all` index is accessed directly (not through
412// `scopes`), but always while `scopes` is already held or after it has
413// been released — never in the reverse order.
414
impl UsearchStore {
    /// Create a new `UsearchStore` with empty global + all indexes.
    pub fn new(dimensions: usize) -> Result<Self, MemoryError> {
        let global = VectorIndex::new(dimensions)?;
        let all = VectorIndex::new(dimensions)?;
        let mut scopes = HashMap::new();
        scopes.insert(Scope::Global, global);
        Ok(Self {
            inner: UsearchStoreInner {
                scopes: RwLock::new(scopes),
                all,
                dimensions,
            },
        })
    }

    /// Load all indexes from subdirectories under `dir`.
    ///
    /// Expected layout: `dir/all/index.usearch`, `dir/global/index.usearch`,
    /// and `dir/projects/<name>/index.usearch` per project.
    ///
    /// Missing subdirectories are treated as empty — those scopes will be
    /// rebuilt incrementally on next use.
    pub fn load(dir: &Path, dimensions: usize) -> Result<Self, MemoryError> {
        let span = tracing::info_span!("index.load", key_count = tracing::field::Empty,);
        let _enter = span.enter();

        // If a previous save was interrupted, the on-disk state may be
        // inconsistent (some indexes from current state, others from prior).
        // Rather than loading mixed data, start fresh — indexes are a cache
        // that can always be rebuilt from the source-of-truth markdown files.
        let dirty_marker = dir.join(".save-in-progress");
        if dirty_marker.exists() {
            tracing::warn!("detected interrupted index save — discarding indexes");
            let _ = std::fs::remove_file(&dirty_marker);
            return Self::new(dimensions);
        }

        // Load all-index.
        let all_path = dir.join("all").join("index.usearch");
        let all = if all_path.exists() {
            VectorIndex::load(&all_path)?
        } else {
            VectorIndex::new(dimensions)?
        };

        let mut scopes: HashMap<Scope, VectorIndex<UsearchRawIndex>> = HashMap::new();

        // Load global index.
        let global_path = dir.join("global").join("index.usearch");
        let global = if global_path.exists() {
            VectorIndex::load(&global_path)?
        } else {
            VectorIndex::new(dimensions)?
        };
        scopes.insert(Scope::Global, global);

        // Scan for project indexes under projects/*/
        let projects_dir = dir.join("projects");
        if projects_dir.is_dir() {
            let entries = std::fs::read_dir(&projects_dir)
                .map_err(|e| MemoryError::Index(format!("read projects dir: {}", e)))?;
            for entry in entries {
                let entry =
                    entry.map_err(|e| MemoryError::Index(format!("read dir entry: {}", e)))?;
                let path = entry.path();
                if path.is_dir() {
                    let project_name = path
                        .file_name()
                        .and_then(|n| n.to_str())
                        .map(|s| s.to_string())
                        .ok_or_else(|| {
                            MemoryError::Index("non-UTF-8 project directory name".to_string())
                        })?;
                    // Skip (don't fail on) directories with invalid project
                    // names — e.g. stray files or editor droppings.
                    if let Err(e) = validate_name(&project_name) {
                        tracing::warn!(
                            project_name = %project_name,
                            error = %e,
                            "skipping project index with invalid name"
                        );
                        continue;
                    }
                    let index_path = path.join("index.usearch");
                    if index_path.exists() {
                        let idx = VectorIndex::load(&index_path)?;
                        scopes.insert(Scope::Project(project_name), idx);
                    }
                }
            }
        }

        let key_count = all.key_count();
        span.record("key_count", key_count);

        Ok(Self {
            inner: UsearchStoreInner {
                scopes: RwLock::new(scopes),
                all,
                dimensions,
            },
        })
    }
}
515
516// Shared logic for both production and test-generic paths.
517impl<R: RawIndex> UsearchStoreInner<R> {
518    fn add(
519        &self,
520        scope: &Scope,
521        vector: &[f32],
522        qualified_name: String,
523    ) -> Result<u64, MemoryError> {
524        let dimensions = vector.len();
525        let span = tracing::debug_span!(
526            "index.add",
527            scope = %scope.dir_prefix(),
528            dimensions,
529            key_count = tracing::field::Empty,
530        );
531        let _enter = span.enter();
532
533        if vector.len() != self.dimensions {
534            return Err(MemoryError::InvalidInput {
535                reason: format!(
536                    "expected {} dimensions, got {}",
537                    self.dimensions,
538                    vector.len()
539                ),
540            });
541        }
542
543        // Write lock serialises the full find→insert→remove composite so
544        // concurrent upserts for the same name cannot interleave. Reads
545        // (via `search`) use a read lock and are not blocked by other reads.
546        let mut scopes = self.scopes.write().expect("scopes lock poisoned");
547
548        // Ensure scope index exists (inline, since we already hold write lock).
549        if !scopes.contains_key(scope) {
550            scopes.insert(scope.clone(), Self::new_index(self.dimensions)?);
551        }
552
553        let scope_idx = scopes
554            .get(scope)
555            .expect("scope index must exist after insert");
556
557        // Capture old keys before inserting new ones.
558        let old_scope_key = scope_idx.find_key_by_name(&qualified_name);
559        let old_all_key = self.all.find_key_by_name(&qualified_name);
560
561        // Insert into scope index first.
562        let new_scope_key = scope_idx.add_with_next_key(vector, qualified_name.clone())?;
563
564        // Insert into all-index; if this fails, roll back scope insert atomically,
565        // restoring name_map to its pre-add state (so the old entry remains reachable).
566        let all_key = match self.all.add_with_next_key(vector, qualified_name.clone()) {
567            Ok(key) => key,
568            Err(e) => {
569                scope_idx.rollback_add(new_scope_key, old_scope_key, &qualified_name);
570                return Err(e);
571            }
572        };
573
574        // Both succeeded — now clean up old entries.
575        if let Some(key) = old_scope_key {
576            let _ = scope_idx.remove(key);
577        }
578        if let Some(key) = old_all_key {
579            let _ = self.all.remove(key);
580        }
581
582        // Record key_count (all-index size) after insertion.
583        span.record("key_count", self.all.key_count());
584
585        Ok(all_key)
586    }
587
588    fn remove(&self, scope: &Scope, qualified_name: &str) -> Result<(), MemoryError> {
589        let _span = tracing::debug_span!(
590            "index.remove",
591            scope = %scope.dir_prefix(),
592        )
593        .entered();
594
595        // Write lock serialises with concurrent adds for the same name.
596        let scopes = self.scopes.write().expect("scopes lock poisoned");
597
598        // Remove from scope index (best-effort).
599        if let Some(scope_idx) = scopes.get(scope) {
600            if let Some(key) = scope_idx.find_key_by_name(qualified_name) {
601                if let Err(e) = scope_idx.remove(key) {
602                    tracing::warn!(
603                        qualified_name = %qualified_name,
604                        error = %e,
605                        "scope index removal failed; continuing to all-index"
606                    );
607                }
608            }
609        }
610
611        // Remove from all-index (best-effort).
612        if let Some(key) = self.all.find_key_by_name(qualified_name) {
613            if let Err(e) = self.all.remove(key) {
614                tracing::warn!(
615                    qualified_name = %qualified_name,
616                    error = %e,
617                    "all-index removal failed"
618                );
619            }
620        }
621
622        Ok(())
623    }
624
625    fn search(
626        &self,
627        filter: &ScopeFilter,
628        query: &[f32],
629        limit: usize,
630    ) -> Result<Vec<(u64, String, f32)>, MemoryError> {
631        let dimensions = query.len();
632        let scope_str: Cow<'_, str> = match filter {
633            ScopeFilter::GlobalOnly => "global".into(),
634            ScopeFilter::All => "all".into(),
635            ScopeFilter::ProjectAndGlobal(p) => format!("project+global:{p}").into(),
636        };
637        let span = tracing::debug_span!(
638            "index.search",
639            scope = %scope_str,
640            dimensions,
641            key_count = self.all.key_count(),
642            count = tracing::field::Empty,
643        );
644        let _enter = span.enter();
645
646        if query.len() != self.dimensions {
647            return Err(MemoryError::InvalidInput {
648                reason: format!(
649                    "expected {} dimensions, got {}",
650                    self.dimensions,
651                    query.len()
652                ),
653            });
654        }
655
656        let results = match filter {
657            ScopeFilter::All => self.all.search(query, limit),
658
659            ScopeFilter::GlobalOnly => {
660                let scopes = self.scopes.read().expect("scopes lock poisoned");
661                match scopes.get(&Scope::Global) {
662                    Some(global_idx) => global_idx.search(query, limit),
663                    None => Ok(Vec::new()),
664                }
665            }
666
667            ScopeFilter::ProjectAndGlobal(project_name) => {
668                let scopes = self.scopes.read().expect("scopes lock poisoned");
669                let project_scope = Scope::Project(project_name.clone());
670
671                let mut combined: Vec<(u64, String, f32)> = Vec::new();
672
673                if let Some(global_idx) = scopes.get(&Scope::Global) {
674                    let mut global_results = global_idx.search(query, limit)?;
675                    combined.append(&mut global_results);
676                }
677
678                if let Some(proj_idx) = scopes.get(&project_scope) {
679                    let mut proj_results = proj_idx.search(query, limit)?;
680                    combined.append(&mut proj_results);
681                }
682
683                // Deduplicate by qualified name (HashSet ensures non-adjacent dupes are caught).
684                let mut seen = std::collections::HashSet::new();
685                combined.retain(|(_, name, _)| seen.insert(name.clone()));
686                // Sort by ascending distance and take top-k.
687                combined.sort_by(|a, b| a.2.partial_cmp(&b.2).unwrap_or(std::cmp::Ordering::Equal));
688                combined.truncate(limit);
689                Ok(combined)
690            }
691        };
692        if let Ok(ref r) = results {
693            span.record("count", r.len());
694        }
695        results
696    }
697
698    fn find_key_by_name(&self, qualified_name: &str) -> Option<u64> {
699        self.all.find_key_by_name(qualified_name)
700    }
701
702    fn save(&self, dir: &Path) -> Result<(), MemoryError> {
703        let span = tracing::debug_span!("index.save", key_count = tracing::field::Empty,);
704        let _enter = span.enter();
705
706        std::fs::create_dir_all(dir)?;
707
708        // Write a dirty marker — if we crash mid-save, the next load will see
709        // this and ignore commit SHAs (forcing a fresh rebuild).
710        let marker = dir.join(".save-in-progress");
711        std::fs::write(&marker, b"")?;
712
713        let result = (|| -> Result<(), MemoryError> {
714            // Acquire scopes read lock before accessing any indexes.
715            let scopes = self.scopes.read().expect("scopes lock poisoned");
716
717            // Persist all-index.
718            let all_dir = dir.join("all");
719            std::fs::create_dir_all(&all_dir)?;
720            self.all.save(&all_dir.join("index.usearch"))?;
721
722            // Persist per-scope indexes.
723            for (scope, idx) in scopes.iter() {
724                let scope_dir = dir.join(scope.dir_prefix());
725                std::fs::create_dir_all(&scope_dir)?;
726                idx.save(&scope_dir.join("index.usearch"))?;
727            }
728
729            // Record total key count (all-index is authoritative — it holds every entry).
730            let key_count = self.all.key_count();
731            span.record("key_count", key_count);
732
733            // scopes lock dropped at end of closure scope.
734            Ok(())
735        })();
736
737        // Always remove the marker — a transient I/O failure should not
738        // force a full reindex on next startup.
739        let _ = std::fs::remove_file(&marker);
740
741        result
742    }
743
744    fn commit_sha(&self) -> Option<String> {
745        self.all.commit_sha()
746    }
747
748    fn set_commit_sha(&self, sha: Option<&str>) {
749        let scopes = self.scopes.read().expect("scopes lock poisoned");
750        self.all.set_commit_sha(sha);
751        for idx in scopes.values() {
752            idx.set_commit_sha(sha);
753        }
754    }
755}
756
757impl<R: RawIndex> UsearchStoreInner<R> {
758    fn new_index(dimensions: usize) -> Result<VectorIndex<R>, MemoryError> {
759        VectorIndex::new(dimensions)
760    }
761}
762
763// ---------------------------------------------------------------------------
764// VectorStore implementation for UsearchStore
765// ---------------------------------------------------------------------------
766
// Marker impl — presumably the sealed-trait pattern gating `VectorStore`
// to in-crate implementors; confirm against `crate::index::sealed`.
impl crate::index::sealed::Sealed for UsearchStore {}
768
/// `VectorStore` facade: every method delegates straight to `UsearchStoreInner`.
impl crate::index::VectorStore for UsearchStore {
    fn add(
        &self,
        scope: &Scope,
        vector: &[f32],
        qualified_name: String,
    ) -> Result<u64, MemoryError> {
        self.inner.add(scope, vector, qualified_name)
    }

    fn remove(&self, scope: &Scope, qualified_name: &str) -> Result<(), MemoryError> {
        self.inner.remove(scope, qualified_name)
    }

    fn search(
        &self,
        filter: &ScopeFilter,
        query: &[f32],
        limit: usize,
    ) -> Result<Vec<(u64, String, f32)>, MemoryError> {
        self.inner.search(filter, query, limit)
    }

    fn find_by_name(&self, qualified_name: &str) -> Option<u64> {
        self.inner.find_key_by_name(qualified_name)
    }

    fn save(&self, dir: &Path) -> Result<(), MemoryError> {
        self.inner.save(dir)
    }

    // Always ready: this backend is fully initialised by the constructor
    // (no background build phase is visible in this file).
    fn is_ready(&self) -> bool {
        true
    }

    fn dimensions(&self) -> usize {
        self.inner.dimensions
    }

    fn commit_sha(&self) -> Option<String> {
        self.inner.commit_sha()
    }

    fn set_commit_sha(&self, sha: Option<&str>) {
        self.inner.set_commit_sha(sha)
    }
}
816
817// ---------------------------------------------------------------------------
818// Tests
819// ---------------------------------------------------------------------------
820
821#[cfg(test)]
822mod tests {
823    use super::*;
824    use crate::index::VectorStore;
825
826    // -----------------------------------------------------------------------
827    // FailingRawIndex — test double for error injection
828    // -----------------------------------------------------------------------
829
830    /// Controls which operation `FailingRawIndex` will fail on.
831    #[derive(Debug, Clone, Copy, PartialEq)]
832    enum FailOn {
833        Add,
834        Remove,
835        Search,
836        Save,
837        Reserve,
838        None,
839    }
840
    /// A `RawIndex` implementation that fails on the configured operation.
    ///
    /// Wraps a real usearch `Index` so every non-targeted operation behaves
    /// normally; only the operation named by `fail_on` returns an injected
    /// error (possibly after `fail_after` successful calls).
    struct FailingRawIndex {
        /// Underlying real index for operations that should succeed.
        inner: Index,
        /// Which operation to fail.
        fail_on: FailOn,
        /// Counter for tracking calls (for partial-failure scenarios).
        call_count: Mutex<usize>,
        /// Fail after this many successful calls (0 = always fail).
        fail_after: usize,
    }
852
853    impl FailingRawIndex {
854        fn new(dimensions: usize, fail_on: FailOn, fail_after: usize) -> Self {
855            let options = IndexOptions {
856                dimensions,
857                metric: MetricKind::Cos,
858                quantization: ScalarKind::F32,
859                ..Default::default()
860            };
861            let inner = Index::new(&options).expect("create failing index");
862            inner.reserve(1024).expect("reserve");
863            Self {
864                inner,
865                fail_on,
866                call_count: Mutex::new(0),
867                fail_after,
868            }
869        }
870
871        fn should_fail(&self, op: FailOn) -> bool {
872            if self.fail_on != op {
873                return false;
874            }
875            let mut count = self.call_count.lock().unwrap();
876            if self.fail_after == 0 || *count >= self.fail_after {
877                return true;
878            }
879            *count += 1;
880            false
881        }
882
883        fn injected_error(op: &str) -> RawIndexError {
884            format!("injected {op} failure").into()
885        }
886    }
887
888    impl RawIndex for FailingRawIndex {
889        fn create(dimensions: usize) -> Result<Self, RawIndexError> {
890            Ok(FailingRawIndex::new(dimensions, FailOn::None, 0))
891        }
892
893        fn add(&self, key: u64, vector: &[f32]) -> Result<(), RawIndexError> {
894            if self.should_fail(FailOn::Add) {
895                return Err(Self::injected_error("add"));
896            }
897            self.inner.add(key, vector).map_err(|e| e.into())
898        }
899
900        fn remove(&self, key: u64) -> Result<(), RawIndexError> {
901            if self.should_fail(FailOn::Remove) {
902                return Err(Self::injected_error("remove"));
903            }
904            self.inner.remove(key).map(|_| ()).map_err(|e| e.into())
905        }
906
907        fn search(&self, query: &[f32], count: usize) -> Result<RawSearchResults, RawIndexError> {
908            if self.should_fail(FailOn::Search) {
909                return Err(Self::injected_error("search"));
910            }
911            let m = self.inner.search(query, count)?;
912            Ok(RawSearchResults {
913                keys: m.keys,
914                distances: m.distances,
915            })
916        }
917
918        fn save(&self, path: &str) -> Result<(), RawIndexError> {
919            if self.should_fail(FailOn::Save) {
920                return Err(Self::injected_error("save"));
921            }
922            self.inner.save(path).map_err(|e| e.into())
923        }
924
925        fn reserve(&self, capacity: usize) -> Result<(), RawIndexError> {
926            if self.should_fail(FailOn::Reserve) {
927                return Err(Self::injected_error("reserve"));
928            }
929            self.inner.reserve(capacity).map_err(|e| e.into())
930        }
931
932        fn size(&self) -> usize {
933            self.inner.size()
934        }
935
936        fn capacity(&self) -> usize {
937            self.inner.capacity()
938        }
939    }
940
    // Helper to build a VectorIndex<FailingRawIndex> directly.
    fn make_failing_index(
        dimensions: usize,
        fail_on: FailOn,
        fail_after: usize,
    ) -> VectorIndex<FailingRawIndex> {
        // Construct the index by hand (rather than via `VectorIndex::new`) so
        // the raw backend can be the failure-injecting test double.
        VectorIndex {
            state: Mutex::new(VectorState {
                index: FailingRawIndex::new(dimensions, fail_on, fail_after),
                key_map: HashMap::new(),
                name_map: HashMap::new(),
                // Keys are handed out starting at 0, matching a fresh index.
                next_key: 0,
                commit_sha: None,
            }),
            entry_count: AtomicUsize::new(0),
        }
    }
958
    // A test-only variant of UsearchStoreInner that works over FailingRawIndex.
    // We need this to test rollback.
    struct FailableStore {
        // The composite store whose all-index (and optionally the scope
        // indexes) can be configured to fail on demand.
        inner: UsearchStoreInner<FailingRawIndex>,
    }
964
965    fn make_failable_store(
966        dimensions: usize,
967        all_fail_on: FailOn,
968        all_fail_after: usize,
969    ) -> FailableStore {
970        let all = make_failing_index(dimensions, all_fail_on, all_fail_after);
971        let scope = make_failing_index(dimensions, FailOn::None, 0);
972        let mut scopes = HashMap::new();
973        scopes.insert(Scope::Global, scope);
974        FailableStore {
975            inner: UsearchStoreInner {
976                scopes: RwLock::new(scopes),
977                all,
978                dimensions,
979            },
980        }
981    }
982
983    // -----------------------------------------------------------------------
984    // VectorIndex tests (low-level)
985    // -----------------------------------------------------------------------
986
    /// A 4-dimensional `VectorIndex` over the real usearch backend.
    fn make_index() -> VectorIndex {
        VectorIndex::new(4).expect("failed to create index")
    }
990
991    fn dummy_vec() -> Vec<f32> {
992        vec![1.0, 0.0, 0.0, 0.0]
993    }
994
995    /// Verify that `remove(old_key)` does NOT clobber `name_map` when an
996    /// upsert has already updated `name_map` to point to a newer key.
997    #[test]
998    fn remove_old_key_does_not_clobber_upserted_name_map_entry() {
999        let index = make_index();
1000        let v = dummy_vec();
1001
1002        let old_key = index
1003            .add_with_next_key(&v, "global/foo".to_string())
1004            .expect("first add failed");
1005        let new_key = index
1006            .add_with_next_key(&v, "global/foo".to_string())
1007            .expect("second add failed");
1008
1009        assert_ne!(old_key, new_key, "keys must differ");
1010
1011        index.remove(old_key).expect("remove failed");
1012
1013        assert_eq!(
1014            index.find_key_by_name("global/foo"),
1015            Some(new_key),
1016            "name_map entry for new_key was incorrectly removed"
1017        );
1018    }
1019
1020    #[test]
1021    fn remove_only_key_clears_name_map() {
1022        let index = make_index();
1023        let v = dummy_vec();
1024
1025        let key = index
1026            .add_with_next_key(&v, "global/bar".to_string())
1027            .expect("add failed");
1028
1029        index.remove(key).expect("remove failed");
1030
1031        assert_eq!(
1032            index.find_key_by_name("global/bar"),
1033            None,
1034            "name_map entry should have been cleared"
1035        );
1036    }
1037
1038    // -----------------------------------------------------------------------
1039    // UsearchStore tests — via VectorStore trait
1040    // -----------------------------------------------------------------------
1041
    /// An 8-dimensional `UsearchStore` over the real usearch backend.
    fn make_store() -> UsearchStore {
        UsearchStore::new(8).expect("failed to create UsearchStore")
    }
1045
1046    fn vec_a() -> Vec<f32> {
1047        vec![1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
1048    }
1049
1050    fn vec_b() -> Vec<f32> {
1051        vec![0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
1052    }
1053
1054    fn vec_c() -> Vec<f32> {
1055        vec![0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]
1056    }
1057
1058    #[test]
1059    fn usearch_store_add_inserts_into_scope_and_all() {
1060        let si: &dyn VectorStore = &make_store();
1061        let scope = Scope::Global;
1062        let name = "global/memory-a".to_string();
1063
1064        si.add(&scope, &vec_a(), name.clone()).expect("add failed");
1065
1066        assert!(si.find_by_name(&name).is_some(), "should be in all-index");
1067
1068        let results = si
1069            .search(&ScopeFilter::GlobalOnly, &vec_a(), 5)
1070            .expect("search failed");
1071        assert!(
1072            results.iter().any(|(_, n, _)| n == &name),
1073            "should be found in global search"
1074        );
1075    }
1076
1077    #[test]
1078    fn usearch_store_remove_removes_from_both() {
1079        let si: &dyn VectorStore = &make_store();
1080        let scope = Scope::Global;
1081        let name = "global/memory-rm".to_string();
1082
1083        si.add(&scope, &vec_a(), name.clone()).expect("add failed");
1084        assert!(si.find_by_name(&name).is_some(), "should exist");
1085
1086        si.remove(&scope, &name).expect("remove failed");
1087
1088        assert!(
1089            si.find_by_name(&name).is_none(),
1090            "should be gone from all-index"
1091        );
1092
1093        let results = si
1094            .search(&ScopeFilter::GlobalOnly, &vec_a(), 5)
1095            .expect("search failed");
1096        assert!(
1097            !results.iter().any(|(_, n, _)| n == &name),
1098            "should not appear in global search after removal"
1099        );
1100    }
1101
1102    #[test]
1103    fn usearch_store_search_global_only() {
1104        let si: &dyn VectorStore = &make_store();
1105        let proj = Scope::Project("myproj".to_string());
1106
1107        si.add(&Scope::Global, &vec_a(), "global/mem-global".to_string())
1108            .expect("add global failed");
1109        si.add(&proj, &vec_b(), "projects/myproj/mem-proj".to_string())
1110            .expect("add project failed");
1111
1112        let results = si
1113            .search(&ScopeFilter::GlobalOnly, &vec_a(), 5)
1114            .expect("search failed");
1115
1116        let names: Vec<&str> = results.iter().map(|(_, n, _)| n.as_str()).collect();
1117        assert!(
1118            names.contains(&"global/mem-global"),
1119            "should contain global"
1120        );
1121        assert!(
1122            !names.contains(&"projects/myproj/mem-proj"),
1123            "should NOT contain project memory"
1124        );
1125    }
1126
1127    #[test]
1128    fn usearch_store_search_project_and_global() {
1129        let si: &dyn VectorStore = &make_store();
1130        let proj_a = Scope::Project("alpha".to_string());
1131        let proj_b = Scope::Project("beta".to_string());
1132
1133        si.add(&Scope::Global, &vec_a(), "global/g1".to_string())
1134            .expect("add global failed");
1135        si.add(&proj_a, &vec_b(), "projects/alpha/a1".to_string())
1136            .expect("add alpha failed");
1137        si.add(&proj_b, &vec_c(), "projects/beta/b1".to_string())
1138            .expect("add beta failed");
1139
1140        let results = si
1141            .search(
1142                &ScopeFilter::ProjectAndGlobal("alpha".to_string()),
1143                &vec_a(),
1144                10,
1145            )
1146            .expect("search failed");
1147
1148        let names: Vec<&str> = results.iter().map(|(_, n, _)| n.as_str()).collect();
1149        assert!(names.contains(&"global/g1"), "should contain global");
1150        assert!(names.contains(&"projects/alpha/a1"), "should contain alpha");
1151        assert!(
1152            !names.contains(&"projects/beta/b1"),
1153            "should NOT contain beta"
1154        );
1155    }
1156
1157    #[test]
1158    fn usearch_store_search_all() {
1159        let si: &dyn VectorStore = &make_store();
1160        let proj = Scope::Project("foo".to_string());
1161
1162        si.add(&Scope::Global, &vec_a(), "global/x".to_string())
1163            .expect("add global");
1164        si.add(&proj, &vec_b(), "projects/foo/y".to_string())
1165            .expect("add project");
1166
1167        let results = si
1168            .search(&ScopeFilter::All, &vec_a(), 10)
1169            .expect("search failed");
1170
1171        let names: Vec<&str> = results.iter().map(|(_, n, _)| n.as_str()).collect();
1172        assert!(names.contains(&"global/x"), "all should include global");
1173        assert!(
1174            names.contains(&"projects/foo/y"),
1175            "all should include project"
1176        );
1177    }
1178
1179    #[test]
1180    fn usearch_store_upsert_replaces_old_entry() {
1181        let si: &dyn VectorStore = &make_store();
1182        let name = "global/memo".to_string();
1183        si.add(&Scope::Global, &vec_a(), name.clone()).unwrap();
1184        si.add(&Scope::Global, &vec_b(), name.clone()).unwrap();
1185        let results = si.search(&ScopeFilter::All, &vec_b(), 10).unwrap();
1186        assert_eq!(
1187            results.iter().filter(|(_, n, _)| n == &name).count(),
1188            1,
1189            "upsert should leave exactly one entry for the name"
1190        );
1191    }
1192
    #[test]
    fn usearch_store_dirty_marker_discards_indexes() {
        // Simulates a crash mid-save: a leftover `.save-in-progress` marker
        // must cause `load` to discard the on-disk indexes entirely and clean
        // the marker up.
        let dir = tempfile::tempdir().expect("tempdir");
        let si = UsearchStore::new(8).expect("create");
        let store: &dyn VectorStore = &si;
        store
            .add(&Scope::Global, &vec_a(), "global/test-mem".to_string())
            .expect("add");
        store.set_commit_sha(Some("abc123"));
        store.save(dir.path()).expect("save");

        // Plant the dirty marker after a clean save, as if a later save died.
        std::fs::write(dir.path().join(".save-in-progress"), b"").unwrap();

        let loaded = UsearchStore::load(dir.path(), 8).expect("load");
        let loaded: &dyn VectorStore = &loaded;
        assert!(
            loaded.commit_sha().is_none(),
            "dirty marker should result in no SHA"
        );
        assert!(
            loaded.find_by_name("global/test-mem").is_none(),
            "dirty marker should discard all indexed data"
        );
        assert!(
            !dir.path().join(".save-in-progress").exists(),
            "marker should be cleaned up"
        );
    }
1221
    #[test]
    fn usearch_store_save_load_round_trip() {
        // Persist one global and one project memory, reload the store, and
        // confirm both name lookup and scoped search still find them.
        let dir = tempfile::tempdir().expect("tempdir");
        let si = UsearchStore::new(8).expect("create");
        let store: &dyn VectorStore = &si;
        let proj = Scope::Project("rtrip".to_string());

        store
            .add(&Scope::Global, &vec_a(), "global/rt-global".to_string())
            .expect("add global");
        store
            .add(&proj, &vec_b(), "projects/rtrip/rt-proj".to_string())
            .expect("add project");

        store.save(dir.path()).expect("save failed");

        let loaded = UsearchStore::load(dir.path(), 8).expect("load failed");
        let loaded: &dyn VectorStore = &loaded;

        assert!(
            loaded.find_by_name("global/rt-global").is_some(),
            "global memory should survive round-trip"
        );
        assert!(
            loaded.find_by_name("projects/rtrip/rt-proj").is_some(),
            "project memory should survive round-trip"
        );

        // Scoped search must also work against the reloaded indexes.
        let results = loaded
            .search(
                &ScopeFilter::ProjectAndGlobal("rtrip".to_string()),
                &vec_a(),
                10,
            )
            .expect("search failed");
        let names: Vec<&str> = results.iter().map(|(_, n, _)| n.as_str()).collect();
        assert!(names.contains(&"global/rt-global"));
        assert!(names.contains(&"projects/rtrip/rt-proj"));
    }
1261
1262    #[test]
1263    fn usearch_store_same_short_name_different_scopes_coexist() {
1264        let si: &dyn VectorStore = &make_store();
1265        si.add(&Scope::Global, &vec_a(), "global/foo".to_string())
1266            .unwrap();
1267        si.add(
1268            &Scope::Project("p".into()),
1269            &vec_b(),
1270            "projects/p/foo".to_string(),
1271        )
1272        .unwrap();
1273        assert!(si.find_by_name("global/foo").is_some());
1274        assert!(si.find_by_name("projects/p/foo").is_some());
1275        assert_ne!(
1276            si.find_by_name("global/foo"),
1277            si.find_by_name("projects/p/foo"),
1278            "different scopes should have distinct keys"
1279        );
1280    }
1281
1282    // -----------------------------------------------------------------------
1283    // TC-03: FailingRawIndex is injectable
1284    // -----------------------------------------------------------------------
1285
1286    #[test]
1287    fn tc03_failing_raw_index_is_injectable() {
1288        let idx = make_failing_index(4, FailOn::Add, 0);
1289        let v = vec![1.0_f32, 0.0, 0.0, 0.0];
1290        let result = idx.add_with_next_key(&v, "test/name".to_string());
1291        assert!(
1292            result.is_err(),
1293            "FailingRawIndex with FailOn::Add should return error"
1294        );
1295    }
1296
1297    // -----------------------------------------------------------------------
1298    // TC-04a: Rollback on all-index failure — scope index entry count unchanged
1299    // -----------------------------------------------------------------------
1300
1301    #[test]
1302    fn tc04a_rollback_on_all_index_failure_scope_count_unchanged() {
1303        // Build a store where the all-index fails immediately.
1304        let fs = make_failable_store(8, FailOn::Add, 0);
1305
1306        let scope = Scope::Global;
1307        let name = "global/rollback-test".to_string();
1308
1309        // This add should fail because all-index.add fails.
1310        let result = fs.inner.add(&scope, &vec_a(), name.clone());
1311        assert!(result.is_err(), "add should fail when all-index fails");
1312
1313        // Scope index should be empty — rollback removed the entry.
1314        let scopes = fs.inner.scopes.read().unwrap();
1315        let scope_idx = scopes.get(&scope).expect("global scope must exist");
1316        assert_eq!(
1317            scope_idx.key_count(),
1318            0,
1319            "TC-04a: scope index entry count should be 0 after rollback (was: {})",
1320            scope_idx.key_count()
1321        );
1322    }
1323
1324    // -----------------------------------------------------------------------
1325    // TC-04b: Rollback doesn't corrupt existing entries
1326    // -----------------------------------------------------------------------
1327
1328    #[test]
1329    fn tc04b_rollback_does_not_corrupt_existing_entries() {
1330        // all-index: first add succeeds, second fails.
1331        let fs = make_failable_store(8, FailOn::Add, 1);
1332
1333        let scope = Scope::Global;
1334
1335        // First add should succeed (fail_after=1 means first call succeeds).
1336        let first_name = "global/existing".to_string();
1337        fs.inner
1338            .add(&scope, &vec_a(), first_name.clone())
1339            .expect("first add should succeed");
1340
1341        // Second add should fail (all-index now fails).
1342        let second_name = "global/failing".to_string();
1343        let result = fs.inner.add(&scope, &vec_b(), second_name.clone());
1344        assert!(result.is_err(), "second add should fail");
1345
1346        // The first entry must still be findable.
1347        assert!(
1348            fs.inner.find_key_by_name(&first_name).is_some(),
1349            "TC-04b: existing entry should not be corrupted by rollback"
1350        );
1351        assert!(
1352            fs.inner.find_key_by_name(&second_name).is_none(),
1353            "TC-04b: failed entry should not be in the index"
1354        );
1355    }
1356
1357    // -----------------------------------------------------------------------
1358    // TC-04c: Upsert rollback — original entry survives when all-index fails
1359    // -----------------------------------------------------------------------
1360
    #[test]
    fn tc04c_upsert_rollback_preserves_original_entry() {
        // Seed one successful entry, then re-add the same name with the
        // all-index configured to fail on the second call.  The original
        // entry must still be reachable via find_key_by_name afterwards.
        let fs = make_failable_store(8, FailOn::Add, 1);

        let scope = Scope::Global;
        let name = "global/upsert-rollback".to_string();

        // First add: both scope and all-index succeed (fail_after=1).
        let original_key = fs
            .inner
            .add(&scope, &vec_a(), name.clone())
            .expect("TC-04c: first add should succeed");

        // Second add (upsert): all-index fails on this call → rollback.
        let result = fs.inner.add(&scope, &vec_b(), name.clone());
        assert!(
            result.is_err(),
            "TC-04c: second add should fail when all-index fails"
        );

        // The original entry must still be reachable — the rollback must
        // restore the name → original_key mapping, not merely drop the name.
        let key_after = fs.inner.find_key_by_name(&name);
        assert_eq!(
            key_after,
            Some(original_key),
            "TC-04c: original entry must survive upsert rollback (expected key {original_key}, got {key_after:?})"
        );
    }
1392
1393    // -----------------------------------------------------------------------
1394    // TC-05a: Errors are MemoryError variants
1395    // -----------------------------------------------------------------------
1396
1397    #[test]
1398    fn tc05a_errors_are_memory_error_variants() {
1399        let idx = make_failing_index(4, FailOn::Add, 0);
1400        let v = vec![1.0_f32, 0.0, 0.0, 0.0];
1401        let err = idx
1402            .add_with_next_key(&v, "test/name".to_string())
1403            .unwrap_err();
1404        // Should be MemoryError::Index variant.
1405        assert!(
1406            matches!(err, MemoryError::Index(_)),
1407            "TC-05a: error should be MemoryError::Index, got: {:?}",
1408            err
1409        );
1410    }
1411
1412    // -----------------------------------------------------------------------
1413    // TC-05b: Error Display has no usearch internals
1414    // -----------------------------------------------------------------------
1415
1416    #[test]
1417    fn tc05b_error_display_has_no_raw_usearch_type_names() {
1418        let idx = make_failing_index(4, FailOn::Add, 0);
1419        let v = vec![1.0_f32, 0.0, 0.0, 0.0];
1420        let err = idx
1421            .add_with_next_key(&v, "test/name".to_string())
1422            .unwrap_err();
1423        let display = format!("{}", err);
1424        // The display should contain our wrapper prefix, not raw usearch types.
1425        assert!(
1426            display.contains("index error"),
1427            "TC-05b: display should contain 'index error', got: {}",
1428            display
1429        );
1430        assert!(
1431            !display.contains("usearch") && !display.contains("cxx::Exception"),
1432            "TC-05b: display must not leak raw backend type names, got: {}",
1433            display
1434        );
1435    }
1436
1437    // -----------------------------------------------------------------------
1438    // TC-05b (real): Dimension mismatch produces clean MemoryError
1439    // -----------------------------------------------------------------------
1440
1441    #[test]
1442    fn tc05b_dimension_mismatch_error_is_clean() {
1443        let store = UsearchStore::new(8).expect("create");
1444        let wrong_dims = vec![1.0_f32, 0.0, 0.0]; // 3 dims, store expects 8
1445        let err = store
1446            .inner
1447            .add(&Scope::Global, &wrong_dims, "global/bad-dims".to_string())
1448            .unwrap_err();
1449        let display = format!("{}", err);
1450        assert!(
1451            !display.contains("usearch") && !display.contains("cxx::Exception"),
1452            "error display must not leak backend type names, got: {}",
1453            display
1454        );
1455        assert!(
1456            matches!(err, MemoryError::InvalidInput { .. }),
1457            "dimension mismatch should return InvalidInput, got: {:?}",
1458            err
1459        );
1460    }
1461
1462    // -----------------------------------------------------------------------
1463    // TC-06a: UsearchStore::is_ready() returns true
1464    // -----------------------------------------------------------------------
1465
1466    #[test]
1467    fn tc06a_usearch_store_is_ready() {
1468        let store = UsearchStore::new(4).expect("create");
1469        assert!(
1470            store.is_ready(),
1471            "TC-06a: UsearchStore::is_ready() should return true"
1472        );
1473    }
1474}