
hermes_core/index/mod.rs

//! Index - multi-segment async search index
//!
//! The `Index` is the central concept that provides:
//! - `Index::create()` / `Index::open()` - create or open an index
//! - `index.writer()` - get an IndexWriter for adding documents
//! - `index.reader()` - get an IndexReader for searching (with reload policy)
//!
//! The Index owns the SegmentManager which handles segment lifecycle and tracking.
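//!
//! A minimal end-to-end sketch (not compiled here; crate paths and error
//! handling are assumed, mirroring the tests at the bottom of this file):
//!
//! ```ignore
//! use hermes_core::directories::RamDirectory;
//! use hermes_core::dsl::{Document, SchemaBuilder};
//! use hermes_core::index::{Index, IndexConfig, IndexWriter};
//!
//! async fn demo() -> hermes_core::error::Result<()> {
//!     // Schema with a single indexed + stored text field.
//!     let mut builder = SchemaBuilder::default();
//!     let title = builder.add_text_field("title", true, true);
//!     let schema = builder.build();
//!
//!     let dir = RamDirectory::new();
//!     let config = IndexConfig::default();
//!
//!     // Write a document and commit a segment.
//!     let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone()).await?;
//!     let mut doc = Document::new();
//!     doc.add_text(title, "Hello World");
//!     writer.add_document(doc)?;
//!     writer.commit().await?;
//!
//!     // Open the index and run a query-string search.
//!     let index = Index::open(dir, config).await?;
//!     let results = index.query("hello", 10).await?;
//!     println!("hits: {}", results.hits.len());
//!     Ok(())
//! }
//! ```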

#[cfg(feature = "native")]
use crate::dsl::Schema;
#[cfg(feature = "native")]
use crate::error::Result;
#[cfg(feature = "native")]
use crate::structures::{CoarseCentroids, PQCodebook};
#[cfg(feature = "native")]
use rustc_hash::FxHashMap;
#[cfg(feature = "native")]
use std::sync::Arc;

mod searcher;
pub use searcher::Searcher;

#[cfg(feature = "native")]
mod reader;
#[cfg(feature = "native")]
mod vector_builder;
#[cfg(feature = "native")]
mod writer;
#[cfg(feature = "native")]
pub use reader::IndexReader;
#[cfg(feature = "native")]
pub use writer::IndexWriter;

mod metadata;
pub use metadata::{FieldVectorMeta, INDEX_META_FILENAME, IndexMetadata, VectorIndexState};

#[cfg(feature = "native")]
mod helpers;
#[cfg(feature = "native")]
pub use helpers::{
    IndexingStats, SchemaConfig, SchemaFieldConfig, create_index_at_path, create_index_from_sdl,
    index_documents_from_reader, index_json_document, parse_schema,
};

/// Default file name for the slice cache
pub const SLICE_CACHE_FILENAME: &str = "index.slicecache";

/// Index configuration
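///
/// Defaults are meant to be overridden selectively with struct update syntax,
/// as the tests below do (the values here are illustrative only):
///
/// ```ignore
/// let config = IndexConfig {
///     // Smaller memory budget: more frequent auto-commits, more (smaller) segments.
///     max_indexing_memory_bytes: 64 * 1024 * 1024,
///     // Poll for newly committed segments twice per second.
///     reload_interval_ms: 500,
///     ..Default::default()
/// };
/// ```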
#[derive(Debug, Clone)]
pub struct IndexConfig {
    /// Number of threads for CPU-intensive tasks (search parallelism)
    pub num_threads: usize,
    /// Number of parallel segment builders (documents distributed round-robin)
    pub num_indexing_threads: usize,
    /// Number of threads for parallel block compression within each segment
    pub num_compression_threads: usize,
    /// Block cache size for term dictionary per segment
    pub term_cache_blocks: usize,
    /// Block cache size for document store per segment
    pub store_cache_blocks: usize,
    /// Max memory (bytes) across all builders before auto-commit (global limit)
    pub max_indexing_memory_bytes: usize,
    /// Merge policy for background segment merging
    pub merge_policy: Box<dyn crate::merge::MergePolicy>,
    /// Index optimization mode (adaptive, size-optimized, performance-optimized)
    pub optimization: crate::structures::IndexOptimization,
    /// Reload interval in milliseconds for IndexReader (how often to check for new segments)
    pub reload_interval_ms: u64,
}

impl Default for IndexConfig {
    fn default() -> Self {
        #[cfg(feature = "native")]
        let cpus = num_cpus::get().max(1);
        #[cfg(not(feature = "native"))]
        let cpus = 1;

        Self {
            num_threads: cpus,
            num_indexing_threads: 1,
            num_compression_threads: cpus,
            term_cache_blocks: 256,
            store_cache_blocks: 32,
            max_indexing_memory_bytes: 256 * 1024 * 1024, // 256 MB default
            merge_policy: Box::new(crate::merge::TieredMergePolicy::default()),
            optimization: crate::structures::IndexOptimization::default(),
            reload_interval_ms: 1000, // 1 second default
        }
    }
}

/// Multi-segment async Index
///
/// The central concept for search. Owns segment lifecycle and provides:
/// - `Index::create()` / `Index::open()` - create or open an index
/// - `index.writer()` - get an IndexWriter for adding documents
/// - `index.reader()` - get an IndexReader for searching with reload policy
///
/// All segment management is delegated to SegmentManager.
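///
/// A read-side sketch (not compiled here; the `hermes_core` crate path is
/// assumed, and errors are propagated with `?`):
///
/// ```ignore
/// async fn show_docs<D>(index: &hermes_core::index::Index<D>) -> hermes_core::error::Result<()>
/// where
///     D: hermes_core::directories::DirectoryWriter + 'static,
/// {
///     // The reader is created lazily, cached, and reloads segments on its own.
///     let reader = index.reader().await?;
///     let searcher = reader.searcher().await?;
///     println!("total docs: {}", searcher.num_docs());
///
///     // Convenience helpers on `Index` wrap the same reader/searcher pair.
///     let response = index.query("title:hello", 10).await?;
///     for hit in &response.hits {
///         if let Some(doc) = index.get_document(&hit.address).await? {
///             println!("score={} doc={:?}", hit.score, doc.to_json(index.schema()));
///         }
///     }
///     Ok(())
/// }
/// ```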
#[cfg(feature = "native")]
pub struct Index<D: crate::directories::DirectoryWriter + 'static> {
    directory: Arc<D>,
    schema: Arc<Schema>,
    config: IndexConfig,
    /// Segment manager - owns segments, tracker, and metadata
    segment_manager: Arc<crate::merge::SegmentManager<D>>,
    /// Trained centroids for vector search
    trained_centroids: FxHashMap<u32, Arc<CoarseCentroids>>,
    /// Trained codebooks for vector search
    trained_codebooks: FxHashMap<u32, Arc<PQCodebook>>,
    /// Cached reader (created lazily, reused across calls)
    cached_reader: tokio::sync::OnceCell<IndexReader<D>>,
}

#[cfg(feature = "native")]
impl<D: crate::directories::DirectoryWriter + 'static> Index<D> {
    /// Create a new index in the directory
    pub async fn create(directory: D, schema: Schema, config: IndexConfig) -> Result<Self> {
        let directory = Arc::new(directory);
        let schema = Arc::new(schema);
        let metadata = IndexMetadata::new((*schema).clone());

        let segment_manager = Arc::new(crate::merge::SegmentManager::new(
            Arc::clone(&directory),
            Arc::clone(&schema),
            metadata,
            config.merge_policy.clone_box(),
            config.term_cache_blocks,
        ));

        // Save initial metadata
        segment_manager.update_metadata(|_| {}).await?;

        Ok(Self {
            directory,
            schema,
            config,
            segment_manager,
            trained_centroids: FxHashMap::default(),
            trained_codebooks: FxHashMap::default(),
            cached_reader: tokio::sync::OnceCell::new(),
        })
    }

    /// Open an existing index from a directory
    pub async fn open(directory: D, config: IndexConfig) -> Result<Self> {
        let directory = Arc::new(directory);

        // Load metadata (includes schema)
        let metadata = IndexMetadata::load(directory.as_ref()).await?;
        let schema = Arc::new(metadata.schema.clone());

        // Load trained structures
        let trained = metadata.load_trained_structures(directory.as_ref()).await;
        let trained_centroids = trained
            .as_ref()
            .map(|t| t.centroids.clone())
            .unwrap_or_default();
        let trained_codebooks = trained
            .as_ref()
            .map(|t| t.codebooks.clone())
            .unwrap_or_default();

        log::info!(
            "[Index::open] trained_centroids fields={:?}, trained_codebooks fields={:?}",
            trained_centroids.keys().collect::<Vec<_>>(),
            trained_codebooks.keys().collect::<Vec<_>>(),
        );

        let segment_manager = Arc::new(crate::merge::SegmentManager::new(
            Arc::clone(&directory),
            Arc::clone(&schema),
            metadata,
            config.merge_policy.clone_box(),
            config.term_cache_blocks,
        ));

        Ok(Self {
            directory,
            schema,
            config,
            segment_manager,
            trained_centroids,
            trained_codebooks,
            cached_reader: tokio::sync::OnceCell::new(),
        })
    }

    /// Get the schema
    pub fn schema(&self) -> &Schema {
        &self.schema
    }

    /// Get a reference to the underlying directory
    pub fn directory(&self) -> &D {
        &self.directory
    }

    /// Get the segment manager
    pub fn segment_manager(&self) -> &Arc<crate::merge::SegmentManager<D>> {
        &self.segment_manager
    }

    /// Get an IndexReader for searching (with reload policy)
    ///
    /// The reader is cached and reused across calls. The reader's internal
    /// searcher will reload segments based on its reload interval (configurable via IndexConfig).
    pub async fn reader(&self) -> Result<&IndexReader<D>> {
        self.cached_reader
            .get_or_try_init(|| async {
                IndexReader::from_segment_manager_with_reload_interval(
                    Arc::clone(&self.schema),
                    Arc::clone(&self.segment_manager),
                    self.config.term_cache_blocks,
                    self.config.reload_interval_ms,
                )
                .await
            })
            .await
    }

    /// Get the config
    pub fn config(&self) -> &IndexConfig {
        &self.config
    }

    /// Get trained centroids
    pub fn trained_centroids(&self) -> &FxHashMap<u32, Arc<CoarseCentroids>> {
        &self.trained_centroids
    }

    /// Get trained codebooks
    pub fn trained_codebooks(&self) -> &FxHashMap<u32, Arc<PQCodebook>> {
        &self.trained_codebooks
    }

    /// Get segment readers for query execution (convenience method)
    pub async fn segment_readers(&self) -> Result<Vec<Arc<crate::segment::SegmentReader>>> {
        let reader = self.reader().await?;
        let searcher = reader.searcher().await?;
        Ok(searcher.segment_readers().to_vec())
    }

    /// Total number of documents across all segments
    pub async fn num_docs(&self) -> Result<u32> {
        let reader = self.reader().await?;
        let searcher = reader.searcher().await?;
        Ok(searcher.num_docs())
    }

    /// Get a document by global doc_id
    pub async fn doc(&self, doc_id: crate::DocId) -> Result<Option<crate::dsl::Document>> {
        let reader = self.reader().await?;
        let searcher = reader.searcher().await?;
        searcher.doc(doc_id).await
    }

    /// Get default fields for search
    pub fn default_fields(&self) -> Vec<crate::Field> {
        if !self.schema.default_fields().is_empty() {
            self.schema.default_fields().to_vec()
        } else {
            self.schema
                .fields()
                .filter(|(_, entry)| {
                    entry.indexed && entry.field_type == crate::dsl::FieldType::Text
                })
                .map(|(field, _)| field)
                .collect()
        }
    }

    /// Get tokenizer registry
    pub fn tokenizers(&self) -> Arc<crate::tokenizer::TokenizerRegistry> {
        Arc::new(crate::tokenizer::TokenizerRegistry::default())
    }

    /// Create a query parser for this index
    pub fn query_parser(&self) -> crate::dsl::QueryLanguageParser {
        let default_fields = self.default_fields();
        let tokenizers = self.tokenizers();

        let query_routers = self.schema.query_routers();
        if !query_routers.is_empty()
            && let Ok(router) = crate::dsl::QueryFieldRouter::from_rules(query_routers)
        {
            return crate::dsl::QueryLanguageParser::with_router(
                Arc::clone(&self.schema),
                default_fields,
                tokenizers,
                router,
            );
        }

        crate::dsl::QueryLanguageParser::new(Arc::clone(&self.schema), default_fields, tokenizers)
    }

    /// Parse and search using a query string
    pub async fn query(
        &self,
        query_str: &str,
        limit: usize,
    ) -> Result<crate::query::SearchResponse> {
        self.query_offset(query_str, limit, 0).await
    }

    /// Query with offset for pagination
    pub async fn query_offset(
        &self,
        query_str: &str,
        limit: usize,
        offset: usize,
    ) -> Result<crate::query::SearchResponse> {
        let parser = self.query_parser();
        let query = parser
            .parse(query_str)
            .map_err(crate::error::Error::Query)?;
        self.search_offset(query.as_ref(), limit, offset).await
    }

    /// Search and return results
    pub async fn search(
        &self,
        query: &dyn crate::query::Query,
        limit: usize,
    ) -> Result<crate::query::SearchResponse> {
        self.search_offset(query, limit, 0).await
    }

    /// Search with offset for pagination
    pub async fn search_offset(
        &self,
        query: &dyn crate::query::Query,
        limit: usize,
        offset: usize,
    ) -> Result<crate::query::SearchResponse> {
        let reader = self.reader().await?;
        let searcher = reader.searcher().await?;
        let segments = searcher.segment_readers();

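        // Ask each segment for enough candidates to cover the requested page
        // (offset + limit), so the global merge below can paginate correctly.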
        let fetch_limit = offset + limit;

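        // Fan the query out to every segment concurrently; each per-segment hit
        // is tagged with its segment id so a DocAddress can be built later.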
        let futures: Vec<_> = segments
            .iter()
            .map(|segment| {
                let sid = segment.meta().id;
                async move {
                    let results =
                        crate::query::search_segment(segment.as_ref(), query, fetch_limit).await?;
                    Ok::<_, crate::error::Error>(
                        results
                            .into_iter()
                            .map(move |r| (sid, r))
                            .collect::<Vec<_>>(),
                    )
                }
            })
            .collect();

        let batches = futures::future::try_join_all(futures).await?;
        let mut all_results: Vec<(u128, crate::query::SearchResult)> =
            Vec::with_capacity(batches.iter().map(|b| b.len()).sum());
        for batch in batches {
            all_results.extend(batch);
        }

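        // Merge all per-segment hits into a single ranking by descending score.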
        all_results.sort_by(|a, b| {
            b.1.score
                .partial_cmp(&a.1.score)
                .unwrap_or(std::cmp::Ordering::Equal)
        });

        let total_hits = all_results.len() as u32;

        let hits: Vec<crate::query::SearchHit> = all_results
            .into_iter()
            .skip(offset)
            .take(limit)
            .map(|(segment_id, result)| crate::query::SearchHit {
                address: crate::query::DocAddress::new(segment_id, result.doc_id),
                score: result.score,
                matched_fields: result.extract_ordinals(),
            })
            .collect();

        Ok(crate::query::SearchResponse { hits, total_hits })
    }

    /// Get a document by its unique address
    pub async fn get_document(
        &self,
        address: &crate::query::DocAddress,
    ) -> Result<Option<crate::dsl::Document>> {
        let segment_id = address.segment_id_u128().ok_or_else(|| {
            crate::error::Error::Query(format!("Invalid segment ID: {}", address.segment_id))
        })?;

        let reader = self.reader().await?;
        let searcher = reader.searcher().await?;

        for segment in searcher.segment_readers() {
            if segment.meta().id == segment_id {
                // Convert global doc_id to segment-local doc_id
                let local_doc_id = address.doc_id.wrapping_sub(segment.doc_id_offset());
                return segment.doc(local_doc_id).await;
            }
        }

        Ok(None)
    }

    /// Reload is no longer needed; the reader reloads automatically based on its reload policy
    pub async fn reload(&self) -> Result<()> {
        // No-op - reader reloads automatically based on policy
        Ok(())
    }

    /// Get posting lists for a term across all segments
    pub async fn get_postings(
        &self,
        field: crate::Field,
        term: &[u8],
    ) -> Result<
        Vec<(
            Arc<crate::segment::SegmentReader>,
            crate::structures::BlockPostingList,
        )>,
    > {
        let segments = self.segment_readers().await?;
        let mut results = Vec::new();

        for segment in segments {
            if let Some(postings) = segment.get_postings(field, term).await? {
                results.push((segment, postings));
            }
        }

        Ok(results)
    }
}

/// Native-only methods for Index
#[cfg(feature = "native")]
impl<D: crate::directories::DirectoryWriter + 'static> Index<D> {
    /// Get an IndexWriter for adding documents
    pub fn writer(&self) -> writer::IndexWriter<D> {
        writer::IndexWriter::from_index(self)
    }
}

// TODO: Add back warmup_and_save_slice_cache when slice caching is re-integrated

#[cfg(test)]
mod tests {
    use super::*;
    use crate::directories::RamDirectory;
    use crate::dsl::{Document, SchemaBuilder};

    #[tokio::test]
    async fn test_index_create_and_search() {
        let mut schema_builder = SchemaBuilder::default();
        let title = schema_builder.add_text_field("title", true, true);
        let body = schema_builder.add_text_field("body", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        // Create index and add documents
        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        let mut doc1 = Document::new();
        doc1.add_text(title, "Hello World");
        doc1.add_text(body, "This is the first document");
        writer.add_document(doc1).unwrap();

        let mut doc2 = Document::new();
        doc2.add_text(title, "Goodbye World");
        doc2.add_text(body, "This is the second document");
        writer.add_document(doc2).unwrap();

        writer.commit().await.unwrap();

        // Open for reading
        let index = Index::open(dir, config).await.unwrap();
        assert_eq!(index.num_docs().await.unwrap(), 2);

        // Check postings
        let postings = index.get_postings(title, b"world").await.unwrap();
        assert_eq!(postings.len(), 1); // One segment
        assert_eq!(postings[0].1.doc_count(), 2); // Two docs with "world"

        // Retrieve document
        let doc = index.doc(0).await.unwrap().unwrap();
        assert_eq!(doc.get_first(title).unwrap().as_text(), Some("Hello World"));
    }

    #[tokio::test]
    async fn test_multiple_segments() {
        let mut schema_builder = SchemaBuilder::default();
        let title = schema_builder.add_text_field("title", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig {
            max_indexing_memory_bytes: 1024, // Very small to trigger frequent flushes
            ..Default::default()
        };

        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        // Add documents in batches to create multiple segments
        for batch in 0..3 {
            for i in 0..5 {
                let mut doc = Document::new();
                doc.add_text(title, format!("Document {} batch {}", i, batch));
                writer.add_document(doc).unwrap();
            }
            writer.commit().await.unwrap();
        }

        // Open and check
        let index = Index::open(dir, config).await.unwrap();
        assert_eq!(index.num_docs().await.unwrap(), 15);
        // With queue-based indexing, exact segment count varies
        assert!(
            index.segment_readers().await.unwrap().len() >= 2,
            "Expected multiple segments"
        );
    }

    #[tokio::test]
    async fn test_segment_merge() {
        let mut schema_builder = SchemaBuilder::default();
        let title = schema_builder.add_text_field("title", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig {
            max_indexing_memory_bytes: 512, // Very small to trigger frequent flushes
            ..Default::default()
        };

        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        // Create multiple segments by flushing between batches
        for batch in 0..3 {
            for i in 0..3 {
                let mut doc = Document::new();
                doc.add_text(title, format!("Document {} batch {}", i, batch));
                writer.add_document(doc).unwrap();
            }
            writer.flush().await.unwrap();
        }
        writer.commit().await.unwrap();

        // Should have multiple segments (at least 2, one per flush with docs)
        let index = Index::open(dir.clone(), config.clone()).await.unwrap();
        assert!(
            index.segment_readers().await.unwrap().len() >= 2,
            "Expected multiple segments"
        );

        // Force merge
        let writer = IndexWriter::open(dir.clone(), config.clone())
            .await
            .unwrap();
        writer.force_merge().await.unwrap();

        // Should have 1 segment now
        let index = Index::open(dir, config).await.unwrap();
        assert_eq!(index.segment_readers().await.unwrap().len(), 1);
        assert_eq!(index.num_docs().await.unwrap(), 9);

        // Verify all documents accessible (order may vary with queue-based indexing)
        let mut found_docs = 0;
        for i in 0..9 {
            if index.doc(i).await.unwrap().is_some() {
                found_docs += 1;
            }
        }
        assert_eq!(found_docs, 9);
    }

    #[tokio::test]
    async fn test_match_query() {
        let mut schema_builder = SchemaBuilder::default();
        let title = schema_builder.add_text_field("title", true, true);
        let body = schema_builder.add_text_field("body", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        let mut doc1 = Document::new();
        doc1.add_text(title, "rust programming");
        doc1.add_text(body, "Learn rust language");
        writer.add_document(doc1).unwrap();

        let mut doc2 = Document::new();
        doc2.add_text(title, "python programming");
        doc2.add_text(body, "Learn python language");
        writer.add_document(doc2).unwrap();

        writer.commit().await.unwrap();

        let index = Index::open(dir, config).await.unwrap();

        // Test match query with multiple default fields
        let results = index.query("rust", 10).await.unwrap();
        assert_eq!(results.hits.len(), 1);

        // Test match query with multiple tokens
        let results = index.query("rust programming", 10).await.unwrap();
        assert!(!results.hits.is_empty());

        // Verify hit has address (segment_id + doc_id)
        let hit = &results.hits[0];
        assert!(!hit.address.segment_id.is_empty(), "Should have segment_id");

        // Verify document retrieval by address
        let doc = index.get_document(&hit.address).await.unwrap().unwrap();
        assert!(
            !doc.field_values().is_empty(),
            "Doc should have field values"
        );

        // Also verify doc retrieval directly by global doc_id
        let doc = index.doc(0).await.unwrap().unwrap();
        assert!(
            !doc.field_values().is_empty(),
            "Doc should have field values"
        );
    }

    #[tokio::test]
    async fn test_slice_cache_warmup_and_load() {
        use crate::directories::SliceCachingDirectory;

        let mut schema_builder = SchemaBuilder::default();
        let title = schema_builder.add_text_field("title", true, true);
        let body = schema_builder.add_text_field("body", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        // Create index with some documents
        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        for i in 0..10 {
            let mut doc = Document::new();
            doc.add_text(title, format!("Document {} about rust", i));
            doc.add_text(body, format!("This is body text number {}", i));
            writer.add_document(doc).unwrap();
        }
        writer.commit().await.unwrap();

        // Open with slice caching and perform some operations to warm up cache
        let caching_dir = SliceCachingDirectory::new(dir.clone(), 1024 * 1024);
        let index = Index::open(caching_dir, config.clone()).await.unwrap();

        // Perform a search to warm up the cache
        let results = index.query("rust", 10).await.unwrap();
        assert!(!results.hits.is_empty());

        // Check cache stats - should have cached some data
        let stats = index.directory.stats();
        assert!(stats.total_bytes > 0, "Cache should have data after search");
    }

    #[tokio::test]
    async fn test_multivalue_field_indexing_and_search() {
        let mut schema_builder = SchemaBuilder::default();
        let uris = schema_builder.add_text_field("uris", true, true);
        let title = schema_builder.add_text_field("title", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        // Create index and add document with multi-value field
        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        let mut doc = Document::new();
        doc.add_text(uris, "one");
        doc.add_text(uris, "two");
        doc.add_text(title, "Test Document");
        writer.add_document(doc).unwrap();

        // Add another document with different uris
        let mut doc2 = Document::new();
        doc2.add_text(uris, "three");
        doc2.add_text(title, "Another Document");
        writer.add_document(doc2).unwrap();

        writer.commit().await.unwrap();

        // Open for reading
        let index = Index::open(dir, config).await.unwrap();
        assert_eq!(index.num_docs().await.unwrap(), 2);

        // Verify document retrieval preserves all values
        let doc = index.doc(0).await.unwrap().unwrap();
        let all_uris: Vec<_> = doc.get_all(uris).collect();
        assert_eq!(all_uris.len(), 2, "Should have 2 uris values");
        assert_eq!(all_uris[0].as_text(), Some("one"));
        assert_eq!(all_uris[1].as_text(), Some("two"));

        // Verify to_json returns array for multi-value field
        let json = doc.to_json(index.schema());
        let uris_json = json.get("uris").unwrap();
        assert!(uris_json.is_array(), "Multi-value field should be an array");
        let uris_arr = uris_json.as_array().unwrap();
        assert_eq!(uris_arr.len(), 2);
        assert_eq!(uris_arr[0].as_str(), Some("one"));
        assert_eq!(uris_arr[1].as_str(), Some("two"));

        // Verify both values are searchable
        let results = index.query("uris:one", 10).await.unwrap();
        assert_eq!(results.hits.len(), 1, "Should find doc with 'one'");
        assert_eq!(results.hits[0].address.doc_id, 0);

        let results = index.query("uris:two", 10).await.unwrap();
        assert_eq!(results.hits.len(), 1, "Should find doc with 'two'");
        assert_eq!(results.hits[0].address.doc_id, 0);

        let results = index.query("uris:three", 10).await.unwrap();
        assert_eq!(results.hits.len(), 1, "Should find doc with 'three'");
        assert_eq!(results.hits[0].address.doc_id, 1);

        // Verify searching for non-existent value returns no results
        let results = index.query("uris:nonexistent", 10).await.unwrap();
        assert_eq!(results.hits.len(), 0, "Should not find non-existent value");
    }

    /// Comprehensive test for WAND optimization in BooleanQuery OR queries
    ///
    /// This test verifies that:
    /// 1. BooleanQuery with multiple SHOULD term queries uses WAND automatically
    /// 2. Search results are correct regardless of WAND optimization
    /// 3. Scores are reasonable for matching documents
    #[tokio::test]
    async fn test_wand_optimization_for_or_queries() {
        use crate::query::{BooleanQuery, TermQuery};

        let mut schema_builder = SchemaBuilder::default();
        let content = schema_builder.add_text_field("content", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        // Create index with documents containing various terms
        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        // Doc 0: contains "rust" and "programming"
        let mut doc = Document::new();
        doc.add_text(content, "rust programming language is fast");
        writer.add_document(doc).unwrap();

        // Doc 1: contains "rust" only
        let mut doc = Document::new();
        doc.add_text(content, "rust is a systems language");
        writer.add_document(doc).unwrap();

        // Doc 2: contains "programming" only
        let mut doc = Document::new();
        doc.add_text(content, "programming is fun");
        writer.add_document(doc).unwrap();

        // Doc 3: contains "python" (neither rust nor programming)
        let mut doc = Document::new();
        doc.add_text(content, "python is easy to learn");
        writer.add_document(doc).unwrap();

        // Doc 4: contains both "rust" and "programming" multiple times
        let mut doc = Document::new();
        doc.add_text(content, "rust rust programming programming systems");
        writer.add_document(doc).unwrap();

        writer.commit().await.unwrap();

        // Open for reading
        let index = Index::open(dir.clone(), config.clone()).await.unwrap();

        // Test 1: Pure OR query with multiple terms (should use WAND automatically)
        let or_query = BooleanQuery::new()
            .should(TermQuery::text(content, "rust"))
            .should(TermQuery::text(content, "programming"));

        let results = index.search(&or_query, 10).await.unwrap();

        // Should find docs 0, 1, 2, 4 (all that contain "rust" OR "programming")
        assert_eq!(results.hits.len(), 4, "Should find exactly 4 documents");

        let doc_ids: Vec<u32> = results.hits.iter().map(|h| h.address.doc_id).collect();
        assert!(doc_ids.contains(&0), "Should find doc 0");
        assert!(doc_ids.contains(&1), "Should find doc 1");
        assert!(doc_ids.contains(&2), "Should find doc 2");
        assert!(doc_ids.contains(&4), "Should find doc 4");
        assert!(
            !doc_ids.contains(&3),
            "Should NOT find doc 3 (only has 'python')"
        );

        // Test 2: Single term query (should NOT use WAND, but still work)
        let single_query = BooleanQuery::new().should(TermQuery::text(content, "rust"));

        let results = index.search(&single_query, 10).await.unwrap();
        assert_eq!(results.hits.len(), 3, "Should find 3 documents with 'rust'");

        // Test 3: Query with MUST (should NOT use WAND)
        let must_query = BooleanQuery::new()
            .must(TermQuery::text(content, "rust"))
            .should(TermQuery::text(content, "programming"));

        let results = index.search(&must_query, 10).await.unwrap();
        // Must have "rust", optionally "programming"
        assert_eq!(results.hits.len(), 3, "Should find 3 documents with 'rust'");

        // Test 4: Query with MUST_NOT (should NOT use WAND)
        let must_not_query = BooleanQuery::new()
            .should(TermQuery::text(content, "rust"))
            .should(TermQuery::text(content, "programming"))
            .must_not(TermQuery::text(content, "systems"));

        let results = index.search(&must_not_query, 10).await.unwrap();
        // Should exclude docs with "systems" (doc 1 and 4)
        let doc_ids: Vec<u32> = results.hits.iter().map(|h| h.address.doc_id).collect();
        assert!(
            !doc_ids.contains(&1),
            "Should NOT find doc 1 (has 'systems')"
        );
        assert!(
            !doc_ids.contains(&4),
            "Should NOT find doc 4 (has 'systems')"
        );

        // Test 5: Verify top-k limit works correctly with WAND
        let or_query = BooleanQuery::new()
            .should(TermQuery::text(content, "rust"))
            .should(TermQuery::text(content, "programming"));

        let results = index.search(&or_query, 2).await.unwrap();
        assert_eq!(results.hits.len(), 2, "Should return only top 2 results");

        // Top results should be docs that match both terms (higher scores)
        // Doc 0 and 4 contain both "rust" and "programming"
    }

    /// Test that WAND optimization produces same results as non-WAND for correctness
    #[tokio::test]
    async fn test_wand_results_match_standard_boolean() {
        use crate::query::{BooleanQuery, TermQuery, WandOrQuery};

        let mut schema_builder = SchemaBuilder::default();
        let content = schema_builder.add_text_field("content", true, true);
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        // Add several documents
        for i in 0..10 {
            let mut doc = Document::new();
            let text = match i % 4 {
                0 => "apple banana cherry",
                1 => "apple orange",
                2 => "banana grape",
                _ => "cherry date",
            };
            doc.add_text(content, text);
            writer.add_document(doc).unwrap();
        }

        writer.commit().await.unwrap();
        let index = Index::open(dir.clone(), config.clone()).await.unwrap();

        // Compare explicit WandOrQuery with auto-optimized BooleanQuery
        let wand_query = WandOrQuery::new(content).term("apple").term("banana");

        let bool_query = BooleanQuery::new()
            .should(TermQuery::text(content, "apple"))
            .should(TermQuery::text(content, "banana"));

        let wand_results = index.search(&wand_query, 10).await.unwrap();
        let bool_results = index.search(&bool_query, 10).await.unwrap();

        // Both should find the same documents
        assert_eq!(
            wand_results.hits.len(),
            bool_results.hits.len(),
            "WAND and Boolean should find same number of docs"
        );

        let wand_docs: std::collections::HashSet<u32> =
            wand_results.hits.iter().map(|h| h.address.doc_id).collect();
        let bool_docs: std::collections::HashSet<u32> =
            bool_results.hits.iter().map(|h| h.address.doc_id).collect();

        assert_eq!(
            wand_docs, bool_docs,
            "WAND and Boolean should find same documents"
        );
    }

    #[tokio::test]
    async fn test_vector_index_threshold_switch() {
        use crate::dsl::{DenseVectorConfig, DenseVectorQuantization, VectorIndexType};

        // Create schema with dense vector field configured for IVF-RaBitQ
        let mut schema_builder = SchemaBuilder::default();
        let title = schema_builder.add_text_field("title", true, true);
        let embedding = schema_builder.add_dense_vector_field_with_config(
            "embedding",
            true, // indexed
            true, // stored
            DenseVectorConfig {
                dim: 8,
                index_type: VectorIndexType::IvfRaBitQ,
                quantization: DenseVectorQuantization::F32,
                num_clusters: Some(4), // Small for test
                nprobe: 2,
                build_threshold: Some(50), // Build when we have 50+ vectors
            },
        );
        let schema = schema_builder.build();

        let dir = RamDirectory::new();
        let config = IndexConfig::default();

        // Phase 1: Add vectors below threshold (should use Flat index)
        let writer = IndexWriter::create(dir.clone(), schema.clone(), config.clone())
            .await
            .unwrap();

        // Add 30 documents (below threshold of 50)
        for i in 0..30 {
            let mut doc = Document::new();
            doc.add_text(title, format!("Document {}", i));
            // Simple embedding: the value i/30 repeated across all 8 dimensions
            let vec: Vec<f32> = (0..8).map(|_| (i as f32) / 30.0).collect();
            doc.add_dense_vector(embedding, vec);
            writer.add_document(doc).unwrap();
        }
        writer.commit().await.unwrap();

        // Open index and verify it's using Flat (not built yet)
        let index = Index::open(dir.clone(), config.clone()).await.unwrap();
        assert!(
            index.trained_centroids.is_empty(),
            "Should not have trained centroids below threshold"
        );

        // Search should work with Flat index
        let query_vec: Vec<f32> = vec![0.5; 8];
        let segments = index.segment_readers().await.unwrap();
        assert!(!segments.is_empty());

        let results = segments[0]
            .search_dense_vector(
                embedding,
                &query_vec,
                5,
                0,
                1,
                crate::query::MultiValueCombiner::Max,
            )
            .await
            .unwrap();
        assert!(!results.is_empty(), "Flat search should return results");

        // Phase 2: Add more vectors to cross threshold
        let writer = IndexWriter::open(dir.clone(), config.clone())
            .await
            .unwrap();

        // Add 30 more documents (total 60, above threshold of 50)
        for i in 30..60 {
            let mut doc = Document::new();
            doc.add_text(title, format!("Document {}", i));
            let vec: Vec<f32> = (0..8).map(|_| (i as f32) / 60.0).collect();
            doc.add_dense_vector(embedding, vec);
            writer.add_document(doc).unwrap();
        }
        // Commit auto-triggers vector index build when threshold is crossed
        writer.commit().await.unwrap();

        // Verify centroids were trained (auto-triggered)
        assert!(
            writer.is_vector_index_built(embedding).await,
            "Vector index should be built after crossing threshold"
        );

        // Reopen index and verify trained structures are loaded
        let index = Index::open(dir.clone(), config.clone()).await.unwrap();
        assert!(
            index.trained_centroids.contains_key(&embedding.0),
            "Should have loaded trained centroids for embedding field"
        );

        // Search should still work
        let segments = index.segment_readers().await.unwrap();
        let results = segments[0]
            .search_dense_vector(
                embedding,
                &query_vec,
                5,
                0,
                1,
                crate::query::MultiValueCombiner::Max,
            )
            .await
            .unwrap();
        assert!(
            !results.is_empty(),
            "Search should return results after build"
        );

        // Phase 3: Verify calling build_vector_index again is a no-op
        let writer = IndexWriter::open(dir.clone(), config.clone())
            .await
            .unwrap();
        writer.build_vector_index().await.unwrap(); // Should skip training

        // Still built
        assert!(writer.is_vector_index_built(embedding).await);
    }
}