1use crate::bloom::BloomFilter;
8use crate::decompress::maybe_decompress;
9use crate::error::{Error, Result};
10use crate::format::*;
11use crate::posting::{PostingEntry, PostingList};
12use crate::string_pool::StringPool;
13use crate::trigram::{Extractor, Trigram};
14use ignore::WalkBuilder;
15use memmap2::Mmap;
16use std::collections::HashMap;
17use std::fs::{self, File};
18use std::io::{BufWriter, Seek, SeekFrom, Write};
19use std::path::{Path, PathBuf};
20use std::time::{Instant, SystemTime, UNIX_EPOCH};
21
/// Incrementally builds a trigram search index shard (`.ix/shard.ix`)
/// for a directory tree.
pub struct Builder {
    /// Root directory walked to discover files to index.
    root: PathBuf,
    /// One record per indexed file, in `file_id` order.
    files: Vec<FileRecord>,
    /// Global inverted index: trigram -> posting entries across all files.
    postings: HashMap<Trigram, Vec<PostingEntry>>,
    /// Interned file paths; serialized into the shard's string-pool section.
    string_pool: StringPool,
    /// Extracts trigrams (with occurrence offsets) from file contents.
    extractor: Extractor,
    /// Counters accumulated over a build/update pass.
    stats: BuildStats,
    /// When true, compressed files are transparently decompressed before
    /// indexing (see `maybe_decompress`). Off by default.
    decompress: bool,
}
31
/// Per-file index data kept in memory and written into the shard's file table.
pub struct FileRecord {
    /// Dense id assigned in processing order; also used as the posting-list file id.
    pub file_id: u32,
    /// Path of the file as produced by the directory walk.
    pub path: PathBuf,
    /// Modification time in nanoseconds since the Unix epoch (0 if unavailable).
    pub mtime_ns: u64,
    /// On-disk size of the file in bytes (before any decompression).
    pub size_bytes: u64,
    /// xxh64 hash of the (possibly decompressed) file contents.
    pub content_hash: u64,
    /// Number of distinct trigrams extracted from this file.
    pub trigram_count: u32,
    /// Always false for stored records: binary files are skipped, not indexed.
    pub is_binary: bool,
    /// Trigram -> occurrence offsets, as produced by
    /// `Extractor::extract_with_offsets`; cached so incremental updates can
    /// rebuild postings without re-reading unchanged files.
    pub trigrams: HashMap<Trigram, Vec<u32>>,
    /// Bloom filter over this file's trigrams, serialized into the shard.
    pub bloom: BloomFilter,
}
43
/// Counters accumulated while scanning files during a build or update pass.
#[derive(Default, Debug)]
pub struct BuildStats {
    /// Files successfully read and indexed.
    pub files_scanned: u64,
    /// Files skipped because their content was detected as binary.
    pub files_skipped_binary: u64,
    /// Files skipped for exceeding the size cap (100 MiB in `process_file`).
    pub files_skipped_size: u64,
    /// Total bytes (on-disk sizes) of all indexed files.
    pub bytes_scanned: u64,
    // NOTE(review): never written in this file — presumably filled in
    // elsewhere (or dead). Confirm before relying on it.
    pub unique_trigrams: u64,
}
52
53impl Builder {
54 pub fn new(root: &Path) -> Self {
55 Self {
56 root: root.to_owned(),
57 files: Vec::new(),
58 postings: HashMap::new(),
59 string_pool: StringPool::new(),
60 extractor: Extractor::new(),
61 stats: BuildStats::default(),
62 decompress: false,
63 }
64 }
65
    /// Enables or disables transparent decompression of compressed files
    /// during indexing (see `maybe_decompress`).
    pub fn set_decompress(&mut self, decompress: bool) {
        self.decompress = decompress;
    }
69
70 pub fn build(&mut self) -> Result<PathBuf> {
71 let start = Instant::now();
72
73 let file_paths = self.discover_files()?;
75
76 let mut next_id = 0u32;
78 for path in file_paths {
79 if self.process_file(next_id, path)? {
80 next_id += 1;
81 }
82 }
83
84 let output_path = self.serialize()?;
86
87 tracing::info!("Build completed in {:?}: {:?}", start.elapsed(), self.stats);
88
89 Ok(output_path)
90 }
91
92 pub fn update(&mut self, changed_files: &[PathBuf]) -> Result<PathBuf> {
93 let start = Instant::now();
94
95 if self.files.is_empty() {
96 return self.build();
97 }
98
99 let changed_set: std::collections::HashSet<_> = changed_files.iter().collect();
100
101 let old_files = std::mem::take(&mut self.files);
102 self.postings = HashMap::new();
103 self.string_pool = StringPool::new();
104
105 let mut next_id = 0u32;
106 for mut record in old_files {
107 if changed_set.contains(&record.path) {
108 if self.process_file(next_id, record.path.clone())? {
110 next_id += 1;
111 }
112 } else if record.path.exists() {
113 self.string_pool.add_path(&record.path);
115 for (&tri, offsets) in &record.trigrams {
116 self.postings.entry(tri).or_default().push(PostingEntry {
117 file_id: next_id,
118 offsets: offsets.clone(),
119 });
120 }
121 record.file_id = next_id;
122 self.files.push(record);
123 next_id += 1;
124 }
125 }
127
128 let mut to_add = Vec::new();
130 {
131 let existing_paths: std::collections::HashSet<_> =
132 self.files.iter().map(|f| &f.path).collect();
133 for path in changed_files {
134 if !existing_paths.contains(path) && path.exists() {
135 to_add.push(path.clone());
136 }
137 }
138 }
139
140 for path in to_add {
141 if self.process_file(next_id, path)? {
142 next_id += 1;
143 }
144 }
145
146 let output_path = self.serialize()?;
147 tracing::info!("Incremental update completed in {:?}", start.elapsed());
148 Ok(output_path)
149 }
150
    /// Number of files currently held in the in-memory index.
    pub fn files_len(&self) -> usize {
        self.files.len()
    }
154
    /// Number of distinct trigrams in the in-memory posting index.
    pub fn trigrams_len(&self) -> usize {
        self.postings.len()
    }
158
159 fn discover_files(&mut self) -> Result<Vec<PathBuf>> {
160 let mut paths = Vec::new();
161
162 let walker = WalkBuilder::new(&self.root)
163 .hidden(false)
164 .git_ignore(true)
165 .require_git(false)
166 .add_custom_ignore_filename(".ixignore")
167 .filter_entry(move |entry| {
168 let path = entry.path();
169 if entry.file_type().map(|t| t.is_dir()).unwrap_or(false) {
171 let name = path.file_name().and_then(|n| n.to_str()).unwrap_or("");
172 if name == "target" || name == ".git" || name == "node_modules" || name == ".ix"
173 {
174 return false;
175 }
176 }
177 true
178 })
179 .build();
180
181 for result in walker {
182 let entry = result.map_err(|e| Error::Config(e.to_string()))?;
183 if entry.file_type().map(|t| t.is_file()).unwrap_or(false) {
184 paths.push(entry.path().to_owned());
185 }
186 }
187 Ok(paths)
188 }
189
190 fn process_file(&mut self, file_id: u32, path: PathBuf) -> Result<bool> {
191 let metadata = fs::metadata(&path)?;
192 let size = metadata.len();
193 let mtime = metadata
194 .modified()?
195 .duration_since(UNIX_EPOCH)
196 .map(|d| d.as_nanos() as u64)
197 .unwrap_or(0);
198
199 if size > 100 * 1024 * 1024 {
200 self.stats.files_skipped_size += 1;
202 return Ok(false);
203 }
204
205 let file = File::open(&path)?;
206 let mmap = unsafe { Mmap::map(&file)? };
207
208 let raw_data = if self.decompress {
210 if let Some(mut reader) = maybe_decompress(&path, &mmap)? {
211 let mut buf = Vec::new();
212 use std::io::Read;
216 reader.read_to_end(&mut buf)?; std::borrow::Cow::Owned(buf)
221 } else {
222 std::borrow::Cow::Borrowed(&mmap[..])
223 }
224 } else {
225 std::borrow::Cow::Borrowed(&mmap[..])
226 };
227
228 let data = &raw_data[..];
229
230 if is_binary(data) {
232 self.stats.files_skipped_binary += 1;
233 return Ok(false);
234 }
235
236 let content_hash = xxhash_rust::xxh64::xxh64(data, 0);
237 let trigrams = self.extractor.extract_with_offsets(data).clone();
238 let trigram_count = trigrams.len() as u32;
239
240 self.string_pool.add_path(&path);
241
242 let mut bloom = BloomFilter::new(256, 5);
243 for (&tri, offsets) in &trigrams {
244 bloom.insert(tri);
245 self.postings.entry(tri).or_default().push(PostingEntry {
246 file_id,
247 offsets: offsets.clone(),
248 });
249 }
250
251 self.files.push(FileRecord {
252 file_id,
253 path,
254 mtime_ns: mtime,
255 size_bytes: size,
256 content_hash,
257 trigram_count,
258 is_binary: false,
259 trigrams,
260 bloom,
261 });
262
263 self.stats.files_scanned += 1;
264 self.stats.bytes_scanned += size;
265
266 Ok(true)
267 }
268
    /// Writes the in-memory index to `.ix/shard.ix` atomically (tmp file +
    /// rename).
    ///
    /// Section write order: header placeholder, file table, posting data,
    /// trigram table, per-file bloom filters, string pool. Each section is
    /// 8-byte aligned. Afterwards the file table's bloom-offset placeholders
    /// and the real header (with section offsets/sizes and a CRC) are patched
    /// in via seeks.
    fn serialize(&mut self) -> Result<PathBuf> {
        let ix_dir = self.root.join(".ix");
        fs::create_dir_all(&ix_dir)?;
        let tmp_path = ix_dir.join("shard.ix.tmp");
        let final_path = ix_dir.join("shard.ix");

        let mut f = BufWriter::new(File::create(&tmp_path)?);

        // Reserve header space; the real header is written at offset 0 at the
        // end, once all section offsets and sizes are known.
        f.write_all(&[0u8; HEADER_SIZE])?;

        // Serialize the string pool up front so per-path (offset, len) info
        // is available while writing the file table below.
        let mut string_pool_buf = std::io::Cursor::new(Vec::new());
        self.string_pool.serialize(&mut string_pool_buf)?;
        let string_pool_data = string_pool_buf.into_inner();

        // --- File table ---
        let file_table_offset = self.align_to_8(&mut f)?;
        for record in &self.files {
            let (path_off, path_len) = self.string_pool.get_info(&record.path);
            f.write_all(&record.file_id.to_le_bytes())?;
            f.write_all(&path_off.to_le_bytes())?;
            f.write_all(&path_len.to_le_bytes())?;
            f.write_all(&[FileStatus::Fresh as u8])?;
            f.write_all(&[0u8])?; // padding byte after the status field
            f.write_all(&record.mtime_ns.to_le_bytes())?;
            f.write_all(&record.size_bytes.to_le_bytes())?;
            f.write_all(&record.content_hash.to_le_bytes())?;
            f.write_all(&record.trigram_count.to_le_bytes())?;
            f.write_all(&0u32.to_le_bytes())?; // bloom-offset placeholder, patched below
            f.write_all(&[0u8; 4])?; // reserved tail of the record
        }
        let file_table_size = f.stream_position()? - file_table_offset;

        // --- Posting data ---
        self.align_to_8(&mut f)?;
        let posting_data_offset = f.stream_position()?;
        let mut posting_infos = HashMap::new();

        // Sorted trigram order makes the trigram table binary-searchable.
        let mut sorted_trigrams: Vec<Trigram> = self.postings.keys().cloned().collect();
        sorted_trigrams.sort_unstable();

        for &tri in &sorted_trigrams {
            let entries = &self.postings[&tri];
            let list = PostingList {
                entries: entries.clone(),
            };
            let encoded = list.encode();
            // Offset relative to the start of the posting-data section.
            let offset = f.stream_position()? - posting_data_offset;
            f.write_all(&encoded)?;
            posting_infos.insert(tri, (offset, encoded.len() as u32, entries.len() as u32));
        }
        let posting_data_size = f.stream_position()? - posting_data_offset;

        // --- Trigram table ---
        self.align_to_8(&mut f)?;
        let trigram_table_offset = f.stream_position()?;
        for &tri in &sorted_trigrams {
            if let Some(&(off, len, freq)) = posting_infos.get(&tri) {
                let abs_off = posting_data_offset + off;
                f.write_all(&tri.to_le_bytes())?;
                // 48-bit absolute posting offset: low 6 bytes of the LE encoding.
                f.write_all(&abs_off.to_le_bytes()[..6])?;
                f.write_all(&len.to_le_bytes())?;
                f.write_all(&freq.to_le_bytes())?;
                f.write_all(&[0u8; 2])?; // pad entry to its fixed size
            }
        }
        let trigram_table_size = f.stream_position()? - trigram_table_offset;

        // --- Per-file bloom filters ---
        self.align_to_8(&mut f)?;
        let bloom_offset = f.stream_position()?;
        let mut bloom_relative_offsets = Vec::new();
        for record in &self.files {
            // Remember each filter's offset relative to the section start,
            // so the file table can be patched to point at it.
            let rel_off = (f.stream_position()? - bloom_offset) as u32;
            bloom_relative_offsets.push(rel_off);
            record.bloom.serialize(&mut f)?;
        }
        let bloom_size = f.stream_position()? - bloom_offset;

        // --- String pool ---
        self.align_to_8(&mut f)?;
        let string_pool_offset = f.stream_position()?;
        f.write_all(&string_pool_data)?;
        let string_pool_size = string_pool_data.len() as u64;

        // Name index is not emitted yet; record an empty section.
        let name_index_offset = f.stream_position()?;
        let name_index_size = 0u64;

        // Patch each record's bloom-offset placeholder in the file table.
        // NOTE(review): assumes the fields before the placeholder total
        // 40 bytes and the reserved tail is 4 bytes (48-byte records) —
        // verify against the format definitions and `path_off`/`path_len`
        // widths from `StringPool::get_info`.
        f.seek(SeekFrom::Start(file_table_offset))?;
        for (i, _record) in self.files.iter().enumerate() {
            f.seek(SeekFrom::Current(40))?;
            f.write_all(&bloom_relative_offsets[i].to_le_bytes())?;
            f.seek(SeekFrom::Current(4))?;
        }

        // --- Header ---
        let created_at = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_micros() as u64;
        let mut header_bytes = [0u8; HEADER_SIZE];
        header_bytes[0..4].copy_from_slice(&MAGIC);
        header_bytes[0x04..0x06].copy_from_slice(&VERSION_MAJOR.to_le_bytes());
        header_bytes[0x06..0x08].copy_from_slice(&VERSION_MINOR.to_le_bytes());
        header_bytes[0x08..0x10].copy_from_slice(
            &(flags::HAS_BLOOM_FILTERS
                | flags::HAS_CONTENT_HASHES
                | flags::POSTING_LISTS_CHECKSUMMED)
                .to_le_bytes(),
        );
        header_bytes[0x10..0x18].copy_from_slice(&created_at.to_le_bytes());
        header_bytes[0x18..0x20].copy_from_slice(&self.stats.bytes_scanned.to_le_bytes());
        header_bytes[0x20..0x24].copy_from_slice(&(self.files.len() as u32).to_le_bytes());
        header_bytes[0x24..0x28].copy_from_slice(&(sorted_trigrams.len() as u32).to_le_bytes());
        header_bytes[0x28..0x30].copy_from_slice(&file_table_offset.to_le_bytes());
        header_bytes[0x30..0x38].copy_from_slice(&file_table_size.to_le_bytes());
        header_bytes[0x38..0x40].copy_from_slice(&trigram_table_offset.to_le_bytes());
        header_bytes[0x40..0x48].copy_from_slice(&trigram_table_size.to_le_bytes());
        header_bytes[0x48..0x50].copy_from_slice(&posting_data_offset.to_le_bytes());
        header_bytes[0x50..0x58].copy_from_slice(&posting_data_size.to_le_bytes());
        header_bytes[0x58..0x60].copy_from_slice(&bloom_offset.to_le_bytes());
        header_bytes[0x60..0x68].copy_from_slice(&bloom_size.to_le_bytes());
        header_bytes[0x68..0x70].copy_from_slice(&string_pool_offset.to_le_bytes());
        header_bytes[0x70..0x78].copy_from_slice(&string_pool_size.to_le_bytes());
        header_bytes[0x78..0x80].copy_from_slice(&name_index_offset.to_le_bytes());
        header_bytes[0x80..0x88].copy_from_slice(&name_index_size.to_le_bytes());

        // CRC over everything before the checksum field itself.
        let crc = crc32c::crc32c(&header_bytes[0..0xF8]);
        header_bytes[0xF8..0xFC].copy_from_slice(&crc.to_le_bytes());

        f.seek(SeekFrom::Start(0))?;
        f.write_all(&header_bytes)?;
        f.flush()?;
        drop(f);

        // Atomic publish: readers never observe a partially written shard.
        fs::rename(&tmp_path, &final_path)?;
        Ok(final_path)
    }
410
411 fn align_to_8<W: Write + Seek>(&self, mut w: W) -> std::io::Result<u64> {
412 let pos = w.stream_position()?;
413 let padding = (8 - (pos % 8)) % 8;
414 if padding > 0 {
415 w.write_all(&vec![0u8; padding as usize])?;
416 }
417 w.stream_position()
418 }
419}