mod gc;
pub mod handle;
pub mod ingest;

#[doc(hidden)]
pub use gc::{FragmentationEntry, FragmentationMap};

use crate::{
    coding::Decode,
    file::{fsync_directory, BLOBS_FOLDER},
    iter_guard::{IterGuard, IterGuardImpl},
    r#abstract::{AbstractTree, RangeItem},
    table::Table,
    tree::inner::MemtableId,
    value::InternalValue,
    version::Version,
    vlog::{Accessor, BlobFile, BlobFileWriter, ValueHandle},
    Cache, Config, DescriptorTable, Memtable, SeqNo, TableId, TreeId, UserKey, UserValue,
};
use handle::BlobIndirection;
use std::{
    io::Cursor,
    ops::RangeBounds,
    path::PathBuf,
    sync::{Arc, MutexGuard},
};

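/// Guard over a single entry yielded by a blob tree iterator.
///
/// The guard keeps the tree and the version the iterator was created from,
/// so a blob indirection can be resolved lazily, only when the value is
/// actually requested.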
pub struct Guard {
    tree: crate::BlobTree,
    version: Version,
    kv: crate::Result<InternalValue>,
}

impl IterGuard for Guard {
    fn into_inner_if(
        self,
        pred: impl Fn(&UserKey) -> bool,
    ) -> crate::Result<(UserKey, Option<UserValue>)> {
        let kv = self.kv?;

        if pred(&kv.key.user_key) {
            resolve_value_handle(
                self.tree.id(),
                self.tree.blobs_folder.as_path(),
                &self.tree.index.config.cache,
                &self.tree.index.config.descriptor_table,
                &self.version,
                kv,
            )
            .map(|(k, v)| (k, Some(v)))
        } else {
            Ok((kv.key.user_key, None))
        }
    }

    fn key(self) -> crate::Result<UserKey> {
        self.kv.map(|kv| kv.key.user_key)
    }

    fn size(self) -> crate::Result<u32> {
        let kv = self.kv?;

        if kv.key.value_type.is_indirection() {
            // The indirection stores the user value's size,
            // so we do not need to read the blob file
            let mut cursor = Cursor::new(kv.value);
            Ok(BlobIndirection::decode_from(&mut cursor)?.size)
        } else {
            #[expect(clippy::cast_possible_truncation, reason = "values are u32 max length")]
            Ok(kv.value.len() as u32)
        }
    }

    fn into_inner(self) -> crate::Result<(UserKey, UserValue)> {
        resolve_value_handle(
            self.tree.id(),
            self.tree.blobs_folder.as_path(),
            &self.tree.index.config.cache,
            &self.tree.index.config.descriptor_table,
            &self.version,
            self.kv?,
        )
    }
}

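/// Resolves a (possible) blob indirection into the actual value.
///
/// If the entry is an indirection, the value is read from the referenced
/// blob file; otherwise, the inline value is returned unchanged.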
fn resolve_value_handle(
    tree_id: TreeId,
    blobs_folder: &std::path::Path,
    cache: &Arc<Cache>,
    descriptor_table: &Arc<DescriptorTable>,
    version: &Version,
    item: InternalValue,
) -> RangeItem {
    if item.key.value_type.is_indirection() {
        let mut cursor = Cursor::new(item.value);
        let vptr = BlobIndirection::decode_from(&mut cursor)?;

        match Accessor::new(&version.blob_files).get(
            tree_id,
            blobs_folder,
            &item.key.user_key,
            &vptr.vhandle,
            cache,
            descriptor_table,
        ) {
            Ok(Some(v)) => {
                let k = item.key.user_key;
                Ok((k, v))
            }
            Ok(None) => {
                // An indirection that points to no live blob means the index
                // tree and the blob files have diverged
                panic!(
                    "value handle ({:?} => {:?}) did not match any blob - this is a bug; version={}",
                    item.key.user_key,
                    vptr.vhandle,
                    version.id(),
                );
            }
            Err(e) => Err(e),
        }
    } else {
        // Not an indirection: the value is stored inline in the index tree
        let k = item.key.user_key;
        let v = item.value;
        Ok((k, v))
    }
}

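/// A key-value separated LSM-tree.
///
/// Large values are stored in separate blob files, while the index tree
/// holds small values inline plus indirections (value handles) pointing
/// into the blob files.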
#[derive(Clone)]
pub struct BlobTree {
    /// Index tree, storing keys, inline values and blob indirections
    #[doc(hidden)]
    pub index: crate::Tree,

    /// Folder that contains the blob files
    blobs_folder: Arc<PathBuf>,
}

impl BlobTree {
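    /// Opens a blob tree: recovers the index tree, makes sure the blobs
    /// folder exists, and restores the blob file ID counter.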
    pub(crate) fn open(config: Config) -> crate::Result<Self> {
        let index = crate::Tree::open(config)?;

        let blobs_folder = index.config.path.join(BLOBS_FOLDER);
        std::fs::create_dir_all(&blobs_folder)?;
        fsync_directory(&blobs_folder)?;

        // Continue numbering blob files after the highest recovered ID
        let blob_file_id_to_continue_with = index
            .current_version()
            .blob_files
            .list_ids()
            .max()
            .map(|x| x + 1)
            .unwrap_or_default();

        index
            .0
            .blob_file_id_counter
            .set(blob_file_id_to_continue_with);

        Ok(Self {
            index,
            blobs_folder: Arc::new(blobs_folder),
        })
    }
}

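// Most methods simply delegate to the inner index tree; the blob-specific
// logic lives in the read paths (which resolve indirections) and in
// `flush_to_tables` (which performs the key-value separation).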
impl AbstractTree for BlobTree {
    fn table_file_cache_size(&self) -> usize {
        self.index.table_file_cache_size()
    }

    fn get_version_history_lock(
        &self,
    ) -> std::sync::RwLockWriteGuard<'_, crate::version::SuperVersions> {
        self.index.get_version_history_lock()
    }

    fn next_table_id(&self) -> TableId {
        self.index.next_table_id()
    }

    fn id(&self) -> crate::TreeId {
        self.index.id()
    }

    fn get_internal_entry(&self, key: &[u8], seqno: SeqNo) -> crate::Result<Option<InternalValue>> {
        self.index.get_internal_entry(key, seqno)
    }

    fn current_version(&self) -> Version {
        self.index.current_version()
    }

    #[cfg(feature = "metrics")]
    fn metrics(&self) -> &Arc<crate::Metrics> {
        self.index.metrics()
    }

    fn version_free_list_len(&self) -> usize {
        self.index.version_free_list_len()
    }

    fn prefix<K: AsRef<[u8]>>(
        &self,
        prefix: K,
        seqno: SeqNo,
        index: Option<(Arc<Memtable>, SeqNo)>,
    ) -> Box<dyn DoubleEndedIterator<Item = IterGuardImpl> + Send + 'static> {
        use crate::range::prefix_to_range;

        let super_version = self.index.get_version_for_snapshot(seqno);
        let tree = self.clone();

        let range = prefix_to_range(prefix.as_ref());

        Box::new(
            crate::Tree::create_internal_range(super_version.clone(), &range, seqno, index).map(
                move |kv| {
                    IterGuardImpl::Blob(Guard {
                        tree: tree.clone(),
                        version: super_version.version.clone(),
                        kv,
                    })
                },
            ),
        )
    }

    fn range<K: AsRef<[u8]>, R: RangeBounds<K>>(
        &self,
        range: R,
        seqno: SeqNo,
        index: Option<(Arc<Memtable>, SeqNo)>,
    ) -> Box<dyn DoubleEndedIterator<Item = IterGuardImpl> + Send + 'static> {
        let super_version = self.index.get_version_for_snapshot(seqno);
        let tree = self.clone();

        Box::new(
            crate::Tree::create_internal_range(super_version.clone(), &range, seqno, index).map(
                move |kv| {
                    IterGuardImpl::Blob(Guard {
                        tree: tree.clone(),
                        version: super_version.version.clone(),
                        kv,
                    })
                },
            ),
        )
    }

    fn tombstone_count(&self) -> u64 {
        self.index.tombstone_count()
    }

    fn weak_tombstone_count(&self) -> u64 {
        self.index.weak_tombstone_count()
    }

    fn weak_tombstone_reclaimable_count(&self) -> u64 {
        self.index.weak_tombstone_reclaimable_count()
    }

    fn drop_range<K: AsRef<[u8]>, R: RangeBounds<K>>(&self, range: R) -> crate::Result<()> {
        self.index.drop_range(range)
    }

    fn clear(&self) -> crate::Result<()> {
        self.index.clear()
    }

    fn major_compact(&self, target_size: u64, seqno_threshold: SeqNo) -> crate::Result<()> {
        self.index.major_compact(target_size, seqno_threshold)
    }

    fn clear_active_memtable(&self) {
        self.index.clear_active_memtable();
    }

    fn l0_run_count(&self) -> usize {
        self.index.l0_run_count()
    }

    fn blob_file_count(&self) -> usize {
        self.current_version().blob_file_count()
    }

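    // For separated values, the user value size is stored in the indirection
    // itself, so no blob file needs to be read.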
    fn size_of<K: AsRef<[u8]>>(&self, key: K, seqno: SeqNo) -> crate::Result<Option<u32>> {
        let Some(item) = self.index.get_internal_entry(key.as_ref(), seqno)? else {
            return Ok(None);
        };

        Ok(Some(if item.key.value_type.is_indirection() {
            let mut cursor = Cursor::new(item.value);
            let vptr = BlobIndirection::decode_from(&mut cursor)?;
            vptr.size
        } else {
            #[expect(clippy::cast_possible_truncation, reason = "values are u32 length max")]
            {
                item.value.len() as u32
            }
        }))
    }

    fn stale_blob_bytes(&self) -> u64 {
        self.current_version().gc_stats().stale_bytes()
    }

    fn filter_size(&self) -> u64 {
        self.index.filter_size()
    }

    fn pinned_filter_size(&self) -> usize {
        self.index.pinned_filter_size()
    }

    fn pinned_block_index_size(&self) -> usize {
        self.index.pinned_block_index_size()
    }

    fn sealed_memtable_count(&self) -> usize {
        self.index.sealed_memtable_count()
    }

    fn get_flush_lock(&self) -> MutexGuard<'_, ()> {
        self.index.get_flush_lock()
    }

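    // Flushes the given stream into table(s), performing key-value separation:
    // values at or above the separation threshold are written to blob files
    // and replaced by an indirection in the table; smaller values stay inline.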
    fn flush_to_tables(
        &self,
        stream: impl Iterator<Item = crate::Result<InternalValue>>,
    ) -> crate::Result<Option<(Vec<Table>, Option<Vec<BlobFile>>)>> {
        use crate::{coding::Encode, file::TABLES_FOLDER, table::multi_writer::MultiWriter};

        let start = std::time::Instant::now();

        let table_folder = self.index.config.path.join(TABLES_FOLDER);

        // Flushes always write to level 0, so look up all policies for level 0
        let data_block_size = self.index.config.data_block_size_policy.get(0);

        let data_block_restart_interval =
            self.index.config.data_block_restart_interval_policy.get(0);
        let index_block_restart_interval =
            self.index.config.index_block_restart_interval_policy.get(0);

        let data_block_compression = self.index.config.data_block_compression_policy.get(0);
        let index_block_compression = self.index.config.index_block_compression_policy.get(0);

        let data_block_hash_ratio = self.index.config.data_block_hash_ratio_policy.get(0);

        let index_partitioning = self.index.config.index_block_partitioning_policy.get(0);
        let filter_partitioning = self.index.config.filter_block_partitioning_policy.get(0);

        log::debug!("Flushing memtable(s) and performing key-value separation, data_block_restart_interval={data_block_restart_interval}, index_block_restart_interval={index_block_restart_interval}, data_block_size={data_block_size}, data_block_compression={data_block_compression}, index_block_compression={index_block_compression}");
        log::debug!("=> to table(s) in {}", table_folder.display());
        log::debug!("=> to blob file(s) at {}", self.blobs_folder.display());

        let mut table_writer = MultiWriter::new(
            table_folder.clone(),
            self.index.table_id_counter.clone(),
            64 * 1_024 * 1_024,
            0,
        )?
        .use_data_block_restart_interval(data_block_restart_interval)
        .use_index_block_restart_interval(index_block_restart_interval)
        .use_data_block_compression(data_block_compression)
        .use_index_block_compression(index_block_compression)
        .use_data_block_size(data_block_size)
        .use_data_block_hash_ratio(data_block_hash_ratio)
        .use_bloom_policy({
            use crate::config::FilterPolicyEntry::{Bloom, None};
            use crate::table::filter::BloomConstructionPolicy;

            match self.index.config.filter_policy.get(0) {
                Bloom(policy) => policy,
                None => BloomConstructionPolicy::BitsPerKey(0.0),
            }
        });

        if index_partitioning {
            table_writer = table_writer.use_partitioned_index();
        }
        if filter_partitioning {
            table_writer = table_writer.use_partitioned_filter();
        }

        #[expect(
            clippy::expect_used,
            reason = "cannot create blob tree without defining kv separation options"
        )]
        let kv_opts = self
            .index
            .config
            .kv_separation_opts
            .as_ref()
            .expect("kv separation options should exist");

        let mut blob_writer = BlobFileWriter::new(
            self.index.0.blob_file_id_counter.clone(),
            self.index.config.path.join(BLOBS_FOLDER),
        )?
        .use_target_size(kv_opts.file_target_size)
        .use_compression(kv_opts.compression);

        let separation_threshold = kv_opts.separation_threshold;

        for item in stream {
            let item = item?;

            if item.is_tombstone() {
                // Tombstones have no value to separate
                table_writer.write(InternalValue::new(item.key, UserValue::empty()))?;
                continue;
            }

            let value = item.value;

            #[expect(clippy::cast_possible_truncation, reason = "values are u32 length max")]
            let value_size = value.len() as u32;

            if value_size >= separation_threshold {
                // Large value: write it to the current blob file and store
                // an indirection (value handle) in the table instead
                let offset = blob_writer.offset();
                let blob_file_id = blob_writer.blob_file_id();
                let on_disk_size = blob_writer.write(&item.key.user_key, item.key.seqno, &value)?;

                let indirection = BlobIndirection {
                    vhandle: ValueHandle {
                        blob_file_id,
                        offset,
                        on_disk_size,
                    },
                    size: value_size,
                };

                table_writer.write({
                    let mut vptr =
                        InternalValue::new(item.key.clone(), indirection.encode_into_vec());
                    vptr.key.value_type = crate::ValueType::Indirection;
                    vptr
                })?;

                table_writer.register_blob(indirection);
            } else {
                // Small value: store it inline in the table
                table_writer.write(InternalValue::new(item.key, value))?;
            }
        }

        let blob_files = blob_writer.finish()?;

        let result = table_writer.finish()?;

        log::debug!("Flushed memtable(s) in {:?}", start.elapsed());

        let pin_filter = self.index.config.filter_block_pinning_policy.get(0);
        let pin_index = self.index.config.index_block_pinning_policy.get(0);

        let tables = result
            .into_iter()
            .map(|(table_id, checksum)| -> crate::Result<Table> {
                Table::recover(
                    table_folder.join(table_id.to_string()),
                    checksum,
                    0,
                    self.index.id,
                    self.index.config.cache.clone(),
                    self.index.config.descriptor_table.clone(),
                    pin_filter,
                    pin_index,
                    #[cfg(feature = "metrics")]
                    self.index.metrics.clone(),
                )
            })
            .collect::<crate::Result<Vec<_>>>()?;

        Ok(Some((tables, Some(blob_files))))
    }

    fn register_tables(
        &self,
        tables: &[Table],
        blob_files: Option<&[BlobFile]>,
        frag_map: Option<FragmentationMap>,
        sealed_memtables_to_delete: &[MemtableId],
        gc_watermark: SeqNo,
    ) -> crate::Result<()> {
        self.index.register_tables(
            tables,
            blob_files,
            frag_map,
            sealed_memtables_to_delete,
            gc_watermark,
        )
    }

    fn compact(
        &self,
        strategy: Arc<dyn crate::compaction::CompactionStrategy>,
        seqno_threshold: SeqNo,
    ) -> crate::Result<()> {
        self.index.compact(strategy, seqno_threshold)
    }

    fn get_next_table_id(&self) -> TableId {
        self.index.get_next_table_id()
    }

    fn tree_config(&self) -> &Config {
        &self.index.config
    }

    fn get_highest_seqno(&self) -> Option<SeqNo> {
        self.index.get_highest_seqno()
    }

    fn active_memtable(&self) -> Arc<Memtable> {
        self.index.active_memtable()
    }

    fn tree_type(&self) -> crate::TreeType {
        crate::TreeType::Blob
    }

    fn rotate_memtable(&self) -> Option<Arc<Memtable>> {
        self.index.rotate_memtable()
    }

    fn table_count(&self) -> usize {
        self.index.table_count()
    }

    fn level_table_count(&self, idx: usize) -> Option<usize> {
        self.index.level_table_count(idx)
    }

    fn approximate_len(&self) -> usize {
        self.index.approximate_len()
    }

    fn is_empty(
        &self,
        seqno: SeqNo,
        index: Option<(Arc<Memtable>, SeqNo)>,
    ) -> crate::Result<bool> {
        self.index.is_empty(seqno, index)
    }

    fn contains_key<K: AsRef<[u8]>>(&self, key: K, seqno: SeqNo) -> crate::Result<bool> {
        self.index.contains_key(key, seqno)
    }

    fn len(&self, seqno: SeqNo, index: Option<(Arc<Memtable>, SeqNo)>) -> crate::Result<usize> {
        self.index.len(seqno, index)
    }

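    // Disk space is the index tree's size plus the on-disk size of all blob files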
    fn disk_space(&self) -> u64 {
        let version = self.current_version();
        self.index.disk_space() + version.blob_files.on_disk_size()
    }

    fn get_highest_memtable_seqno(&self) -> Option<SeqNo> {
        self.index.get_highest_memtable_seqno()
    }

    fn get_highest_persisted_seqno(&self) -> Option<SeqNo> {
        self.index.get_highest_persisted_seqno()
    }

    fn insert<K: Into<UserKey>, V: Into<UserValue>>(
        &self,
        key: K,
        value: V,
        seqno: SeqNo,
    ) -> (u64, u64) {
        self.index.insert(key, value.into(), seqno)
    }

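    // Point reads first look up the entry in the index tree; if it is an
    // indirection, the value is then fetched from the referenced blob file.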
    fn get<K: AsRef<[u8]>>(&self, key: K, seqno: SeqNo) -> crate::Result<Option<crate::UserValue>> {
        let key = key.as_ref();

        #[expect(clippy::expect_used, reason = "lock is expected to not be poisoned")]
        let super_version = self
            .index
            .version_history
            .read()
            .expect("lock is poisoned")
            .get_version_for_snapshot(seqno);

        let Some(item) = crate::Tree::get_internal_entry_from_version(&super_version, key, seqno)?
        else {
            return Ok(None);
        };

        let (_, v) = resolve_value_handle(
            self.id(),
            self.blobs_folder.as_path(),
            &self.index.config.cache,
            &self.index.config.descriptor_table,
            &super_version.version,
            item,
        )?;

        Ok(Some(v))
    }

    fn remove<K: Into<UserKey>>(&self, key: K, seqno: SeqNo) -> (u64, u64) {
        self.index.remove(key, seqno)
    }

    fn remove_weak<K: Into<UserKey>>(&self, key: K, seqno: SeqNo) -> (u64, u64) {
        self.index.remove_weak(key, seqno)
    }
}