1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
// Copyright (c) 2024-present, fjall-rs
// This source code is licensed under both the Apache 2.0 and MIT License
// (found in the LICENSE-* files in the repository)
use crate::{
SeqNo, UserKey, UserValue, blob_tree::handle::BlobIndirection, file::BLOBS_FOLDER,
table::Table, tree::ingest::Ingestion as TableIngestion, vlog::BlobFileWriter,
};
use std::cmp::Ordering;
/// Bulk ingestion for [`BlobTree`]
///
/// Items NEED to be added in ascending key order.
///
/// Uses table ingestion for the index and a blob file writer for large
/// values so both streams advance together.
pub struct BlobIngestion<'a> {
// The blob tree being ingested into
tree: &'a crate::BlobTree,
// Ingestion stream for the index tree (inline values and blob indirections)
pub(crate) table: TableIngestion<'a>,
// Writer producing blob files for separated (large) values
pub(crate) blob: BlobFileWriter,
// Sequence number passed to blob writes; set to 0 by `new`
seqno: SeqNo,
// Values whose size (in bytes) is >= this threshold go to the value log
separation_threshold: u32,
// Last successfully written key, used to enforce ascending key order
last_key: Option<UserKey>,
}
impl<'a> BlobIngestion<'a> {
/// Creates a new ingestion.
///
/// # Errors
///
/// Will return `Err` if an IO error occurs.
pub fn new(tree: &'a crate::BlobTree) -> crate::Result<Self> {
#[expect(
clippy::expect_used,
reason = "cannot define blob tree without kv separation options"
)]
let kv = tree
.index
.config
.kv_separation_opts
.as_ref()
.expect("kv separation options should exist");
let blob_file_size = kv.file_target_size;
let table = TableIngestion::new(&tree.index)?;
let blob = BlobFileWriter::new(
tree.index.0.blob_file_id_counter.clone(),
tree.index.config.path.join(BLOBS_FOLDER),
tree.index.id,
tree.index.config.descriptor_table.clone(),
tree.index.config.fs.clone(),
)?
.use_target_size(blob_file_size)
.use_compression(kv.compression);
let separation_threshold = kv.separation_threshold;
Ok(Self {
tree,
table,
blob,
seqno: 0,
separation_threshold,
last_key: None,
})
}
/// Asserts that `key` is strictly greater than the previously written key
/// according to the tree's configured comparator.
///
/// Called before any I/O so an ordering violation cannot leave partial
/// writes behind (e.g. a blob written without its index entry).
fn assert_ascending(&self, key: &UserKey) {
if let Some(prev) = &self.last_key {
assert!(
self.tree.index.config.comparator.compare(prev, key) == Ordering::Less,
"next key in ingestion must be ordered after last key by configured comparator"
);
}
}
/// Records `key` as the last successfully written key (only when `res` is
/// `Ok`), then forwards `res` to the caller.
fn track_written_key(&mut self, key: UserKey, res: crate::Result<()>) -> crate::Result<()> {
if res.is_ok() {
self.last_key = Some(key);
}
res
}
/// Writes a key-value pair.
///
/// Values at or above the separation threshold are written to the value
/// log; only a [`BlobIndirection`] is stored in the index tree for them.
/// Smaller values are stored inline in the index tree.
///
/// # Errors
///
/// Will return `Err` if an IO error occurs.
///
/// # Panics
///
/// Panics if keys are not written in strictly ascending order.
pub fn write(&mut self, key: UserKey, value: UserValue) -> crate::Result<()> {
// Check order before any blob I/O to avoid partial writes on failure
self.assert_ascending(&key);
#[expect(clippy::cast_possible_truncation)]
let value_size = value.len() as u32;
if value_size >= self.separation_threshold {
// Large value: write to a blob file, store an indirection in the index
let vhandle = self.blob.write(&key, self.seqno, &value)?;
let indirection = BlobIndirection {
vhandle,
size: value_size,
};
let cloned_key = key.clone();
let res = self.table.write_indirection(key, indirection);
self.track_written_key(cloned_key, res)
} else {
// Small value: store inline in the index tree
let cloned_key = key.clone();
let res = self.table.write(key, value);
self.track_written_key(cloned_key, res)
}
}
/// Writes a tombstone for a key.
///
/// # Errors
///
/// Will return `Err` if an IO error occurs.
///
/// # Panics
///
/// Panics if keys are not written in strictly ascending order.
pub fn write_tombstone(&mut self, key: UserKey) -> crate::Result<()> {
self.assert_ascending(&key);
let cloned_key = key.clone();
let res = self.table.write_tombstone(key);
self.track_written_key(cloned_key, res)
}
/// Writes a weak tombstone for a key.
///
/// # Errors
///
/// Will return `Err` if an IO error occurs.
///
/// # Panics
///
/// Panics if keys are not written in strictly ascending order.
pub fn write_weak_tombstone(&mut self, key: UserKey) -> crate::Result<()> {
self.assert_ascending(&key);
let cloned_key = key.clone();
let res = self.table.write_weak_tombstone(key);
self.track_written_key(cloned_key, res)
}
/// Finishes the ingestion.
///
/// # Errors
///
/// Will return `Err` if an IO error occurs.
#[allow(clippy::significant_drop_tightening)]
pub fn finish(self) -> crate::Result<()> {
use crate::AbstractTree;
let index = self.index().clone();
// CRITICAL SECTION: Atomic flush + seqno allocation + registration
//
// For BlobTree, we must coordinate THREE components atomically:
// 1. Index tree memtable flush
// 2. Value log blob files
// 3. Index tree tables (with blob indirections)
//
// The sequence ensures all components see the same global_seqno:
// 1. Acquire flush lock on index tree
// 2. Flush index tree active memtable
// 3. Finalize blob writer (creates blob files)
// 4. Finalize table writer (creates index tables)
// 5. Allocate next global seqno
// 6. Recover tables with that seqno
// 7. Register version with same seqno + blob files
//
// This prevents race conditions where blob files and their index
// entries could have mismatched sequence numbers.
let flush_lock = index.get_flush_lock();
// Flush any pending index memtable writes to ensure ingestion sees
// a consistent snapshot of the index.
// We call rotate + flush directly because we already hold the lock.
index.rotate_memtable();
index.flush(&flush_lock, 0)?;
// Finalize the blob writer first, ensuring all large values are
// written to blob files before we finalize the index tables that
// reference them.
let blob_files = self.blob.finish()?;
// Finalize the table writer, creating index tables with blob
// indirections pointing to the blob files we just created.
let results = self.table.writer.finish()?;
// Acquire locks for version registration on the index tree. We must
// hold both the compaction state lock and version history lock to
// safely modify the tree's version.
#[expect(clippy::expect_used, reason = "lock is expected to not be poisoned")]
let mut _compaction_state = index.compaction_state.lock().expect("lock is poisoned");
#[expect(clippy::expect_used, reason = "lock is expected to not be poisoned")]
let mut version_lock = index.version_history.write().expect("lock is poisoned");
// Allocate the next global sequence number. This seqno will be shared
// by all ingested tables, blob files, and the version that registers
// them, ensuring consistent MVCC snapshots across the value log.
let global_seqno = index.config.seqno.next();
// Recover all created index tables, assigning them the global_seqno
// we just allocated. These tables contain indirections to the blob
// files created above, so they must share the same sequence number
// for MVCC correctness.
//
// We intentionally do NOT pin filter/index blocks here. Large ingests
// are typically placed in level 1, and pinning would increase memory
// pressure unnecessarily.
let table_folder = &self.table.folder;
let created_tables = results
.into_iter()
.map(|(table_id, checksum)| -> crate::Result<Table> {
Table::recover(
table_folder.join(table_id.to_string()),
checksum,
global_seqno,
index.id,
index.config.cache.clone(),
index.config.descriptor_table.clone(),
false,
false,
index.config.encryption.clone(),
#[cfg(zstd_any)]
index.config.zstd_dictionary.clone(),
index.config.comparator.clone(),
#[cfg(feature = "metrics")]
index.metrics.clone(),
)
})
.collect::<crate::Result<Vec<_>>>()?;
// Upgrade the version with our ingested tables and blob files, using
// the global_seqno we allocated earlier. This ensures the version,
// tables, and blob files all share the same sequence number, which is
// critical for GC correctness - we must not delete blob files that are
// still referenced by visible snapshots.
//
// We use upgrade_version_with_seqno (instead of upgrade_version) because
// we need precise control over the seqno: it must match the seqno we
// already assigned to the recovered tables.
version_lock.upgrade_version_with_seqno(
&index.config.path,
|current| {
let mut copy = current.clone();
let ctx = crate::version::TransformContext::new(index.config.comparator.as_ref());
copy.version =
copy.version
.with_new_l0_run(&created_tables, Some(&blob_files), None, &ctx);
Ok(copy)
},
global_seqno,
&self.tree.index.config.visible_seqno,
&*self.tree.index.config.fs,
)?;
// Perform maintenance on the version history (e.g., clean up old versions).
// We use gc_watermark=0 since ingestion doesn't affect sealed memtables.
if let Err(e) = version_lock.maintenance(&index.config.path, 0) {
log::warn!("Version GC failed: {e:?}");
}
Ok(())
}
// Convenience accessor for the underlying index tree of the blob tree.
#[inline]
fn index(&self) -> &crate::Tree {
&self.tree.index
}
}