1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
//! Compaction filter API for custom entry filtering during compaction.
//!
//! This module provides traits for implementing custom compaction filters that can
//! inspect, drop, convert to tombstone, or modify entries during the compaction process.
//!
//! **Warning:** When compaction filters are configured, snapshot consistency may be
//! affected. Filters may modify or drop entries that active snapshots expect to see,
//! causing snapshot reads to return unexpected results. Users who need consistent
//! snapshots should carefully consider their filter logic.
//!
//! # Example
//!
//! ```no_run
//! use async_trait::async_trait;
//! use bytes::Bytes;
//! use slatedb::{
//!     CompactionFilter, CompactionFilterSupplier, CompactionJobContext,
//!     CompactionFilterDecision, CompactionFilterError, RowEntry, ValueDeletable,
//! };
//!
//! /// A filter that converts all entries with a specific key prefix to tombstones.
//! struct PrefixTombstoneFilter {
//!     prefix: Bytes,
//!     tombstone_count: u64,
//! }
//!
//! #[async_trait]
//! impl CompactionFilter for PrefixTombstoneFilter {
//!     async fn filter(&mut self, entry: &RowEntry) -> Result<CompactionFilterDecision, CompactionFilterError> {
//!         if entry.key.starts_with(&self.prefix) {
//!             self.tombstone_count += 1;
//!             Ok(CompactionFilterDecision::Modify(ValueDeletable::Tombstone))
//!         } else {
//!             Ok(CompactionFilterDecision::Keep)
//!         }
//!     }
//!
//!     async fn on_compaction_end(&mut self) -> Result<(), CompactionFilterError> {
//!         println!(
//!             "Compaction converted {} entries with prefix {:?} to tombstones",
//!             self.tombstone_count,
//!             self.prefix
//!         );
//!         Ok(())
//!     }
//! }
//!
//! struct PrefixTombstoneFilterSupplier {
//!     prefix: Bytes,
//! }
//!
//! #[async_trait]
//! impl CompactionFilterSupplier for PrefixTombstoneFilterSupplier {
//!     async fn create_compaction_filter(
//!         &self,
//!         _context: &CompactionJobContext,
//!     ) -> Result<Box<dyn CompactionFilter>, CompactionFilterError> {
//!         Ok(Box::new(PrefixTombstoneFilter {
//!             prefix: self.prefix.clone(),
//!             tombstone_count: 0,
//!         }))
//!     }
//! }
//!
//! // Then pass the supplier to Db::builder:
//! // db.builder("mydb", object_store)
//! //     .with_compaction_filter_supplier(Arc::new(supplier))
//! //     .build()
//! //     .await
//! ```
use crate;
use async_trait;
use Error;
/// Context information about a compaction job.
///
/// This struct provides read-only information about the current compaction job
/// to help filters make informed decisions.
/// Decision returned by a compaction filter for each entry.
/// Errors that can occur during compaction filter operations.
/// Filter that processes entries during compaction.
///
/// Each filter instance is created for a single compaction job and executes
/// single-threaded on the compactor thread.
///
/// # Performance
///
/// The `filter()` method is called for every entry during compaction. While it
/// is async to allow I/O operations, frequent I/O will impact compaction throughput.
///
/// If your filter requires expensive computation, configure a dedicated
/// compaction runtime using [`crate::DbBuilder::with_compaction_runtime`] to
/// prevent blocking your application's main runtime.
///
/// # Snapshot Consistency Warning
///
/// When compaction filters are configured, snapshot consistency may be affected.
/// Filters may modify or drop entries that active snapshots expect to see, causing
/// snapshot reads to return unexpected results. Users who need consistent snapshots
/// should carefully consider their filter logic.
/// Factory that creates a [`CompactionFilter`] instance per compaction job.
///
/// The supplier is shared across all compactions and must be thread-safe (`Send + Sync`).
/// It creates a new filter instance for each compaction job, providing isolated state per job.