lsm_tree/compaction/
leveled.rs

// Copyright (c) 2024-present, fjall-rs
// This source code is licensed under both the Apache 2.0 and MIT License
// (found in the LICENSE-* files in the repository)

use super::{Choice, CompactionStrategy, Input as CompactionInput};
use crate::{
    compaction::state::{hidden_set::HiddenSet, CompactionState},
    config::Config,
    slice_windows::{GrowingWindowsExt, ShrinkingWindowsExt},
    table::Table,
    version::{run::Ranged, Run, Version},
    HashSet, KeyRange, TableId,
};

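/// Computes the key range spanned by a run of tables.
///
/// NOTE: This assumes the slice is a non-empty run, i.e. the tables are sorted
/// by key range, so the first table holds the minimum key and the last table
/// holds the maximum key.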
pub fn aggregate_run_key_range(tables: &[Table]) -> KeyRange {
    let lo = tables.first().expect("run should never be empty");
    let hi = tables.last().expect("run should never be empty");
    KeyRange::new((lo.key_range().min().clone(), hi.key_range().max().clone()))
}

/// Tries to find the optimal compaction set from one level into the next.
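///
/// Returns the IDs of the tables to compact, plus a flag that is `true` if the
/// chosen set can be trivially moved into the next level.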
fn pick_minimal_compaction(
    curr_run: &Run<Table>,
    next_run: Option<&Run<Table>>,
    hidden_set: &HiddenSet,
    overshoot: u64,
    table_base_size: u64,
) -> Option<(HashSet<TableId>, bool)> {
    // NOTE: Find largest trivial move (if it exists)
    if let Some(window) = curr_run.shrinking_windows().find(|window| {
        if hidden_set.is_blocked(window.iter().map(Table::id)) {
            // IMPORTANT: Compaction is blocked because of other
            // on-going compaction
            return false;
        }

        let Some(next_run) = &next_run else {
            // No run in next level, so we can trivially move
            return true;
        };

        let key_range = aggregate_run_key_range(window);

        next_run.get_overlapping(&key_range).is_empty()
    }) {
        let ids = window.iter().map(Table::id).collect();
        return Some((ids, true));
    }

    // NOTE: Look for merges
    if let Some(next_run) = &next_run {
        next_run
            .growing_windows()
            .take_while(|window| {
                // Cap the next level's input at roughly 50 tables' worth of data per compaction for now
                //
                // At this point, all compactions are too large anyway,
                // so we can escape early
                let next_level_size = window.iter().map(Table::file_size).sum::<u64>();
                next_level_size <= (50 * table_base_size)
            })
            .filter_map(|window| {
                if hidden_set.is_blocked(window.iter().map(Table::id)) {
                    // IMPORTANT: Compaction is blocked because of other
                    // on-going compaction
                    return None;
                }

                let key_range = aggregate_run_key_range(window);

                // Pull all tables of the current level that are contained in the key range into the compaction
                let curr_level_pull_in = curr_run.get_contained(&key_range);

                let curr_level_size = curr_level_pull_in.iter().map(Table::file_size).sum::<u64>();

                // if curr_level_size < overshoot {
                //     return None;
                // }

                if hidden_set.is_blocked(curr_level_pull_in.iter().map(Table::id)) {
                    // IMPORTANT: Compaction is blocked because of other
                    // on-going compaction
                    return None;
                }

                let next_level_size = window.iter().map(Table::file_size).sum::<u64>();

                //  let compaction_bytes = curr_level_size + next_level_size;

                #[allow(clippy::cast_precision_loss)]
                let write_amp = (next_level_size as f32) / (curr_level_size as f32);

                Some((window, curr_level_pull_in, write_amp))
            })
            // Find the compaction with the smallest write amplification factor
            .min_by(|a, b| a.2.partial_cmp(&b.2).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(window, curr_level_pull_in, _)| {
                let mut ids: HashSet<_> = window.iter().map(Table::id).collect();
                ids.extend(curr_level_pull_in.iter().map(Table::id));
                (ids, false)
            })
    } else {
        None
    }
}

/// Levelled compaction strategy (LCS)
///
/// When a level reaches some threshold size, parts of it are merged into overlapping tables in the next level.
///
/// Each level Ln for n >= 1 can have up to `level_base_size * ratio^(n - 1)` bytes of data.
///
/// LCS suffers from comparatively high write amplification, but has decent read amplification and great space amplification (~1.1x).
///
/// LCS is the recommended compaction strategy to use.
///
/// More info here: <https://fjall-rs.github.io/post/lsm-leveling/>
#[derive(Clone)]
pub struct Strategy {
    /// When the number of tables in L0 reaches this threshold,
    /// they are merged into L1.
    ///
    /// Default = 4
    ///
    /// Same as `level0_file_num_compaction_trigger` in `RocksDB`.
    pub l0_threshold: u8,

    /// The target table size on disk (possibly compressed).
    ///
    /// Default = 64 MiB
    ///
    /// Same as `target_file_size_base` in `RocksDB`.
    pub target_size: u64,

    /// Size ratio between levels of the LSM tree (a.k.a. fanout, growth rate)
    ///
    /// This is the exponential growth factor from one
    /// level to the next.
    ///
    /// Default = 10
    #[allow(clippy::doc_markdown)]
    pub level_ratio_policy: Vec<f32>,
}

impl Default for Strategy {
    fn default() -> Self {
        Self {
            l0_threshold: 4,
            target_size: /* 64 MiB */ 64 * 1_024 * 1_024,
            level_ratio_policy: vec![10.0],
        }
    }
}

impl Strategy {
    /// Sets the growth ratio policy between levels.
    ///
    /// Each entry is the ratio for one level transition; the last entry is
    /// reused for any deeper levels.
    #[must_use]
    pub fn with_level_ratio_policy(mut self, policy: Vec<f32>) -> Self {
        self.level_ratio_policy = policy;
        self
    }

    /// Calculates the target size of L1.
    fn level_base_size(&self) -> u64 {
        self.target_size * u64::from(self.l0_threshold)
    }

    /// Calculates the level target size.
    ///
    /// L1 = `level_base_size`
    ///
    /// L2 = `level_base_size * ratio`
    ///
    /// L3 = `level_base_size * ratio * ratio`
    ///
    /// ...
    fn level_target_size(&self, canonical_level_idx: u8) -> u64 {
        assert!(
            canonical_level_idx >= 1,
            "level_target_size does not apply to L0",
        );

        if canonical_level_idx == 1 {
            // u64::from(self.target_size)
            self.level_base_size()
        } else {
            let mut size = self.level_base_size() as f32;

            // NOTE: Minus 2 because |{L0, L1}|
            for idx in 0..=(canonical_level_idx - 2) {
                let ratio = self
                    .level_ratio_policy
                    .get(usize::from(idx))
                    .copied()
                    .unwrap_or_else(|| self.level_ratio_policy.last().copied().unwrap_or(10.0));

                size *= ratio;
            }

            size as u64
        }
    }
}
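
// A minimal sanity-check sketch of the target-size math above, assuming the
// default configuration (64 MiB target size, L0 threshold = 4, ratio = 10):
// L1 is targeted at 256 MiB and every deeper level at 10x the previous one.
#[cfg(test)]
mod level_target_size_sketch {
    use super::Strategy;

    #[test]
    fn default_level_target_sizes() {
        let strategy = Strategy::default();

        // level_base_size = target_size * l0_threshold = 64 MiB * 4 = 256 MiB
        assert_eq!(strategy.level_base_size(), 256 * 1_024 * 1_024);

        // L1 = base, L2 = base * 10, L3 = base * 10 * 10
        assert_eq!(strategy.level_target_size(1), 256 * 1_024 * 1_024);
        assert_eq!(strategy.level_target_size(2), 2_560 * 1_024 * 1_024);
        assert_eq!(strategy.level_target_size(3), 25_600 * 1_024 * 1_024);
    }
}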

impl CompactionStrategy for Strategy {
    fn get_name(&self) -> &'static str {
        "LeveledCompaction"
    }

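    // NOTE: A sketch of the encoding used below (describing the code as
    // written, not a separately specified format): the ratio policy value is a
    // `u8` holding the number of ratios, followed by each ratio as a
    // little-endian `f32`.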
    fn get_config(&self) -> Vec<crate::KvPair> {
        vec![
            (
                crate::UserKey::from("leveled_l0_threshold"),
                crate::UserValue::from(self.l0_threshold.to_le_bytes()),
            ),
            (
                crate::UserKey::from("leveled_target_size"),
                crate::UserValue::from(self.target_size.to_le_bytes()),
            ),
            (
                crate::UserKey::from("leveled_level_ratio_policy"),
                crate::UserValue::from({
                    use byteorder::{LittleEndian, WriteBytesExt};

                    let mut v = vec![];

                    v.write_u8(self.level_ratio_policy.len() as u8)
                        .expect("cannot fail");

                    for &f in &self.level_ratio_policy {
                        v.write_f32::<LittleEndian>(f).expect("cannot fail");
                    }

                    v
                }),
            ),
        ]
    }

    #[allow(clippy::too_many_lines)]
    fn choose(&self, version: &Version, _: &Config, state: &CompactionState) -> Choice {
        assert!(version.level_count() == 7, "should have exactly 7 levels");

        // Find the level that corresponds to L1
        #[allow(clippy::map_unwrap_or)]
        let mut canonical_l1_idx = version
            .iter_levels()
            .enumerate()
            .skip(1)
            .find(|(_, lvl)| !lvl.is_empty())
            .map(|(idx, _)| idx)
            .unwrap_or_else(|| version.level_count() - 1);

        // Number of levels we have to shift to get from the actual level idx to the canonical one
        let mut level_shift = canonical_l1_idx - 1;

        if canonical_l1_idx > 1 && version.iter_levels().skip(1).any(|lvl| !lvl.is_empty()) {
            let need_new_l1 = version
                .iter_levels()
                .enumerate()
                .skip(1)
                .filter(|(_, lvl)| !lvl.is_empty())
                .all(|(idx, level)| {
                    let level_size = level
                        .iter()
                        .flat_map(|x| x.iter())
                        // NOTE: Take bytes that are already being compacted into account,
                        // otherwise we may be overcompensating
                        .filter(|x| !state.hidden_set().is_hidden(x.id()))
                        .map(Table::file_size)
                        .sum::<u64>();

                    let target_size = self.level_target_size((idx - level_shift) as u8);

                    level_size > target_size
                });

            // Move up L1 one level if all current levels are at capacity
            if need_new_l1 {
                canonical_l1_idx -= 1;
                level_shift -= 1;
            }
        }
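
        // A worked example (sketch): if L1..=L3 are empty and L4 is the first
        // non-empty level, `canonical_l1_idx` is 4 and `level_shift` is 3, so
        // L4 is scored against the canonical L1 target size. If every
        // non-empty level is already above its target, the canonical L1 is
        // moved up one actual level so data can spread into a fresh level.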

        // Scoring
        let mut scores = [(/* score */ 0.0, /* overshoot */ 0u64); 7];

        {
            // TODO(weak-tombstone-rewrite): incorporate `Table::weak_tombstone_count` and
            // `Table::weak_tombstone_reclaimable` when computing level scores so rewrite
            // decisions can prioritize tables that would free the most reclaimable values.

            // Score first level

            // NOTE: We always have at least one level
            #[allow(clippy::expect_used)]
            let first_level = version.l0();

            // TODO: use run_count instead? but be careful because of version free list GC thingy
            if first_level.table_count() >= usize::from(self.l0_threshold) {
                let ratio = (first_level.table_count() as f64) / f64::from(self.l0_threshold);
                scores[0] = (ratio, 0);
            }

            // Score L1+
            for (idx, level) in version.iter_levels().enumerate().skip(1) {
                if level.is_empty() {
                    continue;
                }

                let level_size = level
                    .iter()
                    .flat_map(|x| x.iter())
                    // NOTE: Take bytes that are already being compacted into account,
                    // otherwise we may be overcompensating
                    .filter(|x| !state.hidden_set().is_hidden(x.id()))
                    .map(Table::file_size)
                    .sum::<u64>();

                let target_size = self.level_target_size((idx - level_shift) as u8);

                // NOTE: We check for level length above
                #[allow(clippy::indexing_slicing)]
                if level_size > target_size {
                    scores[idx] = (
                        level_size as f64 / target_size as f64,
                        level_size - target_size,
                    );

                    // NOTE: Force a trivial move
                    if version
                        .level(idx + 1)
                        .is_some_and(|next_level| next_level.is_empty())
                    {
                        scores[idx] = (99.99, 999);
                    }
                }
            }

            // NOTE: Never score Lmax
            //
            // NOTE: We check for level length above
            #[allow(clippy::indexing_slicing)]
            {
                scores[6] = (0.0, 0);
            }
        }
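
        // A worked example (sketch): with the default L0 threshold of 4, an L0
        // holding 6 tables scores 6 / 4 = 1.5; a level holding 3 GiB against a
        // 2.5 GiB target scores 1.2 with an overshoot of 0.5 GiB, unless its
        // next level is empty, which forces a (trivial move) score of 99.99.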

        // Choose compaction
        let (level_idx_with_highest_score, (score, overshoot_bytes)) = scores
            .into_iter()
            .enumerate()
            .max_by(|(_, (score_a, _)), (_, (score_b, _))| {
                score_a
                    .partial_cmp(score_b)
                    .unwrap_or(std::cmp::Ordering::Equal)
            })
            .expect("should have highest score somewhere");

        if score < 1.0 {
            return Choice::DoNothing;
        }

        // We choose L0->L1 compaction
        if level_idx_with_highest_score == 0 {
            let Some(first_level) = version.level(0) else {
                return Choice::DoNothing;
            };

            if version.level_is_busy(0, state.hidden_set())
                || version.level_is_busy(canonical_l1_idx, state.hidden_set())
            {
                return Choice::DoNothing;
            }

            let Some(target_level) = &version.level(canonical_l1_idx) else {
                return Choice::DoNothing;
            };

            let mut table_ids: HashSet<u64> = first_level.list_ids();

            let key_range = first_level.aggregate_key_range();

            // Get overlapping tables in next level
            let target_level_overlapping_table_ids: Vec<_> = target_level
                .iter()
                .flat_map(|run| run.get_overlapping(&key_range))
                .map(Table::id)
                .collect();

            table_ids.extend(&target_level_overlapping_table_ids);

            let choice = CompactionInput {
                table_ids,
                dest_level: canonical_l1_idx as u8,
                canonical_level: 1,
                target_size: self.target_size,
            };

            /* eprintln!(
                "merge {} tables, L0->L1: {:?}",
                choice.table_ids.len(),
                choice.table_ids,
            ); */

            if target_level_overlapping_table_ids.is_empty() && first_level.is_disjoint() {
                return Choice::Move(choice);
            }
            return Choice::Merge(choice);
        }

        // We choose L1+ compaction instead

        // NOTE: Level count is 255 max
        #[allow(clippy::cast_possible_truncation)]
        let curr_level_index = level_idx_with_highest_score as u8;

        let next_level_index = curr_level_index + 1;

        let Some(level) = version.level(level_idx_with_highest_score) else {
            return Choice::DoNothing;
        };

        let Some(next_level) = version.level(next_level_index as usize) else {
            return Choice::DoNothing;
        };

        debug_assert!(level.is_disjoint(), "level should be disjoint");
        debug_assert!(next_level.is_disjoint(), "next level should be disjoint");

        let Some((table_ids, can_trivial_move)) = pick_minimal_compaction(
            level.first_run().expect("should have exactly one run"),
            next_level.first_run().map(std::ops::Deref::deref),
            state.hidden_set(),
            overshoot_bytes,
            self.target_size,
        ) else {
            return Choice::DoNothing;
        };

        let choice = CompactionInput {
            table_ids,
            dest_level: next_level_index,
            canonical_level: next_level_index - (level_shift as u8),
            target_size: self.target_size,
        };

        /* eprintln!(
            "{} {} tables, L{}->L{next_level_index}: {:?}",
            if can_trivial_move { "move" } else { "merge" },
            choice.table_ids.len(),
            next_level_index - 1,
            choice.table_ids,
        ); */

        if can_trivial_move && level.is_disjoint() {
            return Choice::Move(choice);
        }
        Choice::Merge(choice)
    }
}

/*
#[cfg(test)]
mod tests {
    use super::{Choice, Strategy};
    use crate::{
        cache::Cache,
        compaction::{CompactionStrategy, Input as CompactionInput},
        descriptor_table::FileDescriptorTable,
        level_manifest::LevelManifest,
        segment::{
            block::offset::BlockOffset,
            block_index::{two_level_index::TwoLevelBlockIndex, BlockIndexImpl},
            file_offsets::FileOffsets,
            meta::{Metadata, SegmentId},
            SegmentInner,
        },
        super_segment::Segment,
        time::unix_timestamp,
        Config, HashSet, KeyRange,
    };
    use std::{
        path::Path,
        sync::{atomic::AtomicBool, Arc},
    };
    use test_log::test;

    fn string_key_range(a: &str, b: &str) -> KeyRange {
        KeyRange::new((a.as_bytes().into(), b.as_bytes().into()))
    }

    #[allow(
        clippy::expect_used,
        clippy::cast_possible_truncation,
        clippy::cast_sign_loss
    )]
    fn fixture_segment(
        id: SegmentId,
        key_range: KeyRange,
        size: u64,
        tombstone_ratio: f32,
    ) -> Segment {
        todo!()

        /*   let cache = Arc::new(Cache::with_capacity_bytes(10 * 1_024 * 1_024));

        let block_index = TwoLevelBlockIndex::new((0, id).into(), cache.clone());
        let block_index = Arc::new(BlockIndexImpl::TwoLevel(block_index));

        SegmentInner {
            tree_id: 0,
            descriptor_table: Arc::new(FileDescriptorTable::new(512, 1)),
            block_index,

            offsets: FileOffsets {
                bloom_ptr: BlockOffset(0),
                range_filter_ptr: BlockOffset(0),
                index_block_ptr: BlockOffset(0),
                metadata_ptr: BlockOffset(0),
                range_tombstones_ptr: BlockOffset(0),
                tli_ptr: BlockOffset(0),
                pfx_ptr: BlockOffset(0),
            },

            metadata: Metadata {
                data_block_count: 0,
                index_block_count: 0,
                data_block_size: 4_096,
                index_block_size: 4_096,
                created_at: unix_timestamp().as_nanos(),
                id,
                file_size: size,
                compression: crate::segment::meta::CompressionType::None,
                table_type: crate::segment::meta::TableType::Block,
                item_count: 1_000_000,
                key_count: 0,
                key_range,
                tombstone_count: (1_000_000.0 * tombstone_ratio) as u64,
                range_tombstone_count: 0,
                uncompressed_size: 0,
                seqnos: (0, 0),
            },
            cache,

            bloom_filter: Some(crate::bloom::BloomFilter::with_fp_rate(1, 0.1)),

            path: "a".into(),
            is_deleted: AtomicBool::default(),
        }
        .into() */
    }

    #[allow(clippy::expect_used)]
    fn build_levels(
        path: &Path,
        recipe: Vec<Vec<(SegmentId, &str, &str, u64)>>,
    ) -> crate::Result<LevelManifest> {
        let mut levels = LevelManifest::create_new(
            recipe.len().try_into().expect("oopsie"),
            path.join("levels"),
        )?;

        for (idx, level) in recipe.into_iter().enumerate() {
            for (id, min, max, size_mib) in level {
                levels.insert_into_level(
                    idx.try_into().expect("oopsie"),
                    fixture_segment(
                        id,
                        string_key_range(min, max),
                        size_mib * 1_024 * 1_024,
                        0.0,
                    ),
                );
            }
        }

        Ok(levels)
    }

    #[test]
    fn leveled_empty_levels() -> crate::Result<()> {
        let tempdir = tempfile::tempdir()?;
        let compactor = Strategy::default();

        #[rustfmt::skip]
        let levels = build_levels(tempdir.path(), vec![
            vec![],
            vec![],
            vec![],
            vec![],
        ])?;

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::DoNothing
        );

        Ok(())
    }

    #[test]
    fn leveled_default_l0() -> crate::Result<()> {
        let tempdir = tempfile::tempdir()?;
        let compactor = Strategy {
            target_size: 64 * 1_024 * 1_024,
            ..Default::default()
        };

        #[rustfmt::skip]
        let mut levels = build_levels(tempdir.path(), vec![
            vec![(1, "a", "z", 64), (2, "a", "z", 64), (3, "a", "z", 64), (4, "a", "z", 64)],
            vec![],
            vec![],
            vec![],
        ])?;

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::Merge(CompactionInput {
                dest_level: 1,
                segment_ids: [1, 2, 3, 4].into_iter().collect::<HashSet<_>>(),
                target_size: 64 * 1_024 * 1_024
            })
        );

        levels.hide_segments(std::iter::once(4));

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::DoNothing
        );

        Ok(())
    }

    #[test]
    #[allow(
        clippy::cast_sign_loss,
        clippy::cast_precision_loss,
        clippy::cast_possible_truncation
    )]
    fn leveled_intra_l0() -> crate::Result<()> {
        let tempdir = tempfile::tempdir()?;
        let compactor = Strategy {
            target_size: 64 * 1_024 * 1_024,
            ..Default::default()
        };

        #[rustfmt::skip]
        let mut levels = build_levels(tempdir.path(), vec![
            vec![(1, "a", "z", 1), (2, "a", "z", 1), (3, "a", "z", 1), (4, "a", "z", 1)],
            vec![],
            vec![],
            vec![],
        ])?;

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::Merge(CompactionInput {
                dest_level: 0,
                segment_ids: [1, 2, 3, 4].into_iter().collect::<HashSet<_>>(),
                target_size: u64::from(compactor.target_size),
            })
        );

        levels.hide_segments(std::iter::once(4));

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::DoNothing
        );

        Ok(())
    }

    #[test]
    fn leveled_more_than_min_no_overlap() -> crate::Result<()> {
        let tempdir = tempfile::tempdir()?;
        let compactor = Strategy {
            target_size: 64 * 1_024 * 1_024,
            ..Default::default()
        };

        #[rustfmt::skip]
        let levels = build_levels(tempdir.path(), vec![
            vec![(1, "h", "t", 64), (2, "h", "t", 64), (3, "h", "t", 64), (4, "h", "t", 64)],
            vec![(5, "a", "g", 64), (6, "a", "g", 64), (7, "a", "g", 64), (8, "a", "g", 64)],
            vec![],
            vec![],
        ])?;

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::Merge(CompactionInput {
                dest_level: 1,
                segment_ids: [1, 2, 3, 4].into_iter().collect::<HashSet<_>>(),
                target_size: 64 * 1_024 * 1_024
            })
        );

        Ok(())
    }

    #[test]
    fn leveled_more_than_min_with_overlap() -> crate::Result<()> {
        let tempdir = tempfile::tempdir()?;
        let compactor = Strategy {
            target_size: 64 * 1_024 * 1_024,
            ..Default::default()
        };

        #[rustfmt::skip]
        let mut levels = build_levels(tempdir.path(), vec![
            vec![(1, "a", "g", 64), (2, "h", "t", 64), (3, "i", "t", 64), (4, "j", "t", 64)],
            vec![(5, "a", "g", 64), (6, "a", "g", 64), (7, "y", "z", 64), (8, "y", "z", 64)],
            vec![],
            vec![],
        ])?;

        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::Merge(CompactionInput {
                dest_level: 1,
                segment_ids: [1, 2, 3, 4, 5, 6].into_iter().collect::<HashSet<_>>(),
                target_size: 64 * 1_024 * 1_024
            })
        );

        levels.hide_segments(std::iter::once(5));
        assert_eq!(
            compactor.choose(&levels, &Config::default()),
            Choice::DoNothing
        );

        Ok(())
    }

    #[test]
    fn levelled_from_tiered() -> crate::Result<()> {
        let tempdir = tempfile::tempdir()?;
        let compactor = Strategy {
            target_size: 64 * 1_024 * 1_024,
            ..Default::default()
        };
        let config = Config::default();

        #[rustfmt::skip]
        let levels = build_levels(tempdir.path(), vec![
            vec![],
            vec![(1, "a", "z", 64), (2, "a", "z", 64), (3, "g", "z", 64), (5, "g", "z", 64), (6, "g", "z", 64)],
            vec![(4, "a", "g", 64)],
            vec![],
        ])?;

        assert_eq!(
            compactor.choose(&levels, &config),
            Choice::Merge(CompactionInput {
                dest_level: 2,
                segment_ids: [1, 2, 3, 4, 5, 6].into_iter().collect::<HashSet<_>>(),
                target_size: 64 * 1_024 * 1_024
            })
        );

        Ok(())
    }
}
 */