// hi_sparse_bitset/cache.rs
1//! Cache used by [BlockIter]/[IndexIter] for [reduce] operations.
2//!
3//! # Memory footprint
4//!
5//! Cache for one [BitSet] costs 2 pointers.
6//!
7//! [BitSet]: crate::BitSet
8//! [BlockIter]: crate::iter::BlockIter
9//! [IndexIter]: crate::iter::IndexIter
10//! [reduce]: crate::reduce()
11
12use crate::ops::BitSetOp;
13use crate::bitset_interface::{BitSetBase, LevelMasksIterExt};
14use crate::reduce::{DynamicCacheImpl, FixedCacheImpl, NonCachedImpl, ReduceCacheImpl};
15
16/// Cache is not used.
17///
18/// If reduced iterator contains other nested reduce operations - all of them
19/// WILL NOT use cache as well.
20///
21/// # Example 1
22///
23/// ```
24/// # use itertools::assert_equal;
25/// # use hi_sparse_bitset::{reduce, reduce_w_cache};
26/// # use hi_sparse_bitset::ops::{And, Or};
27/// # use hi_sparse_bitset::cache::NoCache;
28/// # type BitSet = hi_sparse_bitset::BitSet<hi_sparse_bitset::config::_128bit>;
29/// let su1 = [BitSet::from([1,2]), BitSet::from([5,6])];
30/// let union1 = reduce(Or, su1.iter()).unwrap();
31///
32/// let su2 = [BitSet::from([1,3]), BitSet::from([4,6])];
33/// let union2 = reduce(Or, su2.iter()).unwrap();
34///
35/// let si = [union1, union2];
36/// let intersection = reduce_w_cache(And, si.iter(), NoCache).unwrap();
37///
38/// // Not only `intersection` will be computed without cache,
39/// // but also `union1` and `union2`.
40/// assert_equal(intersection, [1,6]);
41///
42/// ```
43///
44/// # Example 2
45///
46/// ```
47/// # use itertools::assert_equal;
48/// # use hi_sparse_bitset::{reduce, reduce_w_cache};
49/// # use hi_sparse_bitset::ops::{And, Or};
50/// # use hi_sparse_bitset::cache::NoCache;
51/// # type BitSet = hi_sparse_bitset::BitSet<hi_sparse_bitset::config::_128bit>;
52/// let su1 = [BitSet::from([1,2]), BitSet::from([5,6])];
53/// let union1 = reduce_w_cache(Or, su1.iter(), NoCache).unwrap();
54///
55/// let su2 = [BitSet::from([1,3]), BitSet::from([4,6])];
56/// let union2 = reduce_w_cache(Or, su2.iter(), NoCache).unwrap();
57///
58/// let si = [union1, union2];
59/// let intersection = reduce(And, si.iter()).unwrap();
60///
61/// // Only `union1` and `union2` will not use cache, `intersection`
62/// // will be computed with cache.
63/// assert_equal(intersection, [1,6]);
64///
65/// ```
66///
67/// [reduce]: crate::reduce()
68#[derive(Default, Copy, Clone)]
69pub struct NoCache;
70
71/// Cache with fixed capacity.
72///
73/// This cache is noop to construct.
74/// Should be your default choice.
75///
76/// N.B. Pay attention to stack-mem usage when working with
77/// reduce on reduce on reduce ...
78#[derive(Default, Copy, Clone)]
79pub struct FixedCache<const N:usize>;
80
81/// Dynamically built in-heap cache.
82///
83/// You want this, when your cache doesn't fit stack.
84/// This can happened, when you work with enormously large number of sets,
85/// and/or work with deep [reduce] operations. Alternatively, you
86/// can use [NoCache].
87///
88/// [reduce]: crate::reduce()
89#[derive(Default, Copy, Clone)]
90pub struct DynamicCache;
91
92pub trait ReduceCache: Default + 'static{
93 /// usize::MAX - if unlimited.
94 const MAX_LEN: usize;
95 type Impl<Op, S>
96 : ReduceCacheImpl<
97 Sets = S,
98 Conf = <S::Item as BitSetBase>::Conf
99 >
100 where
101 Op: BitSetOp,
102 S: Iterator + Clone,
103 S::Item: LevelMasksIterExt;
104}
105
106impl ReduceCache for NoCache{
107 const MAX_LEN: usize = usize::MAX;
108 type Impl<Op, S> = NonCachedImpl<Op, S>
109 where
110 Op: BitSetOp,
111 S: Iterator + Clone,
112 S::Item: LevelMasksIterExt;
113}
114
115impl<const N: usize> ReduceCache for FixedCache<N>{
116 const MAX_LEN: usize = N;
117 type Impl<Op, S> = FixedCacheImpl<Op, S, N>
118 where
119 Op: BitSetOp,
120 S: Iterator + Clone,
121 S::Item: LevelMasksIterExt;
122}
123
124impl ReduceCache for DynamicCache{
125 const MAX_LEN: usize = usize::MAX;
126 type Impl<Op, S> = DynamicCacheImpl<Op, S>
127 where
128 Op: BitSetOp,
129 S: Iterator + Clone,
130 S::Item: LevelMasksIterExt;
131}