differential_dataflow/algorithms/prefix_sum.rs
//! Implementation of Parallel Prefix Sum

use timely::dataflow::Scope;

use crate::{Collection, ExchangeData};
use crate::lattice::Lattice;
use crate::operators::*;

9/// Extension trait for the prefix_sum method.
10pub trait PrefixSum<G: Scope, K, D> {
11 /// Computes the prefix sum for each element in the collection.
12 ///
13 /// The prefix sum is data-parallel, in the sense that the sums are computed independently for
14 /// each key of type `K`. For a single prefix sum this type can be `()`, but this permits the
15 /// more general accumulation of multiple independent sequences.
16 fn prefix_sum<F>(&self, zero: D, combine: F) -> Self where F: Fn(&K,&D,&D)->D + 'static;
17
18 /// Determine the prefix sum at each element of `location`.
19 fn prefix_sum_at<F>(&self, locations: Collection<G, (usize, K)>, zero: D, combine: F) -> Self where F: Fn(&K,&D,&D)->D + 'static;
20}
22impl<G, K, D> PrefixSum<G, K, D> for Collection<G, ((usize, K), D)>
23where
24 G: Scope<Timestamp: Lattice>,
25 K: ExchangeData + ::std::hash::Hash,
26 D: ExchangeData + ::std::hash::Hash,
27{
28 fn prefix_sum<F>(&self, zero: D, combine: F) -> Self where F: Fn(&K,&D,&D)->D + 'static {
29 self.prefix_sum_at(self.map(|(x,_)| x), zero, combine)
30 }
31
32 fn prefix_sum_at<F>(&self, locations: Collection<G, (usize, K)>, zero: D, combine: F) -> Self where F: Fn(&K,&D,&D)->D + 'static {
33
34 let combine1 = ::std::rc::Rc::new(combine);
35 let combine2 = combine1.clone();
36
37 let ranges = aggregate(self.clone(), move |k,x,y| (*combine1)(k,x,y));
38 broadcast(ranges, locations, zero, move |k,x,y| (*combine2)(k,x,y))
39 }
40}
42/// Accumulate data in `collection` into all powers-of-two intervals containing them.
43pub fn aggregate<G, K, D, F>(collection: Collection<G, ((usize, K), D)>, combine: F) -> Collection<G, ((usize, usize, K), D)>
44where
45 G: Scope<Timestamp: Lattice>,
46 K: ExchangeData + ::std::hash::Hash,
47 D: ExchangeData + ::std::hash::Hash,
48 F: Fn(&K,&D,&D)->D + 'static,
49{
50 // initial ranges are at each index, and with width 2^0.
51 let unit_ranges = collection.map(|((index, key), data)| ((index, 0, key), data));
52
53 unit_ranges
54 .iterate(|ranges|
55
56 // Each available range, of size less than usize::max_value(), advertises itself as the range
57 // twice as large, aligned to integer multiples of its size. Each range, which may contain at
58 // most two elements, then summarizes itself using the `combine` function. Finally, we re-add
59 // the initial `unit_ranges` intervals, so that the set of ranges grows monotonically.
60
61 ranges
62 .filter(|&((_pos, log, _), _)| log < 64)
63 .map(|((pos, log, key), data)| ((pos >> 1, log + 1, key), (pos, data)))
64 .reduce(move |&(_pos, _log, ref key), input, output| {
65 let mut result = (input[0].0).1.clone();
66 if input.len() > 1 { result = combine(key, &result, &(input[1].0).1); }
67 output.push((result, 1));
68 })
69 .concat(&unit_ranges.enter(&ranges.scope()))
70 )
71}
73/// Produces the accumulated values at each of the `usize` locations in `queries`.
74pub fn broadcast<G, K, D, F>(
75 ranges: Collection<G, ((usize, usize, K), D)>,
76 queries: Collection<G, (usize, K)>,
77 zero: D,
78 combine: F) -> Collection<G, ((usize, K), D)>
79where
80 G: Scope<Timestamp: Lattice + Ord + ::std::fmt::Debug>,
81 K: ExchangeData + ::std::hash::Hash,
82 D: ExchangeData + ::std::hash::Hash,
83 F: Fn(&K,&D,&D)->D + 'static,
84{
85
86 let zero0 = zero.clone();
87 let zero1 = zero.clone();
88 let zero2 = zero.clone();
89
90 // The `queries` collection may not line up with an existing element of `ranges`, and so we must
91 // track down the first range that matches. If it doesn't exist, we will need to produce a zero
92 // value. We could produce the full path from (0, key) to (idx, key), and aggregate any and all
93 // matches. This has the defect of being n log n rather than linear, as the root ranges will be
94 // replicated for each query.
95 //
96 // I think it works to have each (idx, key) propose each of the intervals it knows should be used
97 // to assemble its input. We then `distinct` these and intersect them with the offered `ranges`,
98 // essentially performing a semijoin. We then perform the unfolding, where we might need to use
99 // empty ranges if none exist in `ranges`.
100
101 // We extract desired ranges for each `idx` from its binary representation: each set bit requires
102 // the contribution of a range, and we call out each of these. This could produce a super-linear
103 // amount of data (multiple requests for the roots), but it will be compacted down in `distinct`.
104 // We could reduce the amount of data by producing the requests iteratively, with a distinct in
105 // the loop to pre-suppress duplicate requests. This comes at a complexity cost, though.
106 let requests =
107 queries
108 .flat_map(|(idx, key)|
109 (0 .. 64)
110 .filter(move |i| (idx & (1usize << i)) != 0) // set bits require help.
111 .map(move |i| ((idx >> i) - 1, i, key.clone())) // width 2^i interval.
112 )
113 .distinct();
114
115 // Acquire each requested range.
116 let full_ranges =
117 ranges
118 .semijoin(&requests);
119
120 // Each requested range should exist, even if as a zero range, for correct reconstruction.
121 let zero_ranges =
122 full_ranges
123 .map(move |((idx, log, key), _)| ((idx, log, key), zero0.clone()))
124 .negate()
125 .concat(&requests.map(move |(idx, log, key)| ((idx, log, key), zero1.clone())));
126
127 // Merge occupied and empty ranges.
128 let used_ranges = full_ranges.concat(&zero_ranges);
129
130 // Each key should initiate a value of `zero` at position `0`.
131 let init_states =
132 queries
133 .map(move |(_, key)| ((0, key), zero2.clone()))
134 .distinct();
135
136 // Iteratively expand assigned values by joining existing ranges with current assignments.
137 init_states
138 .iterate(|states| {
139 used_ranges
140 .enter(&states.scope())
141 .map(|((pos, log, key), data)| ((pos << log, key), (log, data)))
142 .join_map(states, move |&(pos, ref key), &(log, ref data), state|
143 ((pos + (1 << log), key.clone()), combine(key, state, data)))
144 .concat(&init_states.enter(&states.scope()))
145 .distinct()
146 })
147 .semijoin(&queries)
148}