1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
//! Query batching utility

extern crate batch_recv;
extern crate crossbeam_channel as chan;
extern crate futures;
extern crate itertools;
extern crate worker_sentinel;

use std::fmt::Debug;
use futures::sync::oneshot;
use itertools::Itertools;
use worker_sentinel::{Work, WorkFactory};
use batch_recv::BatchRecv;

/// Trait for values which is identifiable by unique `Key`
///
/// Values must be cloneable because a single value will be cloned to the respective
/// multiple callers if some callers request by the same key.
pub trait Value: Debug + Clone + Send {
    /// Key is used to route the values to the caller.
    ///
    /// `Ord` is required because the loader sorts and groups queued requests
    /// by key, and merge-joins them against the sorted backend results.
    type Key: Ord + Clone + Send + 'static;
    /// Returns a `Key`
    fn key(&self) -> &Self::Key;
}

/// Trait for querier backend
pub trait Backend: Send + 'static {
    /// The value type this backend produces.
    type Value: Value;
    /// Error type; it is cloned so that every caller waiting on the failed
    /// batch receives a copy.
    type Error: Debug + Clone + Send;
    /// This function provides the actual data fetching logic.
    ///
    /// The loader calls this with a sorted, deduplicated iterator of the
    /// keys queued in the current batch. Keys with no corresponding value
    /// may simply be absent from the returned `Vec` (callers get `Ok(None)`).
    fn batch_load<'a, I>(&self, keys: I) -> Result<Vec<Self::Value>, Self::Error>
    where
        I: Iterator<Item = &'a <Self::Value as Value>::Key> + 'a;
}

/// Factory trait that produces a fresh `Backend` for each worker thread.
///
/// Must be `Send + Sync` because the same factory is shared by all
/// worker threads spawned via `worker_sentinel`.
pub trait NewBackend: Send + Sync + 'static {
    type Backend: Backend;
    /// Called when a worker thread is (re)spawned to build its own backend.
    fn new_backend(&self) -> Self::Backend;
}
/// Blanket impl: any thread-safe `Fn() -> B` closure can serve as a factory.
impl<F, B> NewBackend for F
where
    B: Backend,
    F: Fn() -> B + Send + Sync + 'static,
{
    type Backend = B;
    fn new_backend(&self) -> Self::Backend {
        self()
    }
}

/// Outcome of a single load: `Ok(Some(v))` on a hit, `Ok(None)` when the
/// backend returned no value for the key, `Err` when the whole batch failed.
type LoadResult<B> = Result<Option<<B as Backend>::Value>, <B as Backend>::Error>;
/// One queued request: the requested key plus the oneshot sender used to
/// deliver the result back to the caller.
type Message<B> = (
    <<B as Backend>::Value as Value>::Key,
    oneshot::Sender<LoadResult<B>>,
);
// Sending / receiving halves of the shared request queue.
type QueueTx<B> = chan::Sender<Message<B>>;
type QueueRx<B> = chan::Receiver<Message<B>>;

/// Batched data loader interface
///
/// Loader is composed of the queue which associated to the backend.
///
/// Cloning a `Loader` clones only the queue sender, so clones share the
/// same worker threads and batching queue.
#[derive(Clone)]
pub struct Loader<B>
where
    B: Backend,
{
    // Sending half of the request queue consumed by the worker threads.
    queue_tx: QueueTx<B>,
}

impl<B> Loader<B>
where
    B: Backend,
{
    /// Create new loader
    ///
    /// `concurrent` sets the number of threads which runs the backend.
    /// `new_backend` will be called in spawning the new thread.
    /// Each worker drains up to `batch_size` queued requests per backend call.
    pub fn new<N>(new_backend: N, batch_size: usize, concurrent: usize) -> Loader<B>
    where
        N: NewBackend<Backend = B> + 'static,
    {
        let (tx, rx) = chan::unbounded();
        worker_sentinel::spawn(
            concurrent,
            BackendWorkFactory {
                queue_rx: rx,
                new_backend,
                batch_size,
            },
        );
        Loader { queue_tx: tx }
    }

    /// Load value by key
    ///
    /// This function writes the key to the queue and returns a Future to wait the result.
    /// Fails with `SendError(key)` if all worker threads are gone and the
    /// queue is disconnected; the key is handed back to the caller.
    pub fn load(
        &self,
        key: <B::Value as Value>::Key,
    ) -> Result<oneshot::Receiver<LoadResult<B>>, chan::SendError<<B::Value as Value>::Key>> {
        let (reply_tx, reply_rx) = oneshot::channel();
        match self.queue_tx.send((key, reply_tx)) {
            Ok(()) => Ok(reply_rx),
            Err(err) => {
                // Strip the oneshot sender off the rejected message so the
                // error exposes only the caller's key.
                let (key, _) = err.into_inner();
                Err(chan::SendError(key))
            }
        }
    }
}

/// State owned by one worker thread: its private backend instance plus a
/// shared handle on the request queue.
struct BackendWork<B>
where
    B: Backend,
{
    // Receiving half of the shared request queue.
    queue_rx: QueueRx<B>,
    // Backend built by `NewBackend::new_backend` for this thread.
    backend: B,
    // Maximum number of requests drained per `work` iteration.
    batch_size: usize,
}
impl<B> Work for BackendWork<B>
where
    B: Backend,
{
    /// Receive one batch of requests, query the backend once, and fan the
    /// results back out to the waiting callers.
    ///
    /// Returns `Some(self)` to keep the worker alive for the next batch, or
    /// `None` (worker exits) when the request queue is disconnected.
    fn work(self) -> Option<Self> {
        // Block for at least one request, then drain up to `batch_size`.
        // `ok()?` turns a disconnected queue into a clean worker shutdown.
        let mut requests: Vec<_> = self.queue_rx.batch_recv(self.batch_size).ok()?.collect();
        // `group_by` only groups *consecutive* equal keys, so sort first.
        requests.sort_by(|&(ref left, _), &(ref right, _)| left.cmp(&right));
        let req_groups_by_key = requests.into_iter().group_by(|&(ref key, _)| key.clone());
        let req_groups_by_key_vec: Vec<_> = req_groups_by_key.into_iter().collect();

        let ret = {
            // One key per group: sorted and deduplicated.
            let keys_iter = req_groups_by_key_vec.iter().map(|&(ref key, _)| key);
            self.backend.batch_load(keys_iter)
        };
        let mut values = match ret {
            Ok(values) => values,
            Err(err) => {
                // The whole batch failed: clone the error to every caller.
                // A caller may have dropped its receiver (cancelled future);
                // ignore the send error instead of panicking, which would
                // kill this worker and strand the rest of the batch.
                for (_, req_group) in req_groups_by_key_vec {
                    for (_, cb) in req_group {
                        let _ = cb.send(Err(err.clone()));
                    }
                }
                return Some(self);
            }
        };
        // Sort values by key so they can be merge-joined against the
        // already-sorted request groups.
        values.sort_by(|ref left, ref right| left.key().cmp(right.key()));
        let joined = req_groups_by_key_vec
            .into_iter()
            .merge_join_by(values.into_iter(), |&(ref key, _), value| {
                key.cmp(value.key())
            });
        for pair in joined {
            use itertools::EitherOrBoth::{Both, Left};
            match pair {
                // Key requested but no matching value: answer Ok(None).
                // Best-effort send: a dropped receiver must not panic here.
                Left((_, req_group)) => for (_, cb) in req_group {
                    let _ = cb.send(Ok(None));
                },
                // Key matched a value: clone it to every caller on that key.
                Both((_, req_group), value) => for (_, cb) in req_group {
                    let _ = cb.send(Ok(Some(value.clone())));
                },
                // `Right` (a value with no requesting key) cannot occur
                // because the backend was given only requested keys.
                _ => unreachable!(),
            }
        }
        Some(self)
    }
}

/// Shared factory handed to `worker_sentinel`; builds a `BackendWork` for
/// each (re)spawned worker thread.
struct BackendWorkFactory<N>
where
    N: NewBackend,
{
    // Receiving half of the request queue, cloned into every worker.
    queue_rx: QueueRx<N::Backend>,
    // User-supplied backend factory.
    new_backend: N,
    // Per-batch request limit, copied into every worker.
    batch_size: usize,
}
impl<N> WorkFactory for BackendWorkFactory<N>
where
    N: NewBackend,
{
    type Work = BackendWork<N::Backend>;

    /// Assemble one worker: a fresh backend instance plus a clone of the
    /// shared queue receiver and the batch-size limit.
    fn build(&self) -> Self::Work {
        BackendWork {
            backend: self.new_backend.new_backend(),
            queue_rx: self.queue_rx.clone(),
            batch_size: self.batch_size,
        }
    }
}

#[cfg(test)]
mod tests {
    // NOTE: module was previously misspelled `teet`.
    use futures::{future, Future};
    use super::{Backend, Loader, Value};

    /// Test value: carries a key and its half.
    #[derive(Debug, Clone, PartialEq)]
    struct HalfValue {
        key: u32,
        half: u32,
    }
    impl Value for HalfValue {
        type Key = u32;
        fn key(&self) -> &u32 {
            &self.key
        }
    }

    /// Backend returning `key / 2` for even keys and nothing for odd keys,
    /// so both the hit (`Some`) and miss (`None`) paths are exercised.
    struct HalfBackend;
    impl Backend for HalfBackend {
        type Value = HalfValue;
        type Error = ();
        fn batch_load<'a, I>(&self, keys: I) -> Result<Vec<Self::Value>, Self::Error>
        where
            I: Iterator<Item = &'a <Self::Value as Value>::Key> + 'a,
        {
            let ret = keys.filter_map(|&key| {
                if key % 2 == 0 {
                    Some(HalfValue { key, half: key / 2 })
                } else {
                    None
                }
            }).collect();
            Ok(ret)
        }
    }

    #[test]
    fn test_loader() {
        let loader = Loader::new(|| HalfBackend, 10, 1);

        // Odd keys are absent from the backend's results -> Ok(None).
        let f1 = loader
            .load(1)
            .unwrap()
            .map(|v| assert!(v.unwrap().is_none()));
        let f3 = loader
            .load(3)
            .unwrap()
            .map(|v| assert!(v.unwrap().is_none()));
        // Even keys resolve to Ok(Some(HalfValue { .. })).
        let f2 = loader.load(2).unwrap().map(|v| {
            assert_eq!(v.unwrap().unwrap(), HalfValue { key: 2, half: 1 })
        });
        let f4 = loader.load(4).unwrap().map(|v| {
            assert_eq!(v.unwrap().unwrap(), HalfValue { key: 4, half: 2 })
        });
        future::join_all(vec![
            Box::new(f1) as Box<Future<Item = _, Error = _>>,
            Box::new(f2) as Box<_>,
            Box::new(f3) as Box<_>,
            Box::new(f4) as Box<_>,
        ]).wait()
            .unwrap();
    }
}
}