use std::fmt::{self, Debug};
use std::ptr;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::SeqCst;

use bincode::{Infinite, deserialize, serialize};
use serde::de::DeserializeOwned;
use serde::Serialize;

use super::*;

/// Fragment chains longer than this are consolidated during `get`.
const MAX_FRAG_LEN: usize = 2;

pub trait Materializer: Send + Sync {
    type MaterializedPage;
    type PartialPage: Serialize + DeserializeOwned;
    type Recovery;

    /// Used to generate the result of `get` requests on the `PageCache`
    fn materialize(&self, frags: Vec<Self::PartialPage>) -> Self::MaterializedPage;

    /// Used to compress long chains of partial pages into a condensed form
    /// during compaction.
    fn consolidate(&self, frags: Vec<Self::PartialPage>) -> Vec<Self::PartialPage>;

    /// Used to feed custom recovery information back to a higher-level abstraction
    /// during startup. For example, a B-Link tree must know what the current
    /// root node is before it can start serving requests.
    fn recover(&mut self, page: Self::MaterializedPage) -> Option<Self::Recovery>;
}
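
// Illustrative sketch only (not part of the original API): a minimal
// `Materializer` whose pages are strings assembled from appended string
// fragments. The `StringMaterializer` name is invented for the tests below.
#[cfg(test)]
struct StringMaterializer;

#[cfg(test)]
impl Materializer for StringMaterializer {
    type MaterializedPage = String;
    type PartialPage = String;
    type Recovery = ();

    fn materialize(&self, frags: Vec<String>) -> String {
        // a page is the concatenation of its fragments, oldest first
        frags.concat()
    }

    fn consolidate(&self, frags: Vec<String>) -> Vec<String> {
        // collapse a long chain into a single equivalent fragment
        vec![frags.concat()]
    }

    fn recover(&mut self, _page: String) -> Option<()> {
        // this toy materializer carries no recovery state
        None
    }
}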

pub struct PageCache<L, PM>
    where L: Log,
          PM: Materializer
{
    t: PM,
    inner: Radix<stack::Stack<*const PM::PartialPage>>,
    max_id: AtomicUsize,
    free: Stack<PageID>,
    log: Box<L>,
}

unsafe impl<L: Log, PM: Materializer> Send for PageCache<L, PM> {}
unsafe impl<L: Log, PM: Materializer> Sync for PageCache<L, PM> {}

impl<L: Log, PM: Materializer> Debug for PageCache<L, PM> {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        f.write_str(&*format!("PageCache {{ max: {:?} free: {:?} }}\n",
                              self.max_id.load(SeqCst),
                              self.free))
    }
}

impl<PM> PageCache<LockFreeLog, PM>
    where PM: Materializer,
          PM::PartialPage: Clone
{
    /// Create a new `PageCache` backed by a `LockFreeLog`, optionally rooted
    /// at `path`.
    pub fn new(pm: PM, path: Option<String>) -> PageCache<LockFreeLog, PM> {
        PageCache {
            t: pm,
            inner: Radix::default(),
            max_id: AtomicUsize::new(0),
            free: Stack::default(),
            log: Box::new(LockFreeLog::start_system(path)),
        }
    }

    /// Read updates from the log and apply them to the pagecache, returning
    /// any recovery state reported by the materializer.
    pub fn recover(&mut self, from: LogID) -> Option<PM::Recovery> {
        let mut last_good_id = 0;
        let mut free_pids = vec![];
        let mut recovery = None;

        for (log_id, bytes) in self.log.iter_from(from) {
            if let Ok(append) = deserialize::<LoggedUpdate<PM::PartialPage>>(&*bytes) {
                // track the offset just past the last successfully decoded entry
                last_good_id = log_id + bytes.len() as LogID + HEADER_LEN as LogID;
                match append.update {
                    Update::Append(appends) => {
                        let stack = self.inner.get(append.pid).unwrap();

                        unsafe {
                            for append in appends {
                                (*stack).push(raw(append));
                            }

                            let (_, stack_iter) = (*stack).iter_at_head();

                            let mut partial_pages: Vec<PM::PartialPage> =
                                stack_iter.map(|ptr| (**ptr).clone()).collect();

                            partial_pages.reverse();

                            let new_page = self.t.materialize(partial_pages);

                            let r = self.t.recover(new_page);
                            if r.is_some() {
                                recovery = r;
                            }
                        }
                    }
                    Update::Compact(appends) => {
                        // TODO GC previous stack
                        // TODO feed compacted page to recover?
                        let _prev = self.inner.del(append.pid);
                        let stack = raw(Stack::default());
                        self.inner.insert(append.pid, stack).unwrap();

                        for append in appends {
                            unsafe {
                                (*stack).push(raw(append));
                            }
                        }
                    }
                    Update::Del => {
                        self.inner.del(append.pid);
                        free_pids.push(append.pid);
                    }
                    Update::Alloc => {
                        let stack = raw(Stack::default());
                        self.inner.insert(append.pid, stack).unwrap();
                        free_pids.retain(|&pid| pid != append.pid);
                        if self.max_id.load(SeqCst) < append.pid {
                            self.max_id.store(append.pid, SeqCst);
                        }
                    }
                }
            }
        }
        free_pids.sort();
        free_pids.reverse();
        for free_pid in free_pids {
            self.free.push(free_pid);
        }

        // ensure the log is stable up to the last successfully recovered entry
        self.log.make_stable(last_good_id);

        recovery
    }

    /// Allocate a page ID (reusing a freed one when available), install an
    /// empty fragment stack for it, and log the allocation.
    pub fn allocate(&self) -> (PageID, *const stack::Node<*const PM::PartialPage>) {
        let pid = self.free.pop().unwrap_or_else(|| self.max_id.fetch_add(1, SeqCst));
        let stack = raw(Stack::default());
        self.inner.insert(pid, stack).unwrap();

        // write info to log
        let append: LoggedUpdate<PM::PartialPage> = LoggedUpdate {
            pid: pid,
            update: Update::Alloc,
        };
        let bytes = serialize(&append, Infinite).unwrap();
        self.log.write(bytes);

        (pid, ptr::null())
    }

    /// Free a page, logging the deletion and returning its ID to the free
    /// list for later reuse.
    pub fn free(&self, pid: PageID) {
        // TODO epoch-based gc for reusing pid & freeing stack
        let stack_ptr = self.inner.del(pid);

        // write info to log
        let append: LoggedUpdate<PM::PartialPage> = LoggedUpdate {
            pid: pid,
            update: Update::Del,
        };
        let bytes = serialize(&append, Infinite).unwrap();
        self.log.write(bytes);

        // add pid to free stack to reduce fragmentation over time
        self.free.push(pid);

        unsafe {
            let ptrs = (*stack_ptr).pop_all();
            for ptr in ptrs {
                // reclaim the boxed partial page that was leaked via `raw`
                drop(Box::from_raw(ptr as *mut PM::PartialPage));
            }
        }
    }

    /// Materialize the page for `pid`, consolidating its fragment chain when
    /// it has grown past `MAX_FRAG_LEN`. Returns the materialized page along
    /// with the stack head to pass to a subsequent `append`.
    pub fn get(&self,
               pid: PageID)
               -> Option<(PM::MaterializedPage, *const stack::Node<*const PM::PartialPage>)> {
        let stack_ptr = self.inner.get(pid);
        if stack_ptr.is_none() {
            return None;
        }

        let stack_ptr = stack_ptr.unwrap();

        let mut head = unsafe { (*stack_ptr).head() };

        let stack_iter = StackIter::from_ptr(head);

        let mut partial_pages: Vec<PM::PartialPage> =
            unsafe { stack_iter.map(|ptr| (**ptr).clone()).collect() };
        partial_pages.reverse();
        let partial_pages = partial_pages;

        if partial_pages.len() > MAX_FRAG_LEN {
            let consolidated = self.t.consolidate(partial_pages.clone());

            let node = node_from_frag_vec(consolidated.clone());

            // log consolidation to disk
            let append = LoggedUpdate {
                pid: pid,
                update: Update::Compact(consolidated),
            };
            let bytes = serialize(&append, Infinite).unwrap();
            let log_reservation = self.log.reserve(bytes);

            let ret = unsafe { (*stack_ptr).cas(head, node) };

            if let Ok(new) = ret {
                // consolidation succeeded!
                log_reservation.complete();
                head = new;
                // TODO GC old stack (head)
            } else {
                log_reservation.abort();
            }
        }

        let materialized = self.t.materialize(partial_pages);

        Some((materialized, head))
    }

    /// Try to append a partial page to `pid`'s fragment stack with a CAS
    /// against `old`, reserving a log slot first and completing or aborting
    /// the reservation based on the CAS outcome.
    pub fn append(&self,
                  pid: PageID,
                  old: *const stack::Node<*const PM::PartialPage>,
                  new: PM::PartialPage)
                  -> Result<*const stack::Node<*const PM::PartialPage>,
                            *const stack::Node<*const PM::PartialPage>> {
        let append = LoggedUpdate {
            pid: pid,
            update: Update::Append(vec![new.clone()]),
        };
        let bytes = serialize(&append, Infinite).unwrap();
        let log_reservation = self.log.reserve(bytes);
        let log_offset = log_reservation.log_id();

        let stack_ptr = self.inner.get(pid).unwrap();
        let result = unsafe { (*stack_ptr).cap(old, raw(new)) };

        if let Err(_ptr) = result {
            log_reservation.abort();
        } else {
            log_reservation.complete();
        }
        self.log.make_stable(log_offset);

        // TODO GC
        result
    }
}

/// `LoggedUpdate` is for writing blocks of `Update`s to disk
/// sequentially, to reduce IO during page reads.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
struct LoggedUpdate<T> {
    pid: PageID,
    update: Update<T>,
}

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
enum Update<T> {
    Append(Vec<T>),
    Compact(Vec<T>),
    Del,
    Alloc,
}
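
#[cfg(test)]
mod example_usage {
    use super::*;

    // Hedged usage sketch: exercises the allocate -> append -> get round trip
    // using the illustrative `StringMaterializer` above. Passing `None` for
    // the log path is assumed to start a throwaway log; adjust for real use.
    #[test]
    fn append_then_materialize() {
        let pc = PageCache::new(StringMaterializer, None);

        let (pid, head) = pc.allocate();
        let head = pc.append(pid, head, "hello ".to_owned()).unwrap();
        pc.append(pid, head, "world".to_owned()).unwrap();

        // `get` replays the fragment chain oldest-first through `materialize`
        let (page, _head) = pc.get(pid).unwrap();
        assert_eq!(page, "hello world");
    }

    // Hedged sketch: `LoggedUpdate` round-trips through bincode, which is the
    // property the replay loop in `recover` depends on.
    #[test]
    fn logged_update_round_trips() {
        let update: LoggedUpdate<String> = LoggedUpdate {
            pid: 7,
            update: Update::Append(vec!["frag".to_owned()]),
        };
        let bytes = serialize(&update, Infinite).unwrap();
        let decoded: LoggedUpdate<String> = deserialize(&bytes).unwrap();
        assert_eq!(update, decoded);
    }
}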