use std::pin::Pin;
use std::sync::{Arc, Mutex, PoisonError, Weak};
use std::thread;
use std::time::{Duration, Instant};

use futures::prelude::*;
use linked_hash_map::LinkedHashMap;
use log::trace;

use crate::middleware::session::backend::{Backend, NewBackend, SessionFuture};
use crate::middleware::session::{SessionError, SessionIdentifier};

/// Type alias for the `MemoryBackend` storage container.
type MemoryMap = Mutex<LinkedHashMap<String, (Instant, Vec<u8>)>>;

/// Defines the in-process memory based session storage.
///
/// This is the default implementation, which is used by `NewSessionMiddleware::default()`.
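///
/// ## Examples
///
/// A sketch of the equivalent of `NewSessionMiddleware::default()`, built explicitly from a
/// default `MemoryBackend` (one hour `ttl`):
///
/// ```rust
/// # extern crate gotham;
/// # use gotham::middleware::session::{MemoryBackend, NewSessionMiddleware};
/// # fn main() {
/// NewSessionMiddleware::new(MemoryBackend::default())
/// # ;}
/// ```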
#[derive(Clone)]
pub struct MemoryBackend {
    // Intuitively, a global `Mutex<_>` sounded like the slowest option. However, in some
    // benchmarking it proved to be the fastest of the options that were tried:
    //
    // 1. Background thread containing all data, acting as an internal "server" for session data,
    //    passing messages via `std::sync::mpsc::sync_channel`;
    // 2. Background thread maintaining only LRU data for each session ID, and purging them when
    //    they exceed the TTL, passing messages via a `std::sync::mpsc::sync_channel`;
    // 3. The same options, but with messages being passed via `crossbeam::sync::MsQueue`;
    // 4. Naive, global mutex.
    //
    // The performance was about 10-15% higher with the naive implementation, when measured in a
    // similarly naive benchmark using `wrk` and a lightweight sample app. Real-world use cases
    // might show a need to replace this with a smarter implementation, but today there's very
    // little overhead here.
    storage: Arc<MemoryMap>,
}

impl MemoryBackend {
    /// Creates a new `MemoryBackend` where sessions expire and are removed after the `ttl` has
    /// elapsed.
    ///
    /// Alternatively, `MemoryBackend::default()` creates a `MemoryBackend` with a `ttl` of one
    /// hour.
    ///
    /// ## Examples
    ///
    /// ```rust
    /// # extern crate gotham;
    /// # use std::time::Duration;
    /// # use gotham::middleware::session::{MemoryBackend, NewSessionMiddleware};
    /// # fn main() {
    /// NewSessionMiddleware::new(MemoryBackend::new(Duration::from_secs(3600)))
    /// # ;}
    /// ```
    pub fn new(ttl: Duration) -> MemoryBackend {
        let storage = Arc::new(Mutex::new(LinkedHashMap::new()));

        {
            let storage = Arc::downgrade(&storage);
            thread::spawn(move || cleanup_loop(storage, ttl));
        }

        MemoryBackend { storage }
    }
}

impl Default for MemoryBackend {
    fn default() -> MemoryBackend {
        MemoryBackend::new(Duration::from_secs(3600))
    }
}

impl NewBackend for MemoryBackend {
    type Instance = MemoryBackend;

    fn new_backend(&self) -> anyhow::Result<Self::Instance> {
        Ok(self.clone())
    }
}

impl Backend for MemoryBackend {
    fn persist_session(
        &self,
        identifier: SessionIdentifier,
        content: &[u8],
    ) -> Result<(), SessionError> {
        match self.storage.lock() {
            Ok(mut storage) => {
                storage.insert(identifier.value, (Instant::now(), Vec::from(content)));
                Ok(())
            }
            Err(PoisonError { .. }) => {
                unreachable!("session memory backend lock poisoned, HashMap panicked?")
            }
        }
    }

    fn read_session(&self, identifier: SessionIdentifier) -> Pin<Box<SessionFuture>> {
        match self.storage.lock() {
            Ok(mut storage) => match storage.get_refresh(&identifier.value) {
                Some((instant, value)) => {
                    // Refresh the timestamp as well as the LRU position (done by
                    // `get_refresh`), so an actively read session isn't expired by the
                    // cleanup thread.
                    *instant = Instant::now();
                    future::ok(Some(value.clone())).boxed()
                }
                None => future::ok(None).boxed(),
            },
            Err(PoisonError { .. }) => {
                unreachable!("session memory backend lock poisoned, HashMap panicked?")
            }
        }
    }

    fn drop_session(&self, identifier: SessionIdentifier) -> Result<(), SessionError> {
        match self.storage.lock() {
            Ok(mut storage) => {
                storage.remove(&identifier.value);
                Ok(())
            }
            Err(PoisonError { .. }) => {
                unreachable!("session memory backend lock poisoned, HashMap panicked?")
            }
        }
    }
}

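/// Sweeps the session map in a loop, expiring sessions that have outlived the `ttl`.
///
/// The loop holds only a `Weak` reference to the storage, so once every `MemoryBackend` clone
/// has been dropped, the upgrade fails and the thread exits.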
fn cleanup_loop(storage: Weak<MemoryMap>, ttl: Duration) {
    loop {
        // If the original `Arc<_>` goes away, we don't need to keep sweeping the cache, because
        // it's gone too. We can bail out of this thread when the weak ref fails to upgrade.
        let storage = match storage.upgrade() {
            None => break,
            Some(storage) => storage,
        };

        let duration = match storage.lock() {
            Err(PoisonError { .. }) => break,
            Ok(mut storage) => cleanup_once(&mut storage, ttl),
        };

        if let Some(duration) = duration {
            thread::sleep(duration);
        }
    }
}

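/// Performs a single cleanup step, expiring at most the least recently used session (the front
/// of the `LinkedHashMap`). Returns how long the sweeper should sleep before the next check, or
/// `None` to check the next entry immediately.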
fn cleanup_once(
    storage: &mut LinkedHashMap<String, (Instant, Vec<u8>)>,
    ttl: Duration,
) -> Option<Duration> {
    match storage.front() {
        Some((_, &(instant, _))) => {
            let age = instant.elapsed();

            if age >= ttl {
                if let Some((key, _)) = storage.pop_front() {
                    trace!(" expired session {} and removed from MemoryBackend", key);
                }

                // We just removed one, so skip the sleep and check the next entry
                None
            } else {
                // Shrink the storage after a spike in sessions has passed.
                //
                // Even with this, memory usage won't always drop back to pre-spike levels,
                // because the allocator and OS can hold onto freed memory.
                //
                // The thresholds are arbitrary, chosen so that resizes don't happen too often:
                // the capacity must be at least 65536 and more than 8x the live session count
                // (e.g. capacity 65536 with fewer than 8192 sessions triggers a shrink). Powers
                // of 2 seemed like a reasonable idea, to let the optimiser potentially shave
                // off a few CPU cycles. Totally unscientific though.
                let cap = storage.capacity();
                let len = storage.len();

                if cap >= 65536 && cap / 8 > len {
                    storage.shrink_to_fit();

                    trace!(
                        " session backend had capacity {} and {} sessions, new capacity: {}",
                        cap,
                        len,
                        storage.capacity()
                    );
                }

                // Sleep until the next entry expires, but for at least 1 second
                Some(::std::cmp::max(ttl - age, Duration::from_secs(1)))
            }
        }
        // No sessions; sleep for the TTL, because that's the soonest we'll need to expire anything
        None => Some(ttl),
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use rand;

    #[test]
    fn cleanup_test() {
        let mut storage = LinkedHashMap::new();

        storage.insert(
            "abcd".to_owned(),
            (Instant::now() - Duration::from_secs(2), vec![]),
        );

        cleanup_once(&mut storage, Duration::from_secs(1));
        assert!(storage.is_empty());
    }

    #[test]
    fn cleanup_join_test() {
        let storage = Arc::new(Mutex::new(LinkedHashMap::new()));
        let weak = Arc::downgrade(&storage);

        let handle = thread::spawn(move || cleanup_loop(weak, Duration::from_millis(1)));

        drop(storage);
        handle.join().unwrap();
    }

    #[test]
    fn memory_backend_test() {
        let new_backend = MemoryBackend::new(Duration::from_millis(100));
        let bytes: Vec<u8> = (0..64).map(|_| rand::random()).collect();
        let identifier = SessionIdentifier {
            value: "totally_random_identifier".to_owned(),
        };

        new_backend
            .new_backend()
            .expect("can't create backend for write")
            .persist_session(identifier.clone(), &bytes[..])
            .expect("failed to persist");

        let received = futures::executor::block_on(
            new_backend
                .new_backend()
                .expect("can't create backend for read")
                .read_session(identifier.clone()),
        )
        .expect("no response from backend")
        .expect("session data missing");

        assert_eq!(bytes, received);
    }

    #[test]
    fn memory_backend_refresh_test() {
        let new_backend = MemoryBackend::new(Duration::from_millis(100));
        let bytes: Vec<u8> = (0..64).map(|_| rand::random()).collect();
        let identifier = SessionIdentifier {
            value: "totally_random_identifier".to_owned(),
        };
        let bytes2: Vec<u8> = (0..64).map(|_| rand::random()).collect();
        let identifier2 = SessionIdentifier {
            value: "another_totally_random_identifier".to_owned(),
        };

        let backend = new_backend
            .new_backend()
            .expect("can't create backend for write");

        backend
            .persist_session(identifier.clone(), &bytes[..])
            .expect("failed to persist");

        backend
            .persist_session(identifier2.clone(), &bytes2[..])
            .expect("failed to persist");

        {
            let mut storage = backend.storage.lock().expect("couldn't lock storage");
            assert_eq!(
                storage.front().expect("no front element").0,
                &identifier.value
            );

            assert_eq!(
                storage.back().expect("no back element").0,
                &identifier2.value
            );
        }

        futures::executor::block_on(backend.read_session(identifier.clone()))
            .expect("failed to read session");

        {
            // Reading `identifier` refreshed it, so the two identifiers have swapped places
            let mut storage = backend.storage.lock().expect("couldn't lock storage");
            assert_eq!(
                storage.front().expect("no front element").0,
                &identifier2.value
            );

            assert_eq!(
                storage.back().expect("no back element").0,
                &identifier.value
            );
        }
    }
}