//! A library for creating the memoization of a function
//! that uses cache to improve the performance.
//!
//! You can create the memoization with `unsync::memoize` function.
//! It uses a `HashMap` for caching.
//!
//! ```
//! use fn_memo::{FnMemo, unsync, recur_fn::direct};
//!
//! let mul_2 = unsync::memoize(direct(|n| {
//!     println!("Evaluating {}", n);
//!     n * 2
//! }));
//!
//! assert_eq!(0, mul_2.call(0)); // Output "Evaluating 0."
//! assert_eq!(4, mul_2.call(2)); // Output "Evaluating 2."
//! assert_eq!(10, mul_2.call(5)); // Output "Evaluating 5."
//! assert_eq!(4, mul_2.call(2)); // No output. The result is cached.
//! mul_2.clear_cache();
//! assert_eq!(4, mul_2.call(2)); // Output "Evaluating 2."
//! ```
//!
//! The `memoize` function takes a `RecurFn` argument,
//! which allows you to memoize a recursive function and each recursion
//! result will be cached. See
//! [the API reference of `recur-fn`](https://docs.rs/recur-fn/)
//! for details.
//!
//! ```
//! use fn_memo::{FnMemo, unsync, recur_fn::recur_fn};
//!
//! let fib = unsync::memoize(recur_fn(|fib, n: usize| {
//!     println!("Evaluating {}", n);
//!     if n <= 1 {
//!         n
//!     } else {
//!         fib(n - 1) + fib(n - 2)
//!     }
//! }));
//!
//! assert_eq!(55, fib.call(10));
//! assert_eq!(5, fib.call(5));
//! ```
//!
//! The code above will output the evaluation from 0 to 10.
//! Each of them is output only once.
//!
//! For sequences (i.e. functions that take a `usize` as the argument),
//! you can also use `unsync::memoize_seq`. It uses a `Vec` as the bucket
//! for caching, so it has better performance, but it takes memory
//! proportional to the largest argument that has been cached.
//!
//! You can customize the data structure of the cache by implementing the
//! `unsync::Cache` trait and create the memoization with the `unsync::Memo::new` method.
//!
//! The APIs under the `unsync` namespace are for single-threaded use.
//! The result of `unsync::memoize` is not `Sync` even if the cached function is.
//!
//! ```compile_fail
//! use std::{sync::Arc, thread};
//! use fn_memo::{FnMemo, unsync, recur_fn::direct};
//!
//! let f = Arc::new(unsync::memoize(direct(|n: i32| n)));
//! thread::spawn(move || { f }); // Compile Error
//! ```
//!
//! The synchronized memoization APIs are under `sync` namespace.
//!
//! ```
//! use fn_memo::{FnMemo, sync::chashmap, recur_fn::direct};
//! use std::thread;
//! use std::sync::Arc;
//!
//! let mul_2 = Arc::new(chashmap::memoize(direct(|n| {
//!     println!("Evaluating {}", n);
//!     n * 2
//! })));
//!
//! let mut threads = Vec::new();
//! for _ in 0..4 {
//!     threads.push(thread::spawn({
//!         let mul_2 = Arc::clone(&mul_2);
//!         move || {
//!             for n in 0..10 {
//!                 assert_eq!(n*2, mul_2.call(n));
//!             }
//!         }
//!     }));
//! }
//! for thread in threads {
//!     thread.join().unwrap();
//! }
//! ```
//!
//! The code above will output the evaluation from 0 to 9.
//! Each of them is output only once.
#[cfg(feature = "sync")]
pub mod sync;
pub mod unsync;

pub use recur_fn;

/// The memoized version of a function.
///
/// Implementors pair a function with a cache of previously computed
/// results, keyed by the argument.
pub trait FnMemo<Arg, Output> {
    /// Calls the function. If the result for `arg` is already cached,
    /// it returns the cached result; otherwise, it calculates the result
    /// and adds it to the cache before returning it.
    fn call(&self, arg: Arg) -> Output;
    /// Clears the cache, so subsequent calls re-evaluate the function.
    fn clear_cache(&self);
}