spaced_rs/lib.rs
//! This is a small library implementing an SM2-inspired spaced repetition algorithm. It tries
//! to assume as little as possible about your items. It also includes some self-adjusting
//! behaviour so that we model the forgetting curve as well as possible, and some randomness so
//! that items created together are decoupled and their review events spread out in time.
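//!
//! A minimal usage sketch (the 0.9 retention target below is just illustrative):
//!
//! ```
//! use spaced_rs::{schedule, SchedulingData, UpdateParameters, UserReview};
//!
//! // schedule a brand new item after its first successful review
//! let item = SchedulingData::default();
//! let next = schedule(item, UserReview::JustEnough, UpdateParameters::default(), 0.9);
//! assert!(next.interval >= 1);
//! ```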
#![warn(missing_docs)]
#![warn(rustdoc::missing_crate_level_docs)]

// external crate imports
use rand::Rng;

/// User evaluation of a review event. Did it feel too hard, just difficult enough, or too easy?
#[derive(Clone, Debug)]
pub enum UserReview {
    /// The forgetting curve decreases faster than expected; increase the difficulty of the item
    TooHard,
    /// The forgetting curve decreases just as expected; keep the difficulty unchanged
    JustEnough,
    /// The forgetting curve decreases slower than expected; decrease the difficulty of the item
    TooEasy,
}

/// Computes the number of days between the current review event and the next one, given a wanted
/// recall probability. Expects a positive forgetting rate and a probability strictly between 0
/// and 1. It does not need to be called directly but is exposed anyway.
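///
/// A small doctest sketch (0.1 is roughly -ln(0.9), matching the library defaults):
///
/// ```
/// use spaced_rs::compute_interval;
///
/// // ln(0.9) / -0.1 ≈ 1.05, truncated to whole days
/// assert_eq!(compute_interval(0.1, 0.9), 1);
/// ```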
#[inline]
pub fn compute_interval(forgetting_rate: f32, probability: f32) -> i32 {
    assert!(forgetting_rate > 0.0);
    assert!(probability > 0.0 && probability < 1.0);

    // ln(p) is negative for p in (0, 1) and so is -forgetting_rate, making the quotient positive
    let n_days_f = probability.ln() / (-forgetting_rate);
    n_days_f as i32
}

/// Struct containing item specific data related to its scheduling.
#[derive(Clone, Debug)]
pub struct SchedulingData {
    /// the number of days between the last review event and the next one
    pub interval: i32,
    /// numerical representation of the 'difficulty' of the item. More difficult items get a
    /// larger difficulty value
    pub difficulty: f32,
    /// numerical representation of how well the memory of this item has established itself in
    /// the user
    pub memory_strength: f32,
    /// how the quotient between difficulty and memory strength should be scaled. Used when the
    /// actual retention probability is not equal to the expected one
    pub adjusting_factor: f32,
    /// how many times this item has been reviewed
    pub times_reviewed: i32,
    /// how many times this item has been recalled successfully
    pub times_recalled: i32,
}

/// Struct containing the parameters used to update the scheduling data of an item.
#[derive(Clone, Debug)]
pub struct UpdateParameters {
    /// the multiplicative factor by which the difficulty is increased/decreased when the user
    /// finds the item too hard/easy
    pub difficulty_change_factor: f32,
    /// the multiplicative factor by which the memory_strength is increased when reviewing an item
    pub memory_strength_change_factor: f32,
}

impl Default for SchedulingData {
    /// Here we want the initial ratio between the difficulty and the memory strength to be
    /// around -ln(0.9) ≈ 0.105, which results in the first interval being around one day.
    /// We therefore set the difficulty to roughly that value and scale both it and the
    /// memory strength by 100.
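    ///
    /// A small doctest sketch of that claim:
    ///
    /// ```
    /// use spaced_rs::{compute_interval, SchedulingData};
    ///
    /// let item = SchedulingData::default();
    /// // difficulty / memory_strength = 10 / 100 = 0.1, giving a first interval of one day
    /// assert_eq!(compute_interval(item.difficulty / item.memory_strength, 0.9), 1);
    /// ```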
    fn default() -> Self {
        SchedulingData {
            interval: 1,
            difficulty: 10.0,
            memory_strength: 100.0,
            adjusting_factor: 1.0,
            times_reviewed: 0,
            times_recalled: 0,
        }
    }
}

impl Default for UpdateParameters {
    fn default() -> Self {
        Self {
            difficulty_change_factor: 1.1,
            memory_strength_change_factor: 1.60,
        }
    }
}

/// Main scheduling function. Takes the scheduling data of an item, the result of the review
/// event, the update parameters, and the wanted retention probability, and computes the next
/// interval together with the changes to the item parameters.
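///
/// A small doctest sketch (one review of a fresh item, with an illustrative 0.9 retention
/// target):
///
/// ```
/// use spaced_rs::{schedule, SchedulingData, UpdateParameters, UserReview};
///
/// let next = schedule(
///     SchedulingData::default(),
///     UserReview::TooEasy,
///     UpdateParameters::default(),
///     0.9,
/// );
/// // the difficulty was scaled down by (2.0 - difficulty_change_factor) = 0.9
/// assert!(next.difficulty < 10.0);
/// assert_eq!(next.times_reviewed, 1);
/// ```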
pub fn schedule(
    item_data: SchedulingData,
    user_review: UserReview,
    update_parameters: UpdateParameters,
    probability: f32,
) -> SchedulingData {
    // The value of f is the quotient difficulty/memory_strength. If we want the ratio between
    // the new and the old interval to be A, then we get the equation t2 = A * t1, which expands
    // to A * ln(P)/(-f1) = ln(P)/(-f2). Out of this we get f2 = f1 * 1/A, which can then be used
    // to calculate the new value of f. Since f is a quotient we compute this by first scaling
    // the memory_strength by our wanted ratio, updating the difficulty according to the user
    // review, and then taking the quotient.
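    //
    // A worked example using the library defaults (difficulty 10, memory strength 100, change
    // factor 1.6, target probability 0.9): f1 = 10/100 = 0.1 and A = 1.6 give
    // f2 = 0.1/1.6 = 0.0625, so the ideal interval grows from ln(0.9)/-0.1 ≈ 1.05 days to
    // ln(0.9)/-0.0625 ≈ 1.69 days.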

    // old data
    let SchedulingData {
        interval: _,
        difficulty,
        memory_strength,
        adjusting_factor,
        times_reviewed,
        times_recalled,
    } = item_data;

    let new_difficulty = match user_review {
        UserReview::TooHard => difficulty * update_parameters.difficulty_change_factor,
        UserReview::JustEnough => difficulty,
        UserReview::TooEasy => difficulty * (2.0 - update_parameters.difficulty_change_factor),
    };

    let new_memory_strength = memory_strength * update_parameters.memory_strength_change_factor;
    // note: the new forgetting rate is computed from the *updated* values, as the derivation
    // above describes
    let new_forgetting_rate = (1.0 / adjusting_factor) * (new_difficulty / new_memory_strength);
    let next_interval_no_random = compute_interval(new_forgetting_rate, probability);

    // we then want to introduce some noise into the interval
    // TODO: move how much noise we want into the UpdateParameters struct
    let mut rng = rand::thread_rng();
    let random_range = next_interval_no_random / 10;
    // gen_range panics on an empty range, so only add noise once the interval is at least
    // ten days
    let random_change = if random_range > 0 {
        rng.gen_range(-random_range..=random_range)
    } else {
        0
    };
    let next_interval = next_interval_no_random + random_change;

    SchedulingData {
        interval: next_interval,
        difficulty: new_difficulty,
        memory_strength: new_memory_strength,
        adjusting_factor,
        times_reviewed: times_reviewed + 1,
        // every UserReview variant describes a successful recall, so this is incremented
        // unconditionally
        times_recalled: times_recalled + 1,
    }
}

/// Computes how the ratio between review intervals should be scaled so that it aligns more
/// accurately with the true forgetting curve. Computed as explained
/// [here](https://docs.ankiweb.net/deck-options.html#interval-modifier).
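///
/// A small doctest sketch (8 recalls out of 10 reviews against a 0.9 target are illustrative
/// numbers):
///
/// ```
/// use spaced_rs::{update_adjusting_factor, SchedulingData};
///
/// let item = SchedulingData {
///     times_reviewed: 10,
///     times_recalled: 8,
///     ..Default::default()
/// };
/// let updated = update_adjusting_factor(item, 0.9);
/// // ln(0.9) / ln(0.8) ≈ 0.47, so future intervals shrink
/// assert!(updated.adjusting_factor < 1.0);
/// ```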
pub fn update_adjusting_factor(
    item_data: SchedulingData,
    target_probability: f32,
) -> SchedulingData {
    let SchedulingData {
        interval,
        difficulty,
        memory_strength,
        adjusting_factor: _,
        times_reviewed,
        times_recalled,
    } = item_data;

    // the actual recall probability for this item. note that the quotient below is only well
    // defined when this lies strictly between 0 and 1
    let actual_probability = times_recalled as f32 / times_reviewed as f32;

    let new_adjusting_factor = target_probability.ln() / actual_probability.ln();

    SchedulingData {
        interval,
        difficulty,
        memory_strength,
        adjusting_factor: new_adjusting_factor,
        times_reviewed,
        times_recalled,
    }
}
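
// A small test sketch of the full review loop (illustrative assumptions: five reviews rated
// JustEnough with the default parameters and a 0.9 target probability). The exact intervals
// depend on the randomised noise, so we only assert on the deterministic bookkeeping.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn repeated_reviews_update_the_bookkeeping() {
        let mut item = SchedulingData::default();
        for _ in 0..5 {
            item = schedule(
                item,
                UserReview::JustEnough,
                UpdateParameters::default(),
                0.9,
            );
        }
        assert_eq!(item.times_reviewed, 5);
        assert_eq!(item.times_recalled, 5);
        // the memory strength compounds by memory_strength_change_factor on every review
        assert!(item.memory_strength > 100.0);
    }
}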