genalg/evolution/caching_challenge.rs

use std::marker::PhantomData;

use crate::{
    caching::{CacheKey, CachedChallenge, ThreadLocalCachedChallenge},
    evolution::{CacheType, Challenge},
    phenotype::Phenotype,
};

/// Trait for wrapping a challenge with caching functionality.
///
/// This trait is automatically implemented for all types that implement the `Challenge` trait,
/// allowing any challenge to be easily enhanced with caching capabilities without modifying
/// its core implementation. Caching can significantly improve performance by avoiding redundant
/// fitness evaluations, which is especially valuable when fitness calculations are expensive.
///
/// To use caching, the phenotype type must implement the `CacheKey` trait, which defines
/// how to generate a unique identifier for each phenotype. This key is used as the lookup
/// value in the cache.
///
/// # Performance Considerations
///
/// - **Cache Growth**: The cache grows unbounded by default. For long-running evolutions or
///   large populations, consider clearing the cache periodically.
///
/// - **Thread Contention**: The global cache is protected by a mutex, which may become a
///   bottleneck in highly parallel workloads. For these cases, consider the thread-local
///   caching option.
///
/// - **Memory Usage**: Monitor memory usage if you're caching a very large number of phenotypes,
///   as each cache entry consumes memory.
///
/// # Type Parameters
///
/// * `P` - The phenotype type
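///
/// # Example
///
/// The sketch below shows one way a phenotype might satisfy the `CacheKey` requirement.
/// The quantization factor is illustrative, not prescribed by the library.
///
/// ```
/// # use genalg::{caching::CacheKey, phenotype::Phenotype, rng::RandomNumberGenerator};
/// #
/// # #[derive(Clone, Debug)]
/// # struct MyPhenotype { value: f64 }
/// #
/// # impl Phenotype for MyPhenotype {
/// #     fn crossover(&mut self, _other: &Self) {}
/// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
/// # }
/// #
/// // Quantize the floating-point value so nearly identical phenotypes share a cache entry.
/// impl CacheKey for MyPhenotype {
///     type Key = i64;
///
///     fn cache_key(&self) -> Self::Key {
///         (self.value * 1000.0).round() as i64
///     }
/// }
/// ```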
pub trait CachingChallenge<P: Phenotype>: Challenge<P> + Sized + Clone {
    /// Wraps this challenge with a global cache.
    ///
    /// This method creates a new `CachedChallenge` that wraps the current challenge,
    /// adding a mutex-protected cache shared across all threads. Each unique phenotype
    /// (according to its `cache_key()`) is evaluated only once, and subsequent evaluations
    /// use the cached fitness value.
    ///
    /// # Performance Characteristics
    ///
    /// - **Advantages**: Ensures maximum cache reuse across all threads.
    /// - **Limitations**: May create contention when many threads try to access the cache
    ///   simultaneously. This can become a bottleneck in highly parallel scenarios.
    ///
    /// # Type Constraints
    ///
    /// The phenotype type `P` must implement the `CacheKey` trait, which defines how to
    /// generate a unique identifier for each phenotype.
    ///
    /// # Returns
    ///
    /// A `CachedChallenge` that wraps this challenge with a global cache.
    ///
    /// # Example
    ///
    /// ```
    /// # use genalg::{
    /// #     evolution::{Challenge, caching_challenge::CachingChallenge},
    /// #     phenotype::Phenotype,
    /// #     caching::CacheKey,
    /// #     rng::RandomNumberGenerator,
    /// # };
    /// #
    /// # #[derive(Clone, Debug)]
    /// # struct MyPhenotype { value: f64 }
    /// #
    /// # impl Phenotype for MyPhenotype {
    /// #     fn crossover(&mut self, _other: &Self) {}
    /// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
    /// # }
    /// #
    /// # impl CacheKey for MyPhenotype {
    /// #     type Key = i64;
    /// #     fn cache_key(&self) -> Self::Key { (self.value * 1000.0).round() as i64 }
    /// # }
    /// #
    /// # #[derive(Clone)]
    /// # struct MyChallenge;
    /// #
    /// # impl Challenge<MyPhenotype> for MyChallenge {
    /// #     fn score(&self, _phenotype: &MyPhenotype) -> f64 { 0.0 }
    /// # }
    /// #
    /// # let challenge = MyChallenge;
    /// let cached_challenge = challenge.with_global_cache();
    ///
    /// // Use the cached challenge like any other challenge
    /// let phenotype = MyPhenotype { value: 42.0 };
    /// let fitness = cached_challenge.score(&phenotype); // First evaluation is computed
    /// let fitness_again = cached_challenge.score(&phenotype); // Uses cached value
    /// ```
    fn with_global_cache(&self) -> CachedChallenge<P, Self>
    where
        P: CacheKey;

    /// Wraps this challenge with a thread-local cache.
    ///
    /// This method creates a new `ThreadLocalCachedChallenge` that wraps the current challenge,
    /// adding a separate cache for each thread. This approach avoids mutex contention in
    /// highly parallel workloads by giving each thread its own independent cache.
    ///
    /// # Performance Characteristics
    ///
    /// - **Advantages**: Eliminates mutex contention, improving parallel performance.
    /// - **Limitations**: May result in redundant evaluations across threads, as the same
    ///   phenotype evaluated in different threads will have separate cache entries.
    ///   Also uses more memory as each thread maintains its own cache.
    ///
    /// # Type Constraints
    ///
    /// The phenotype type `P` must implement the `CacheKey` trait, which defines how to
    /// generate a unique identifier for each phenotype.
    ///
    /// # Returns
    ///
    /// A `ThreadLocalCachedChallenge` that wraps this challenge with a thread-local cache.
    ///
    /// # Example
    ///
    /// ```
    /// # use genalg::{
    /// #     evolution::{Challenge, caching_challenge::CachingChallenge},
    /// #     phenotype::Phenotype,
    /// #     caching::CacheKey,
    /// #     rng::RandomNumberGenerator,
    /// # };
    /// #
    /// # #[derive(Clone, Debug)]
    /// # struct MyPhenotype { value: f64 }
    /// #
    /// # impl Phenotype for MyPhenotype {
    /// #     fn crossover(&mut self, _other: &Self) {}
    /// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
    /// # }
    /// #
    /// # impl CacheKey for MyPhenotype {
    /// #     type Key = i64;
    /// #     fn cache_key(&self) -> Self::Key { (self.value * 1000.0).round() as i64 }
    /// # }
    /// #
    /// # #[derive(Clone)]
    /// # struct MyChallenge;
    /// #
    /// # impl Challenge<MyPhenotype> for MyChallenge {
    /// #     fn score(&self, _phenotype: &MyPhenotype) -> f64 { 0.0 }
    /// # }
    /// #
    /// # let challenge = MyChallenge;
    /// let thread_local_cached_challenge = challenge.with_thread_local_cache();
    ///
    /// // Ideal for parallel processing with rayon or other threading libraries
    /// use rayon::prelude::*;
    /// let phenotypes = vec![MyPhenotype { value: 1.0 }, MyPhenotype { value: 2.0 }];
    /// let scores: Vec<f64> = phenotypes.par_iter()
    ///     .map(|p| thread_local_cached_challenge.score(p))
    ///     .collect();
    /// ```
    fn with_thread_local_cache(&self) -> ThreadLocalCachedChallenge<P, Self>
    where
        P: CacheKey;

    /// Wraps this challenge with a cache of the specified type.
    ///
    /// This method provides a convenient way to choose between global and thread-local
    /// caching based on runtime configuration.
    ///
    /// # Arguments
    ///
    /// * `cache_type` - The type of cache to use (Global or ThreadLocal).
    ///
    /// # Returns
    ///
    /// A boxed `Challenge` that wraps this challenge with the specified cache type.
    /// The returned value is boxed to provide type erasure, allowing the client code
    /// to handle different cache implementations uniformly.
    ///
    /// # Type Constraints
    ///
    /// The phenotype type `P` must implement the `CacheKey` trait and have a `'static`
    /// lifetime. The challenge implementation must also have a `'static` lifetime.
    ///
    /// # Example
    ///
    /// ```
    /// # use genalg::{
    /// #     evolution::{Challenge, CacheType, caching_challenge::CachingChallenge},
    /// #     phenotype::Phenotype,
    /// #     caching::CacheKey,
    /// #     rng::RandomNumberGenerator,
    /// # };
    /// #
    /// # #[derive(Clone, Debug)]
    /// # struct MyPhenotype { value: f64 }
    /// #
    /// # impl Phenotype for MyPhenotype {
    /// #     fn crossover(&mut self, _other: &Self) {}
    /// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
    /// # }
    /// #
    /// # impl CacheKey for MyPhenotype {
    /// #     type Key = i64;
    /// #     fn cache_key(&self) -> Self::Key { (self.value * 1000.0).round() as i64 }
    /// # }
    /// #
    /// # #[derive(Clone)]
    /// # struct MyChallenge;
    /// #
    /// # impl Challenge<MyPhenotype> for MyChallenge {
    /// #     fn score(&self, _phenotype: &MyPhenotype) -> f64 { 0.0 }
    /// # }
    /// #
    /// # let challenge = MyChallenge;
    /// // Select caching type based on configuration or runtime conditions
    /// let use_thread_local = std::thread::available_parallelism().unwrap().get() > 4;
    /// let cache_type = if use_thread_local {
    ///     CacheType::ThreadLocal
    /// } else {
    ///     CacheType::Global
    /// };
    ///
    /// let cached_challenge = challenge.with_cache(cache_type);
    /// ```
    fn with_cache(&self, cache_type: CacheType) -> Box<dyn Challenge<P>>
    where
        P: CacheKey + 'static,
        Self: 'static,
    {
        match cache_type {
            CacheType::Global => Box::new(self.with_global_cache()),
            CacheType::ThreadLocal => Box::new(self.with_thread_local_cache()),
        }
    }
}

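// Blanket implementation: any `Challenge` that is `Clone` automatically gets the caching helpers above.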
impl<P, C> CachingChallenge<P> for C
where
    P: Phenotype,
    C: Challenge<P> + Clone,
{
    fn with_global_cache(&self) -> CachedChallenge<P, Self>
    where
        P: CacheKey,
    {
        CachedChallenge::new(self.clone())
    }

    fn with_thread_local_cache(&self) -> ThreadLocalCachedChallenge<P, Self>
    where
        P: CacheKey,
    {
        ThreadLocalCachedChallenge::new(self.clone())
    }
}

/// A challenge adapter intended to conditionally enable caching.
///
/// This adapter wraps a challenge and behaves exactly like the wrapped challenge, with the
/// goal of making caching configurable at runtime. Currently it simply delegates to the
/// inner challenge without performing any caching; it serves as a placeholder for future
/// conditional caching functionality.
///
/// # Type Parameters
///
/// * `P` - The phenotype type
/// * `C` - The inner challenge type
///
/// # Note
///
/// Important: In the current implementation, the `CachingChallengeSwitcher` does not actually
/// implement caching functionality. It always calls the inner challenge's score method directly,
/// regardless of whether a cache type is set. It is intended as a structural pattern for
/// future enhancements.
///
/// For active caching, use the `CachingChallenge` trait methods directly:
/// - `challenge.with_global_cache()`
/// - `challenge.with_thread_local_cache()`
/// - `challenge.with_cache(cache_type)`
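///
/// # Example
///
/// A minimal sketch of the pass-through behavior described above: the switcher scores
/// phenotypes exactly like the challenge it wraps.
///
/// ```
/// # use genalg::{
/// #     evolution::{Challenge, CacheType, caching_challenge::CachingChallengeSwitcher},
/// #     phenotype::Phenotype,
/// #     rng::RandomNumberGenerator,
/// # };
/// #
/// # #[derive(Clone, Debug)]
/// # struct MyPhenotype { value: f64 }
/// #
/// # impl Phenotype for MyPhenotype {
/// #     fn crossover(&mut self, _other: &Self) {}
/// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
/// # }
/// #
/// # #[derive(Clone)]
/// # struct MyChallenge;
/// #
/// # impl Challenge<MyPhenotype> for MyChallenge {
/// #     fn score(&self, phenotype: &MyPhenotype) -> f64 { phenotype.value }
/// # }
/// #
/// let switcher = CachingChallengeSwitcher::new(MyChallenge).with_cache(CacheType::Global);
/// let phenotype = MyPhenotype { value: 42.0 };
///
/// // Delegates straight to `MyChallenge::score`; no cache is consulted.
/// assert_eq!(switcher.score(&phenotype), 42.0);
/// ```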
#[derive(Debug, Clone)]
pub struct CachingChallengeSwitcher<P, C>
where
    P: Phenotype,
    C: Challenge<P>,
{
    inner: C,
    cache_type: Option<CacheType>,
    _phantom: PhantomData<P>,
}

impl<P, C> CachingChallengeSwitcher<P, C>
where
    P: Phenotype,
    C: Challenge<P>,
{
    /// Creates a new `CachingChallengeSwitcher` with the specified challenge.
    ///
    /// # Arguments
    ///
    /// * `challenge` - The challenge to wrap
    ///
    /// # Returns
    ///
    /// A new `CachingChallengeSwitcher` instance with caching disabled by default.
    ///
    /// # Example
    ///
    /// ```
    /// # use genalg::{
    /// #     evolution::{Challenge, caching_challenge::CachingChallengeSwitcher},
    /// #     phenotype::Phenotype,
    /// #     rng::RandomNumberGenerator,
    /// # };
    /// #
    /// # #[derive(Clone, Debug)]
    /// # struct MyPhenotype { value: f64 }
    /// #
    /// # impl Phenotype for MyPhenotype {
    /// #     fn crossover(&mut self, _other: &Self) {}
    /// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
    /// # }
    /// #
    /// # #[derive(Clone)]
    /// # struct MyChallenge;
    /// #
    /// # impl Challenge<MyPhenotype> for MyChallenge {
    /// #     fn score(&self, _phenotype: &MyPhenotype) -> f64 { 0.0 }
    /// # }
    /// #
    /// # let challenge = MyChallenge;
    /// let switcher = CachingChallengeSwitcher::new(challenge);
    /// ```
    pub fn new(challenge: C) -> Self {
        Self {
            inner: challenge,
            cache_type: None,
            _phantom: PhantomData,
        }
    }

    /// Configures this switcher to use caching.
    ///
    /// # Arguments
    ///
    /// * `cache_type` - The type of cache to use
    ///
    /// # Returns
    ///
    /// This switcher configured to use caching.
    ///
    /// # Note
    ///
    /// In the current implementation, this setting does not actually enable caching functionality.
    /// The switcher still calls the inner challenge's score method directly. This method is
    /// intended for future enhancements to the conditional caching functionality.
    ///
    /// # Example
    ///
    /// ```
    /// # use genalg::{
    /// #     evolution::{Challenge, CacheType, caching_challenge::CachingChallengeSwitcher},
    /// #     phenotype::Phenotype,
    /// #     rng::RandomNumberGenerator,
    /// # };
    /// #
    /// # #[derive(Clone, Debug)]
    /// # struct MyPhenotype { value: f64 }
    /// #
    /// # impl Phenotype for MyPhenotype {
    /// #     fn crossover(&mut self, _other: &Self) {}
    /// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
    /// # }
    /// #
    /// # #[derive(Clone)]
    /// # struct MyChallenge;
    /// #
    /// # impl Challenge<MyPhenotype> for MyChallenge {
    /// #     fn score(&self, _phenotype: &MyPhenotype) -> f64 { 0.0 }
    /// # }
    /// #
    /// # let challenge = MyChallenge;
    /// let switcher = CachingChallengeSwitcher::new(challenge)
    ///     .with_cache(CacheType::Global);
    /// ```
    pub fn with_cache(mut self, cache_type: CacheType) -> Self {
        self.cache_type = Some(cache_type);
        self
    }

    /// Returns the inner challenge.
    ///
    /// # Returns
    ///
    /// A reference to the inner challenge.
    pub fn inner(&self) -> &C {
        &self.inner
    }

    /// Unwraps this switcher, returning the inner challenge.
    ///
    /// # Returns
    ///
    /// The inner challenge.
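    ///
    /// # Example
    ///
    /// A minimal sketch with a trivial challenge: `inner` borrows the wrapped challenge,
    /// while `unwrap` consumes the switcher and hands it back by value.
    ///
    /// ```
    /// # use genalg::{
    /// #     evolution::{Challenge, caching_challenge::CachingChallengeSwitcher},
    /// #     phenotype::Phenotype,
    /// #     rng::RandomNumberGenerator,
    /// # };
    /// #
    /// # #[derive(Clone, Debug)]
    /// # struct MyPhenotype { value: f64 }
    /// #
    /// # impl Phenotype for MyPhenotype {
    /// #     fn crossover(&mut self, _other: &Self) {}
    /// #     fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {}
    /// # }
    /// #
    /// # #[derive(Clone, Debug, PartialEq)]
    /// # struct MyChallenge;
    /// #
    /// # impl Challenge<MyPhenotype> for MyChallenge {
    /// #     fn score(&self, _phenotype: &MyPhenotype) -> f64 { 0.0 }
    /// # }
    /// #
    /// let switcher = CachingChallengeSwitcher::new(MyChallenge);
    ///
    /// // Borrow the wrapped challenge without giving up the switcher...
    /// assert_eq!(switcher.inner(), &MyChallenge);
    ///
    /// // ...then consume the switcher to take the challenge back entirely.
    /// let _challenge: MyChallenge = switcher.unwrap();
    /// ```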
    pub fn unwrap(self) -> C {
        self.inner
    }
}

impl<P, C> Challenge<P> for CachingChallengeSwitcher<P, C>
where
    P: Phenotype,
    C: Challenge<P>,
{
    fn score(&self, phenotype: &P) -> f64 {
        // Currently, always use the inner challenge directly
        // In a future implementation, this could conditionally use caching
        // based on the cache_type field
        self.inner.score(phenotype)
    }
}

#[cfg(test)]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;

    use super::*;
    use crate::rng::RandomNumberGenerator;

    #[derive(Clone, Debug)]
    struct TestPhenotype {
        value: i32,
    }

    impl Phenotype for TestPhenotype {
        fn crossover(&mut self, other: &Self) {
            self.value = (self.value + other.value) / 2;
        }

        fn mutate(&mut self, _rng: &mut RandomNumberGenerator) {
            self.value += 1;
        }
    }

    impl CacheKey for TestPhenotype {
        type Key = i32;

        fn cache_key(&self) -> Self::Key {
            self.value
        }
    }

    #[derive(Clone)]
    struct TestChallenge {
        evaluations: Arc<AtomicUsize>,
    }

    impl TestChallenge {
        fn new() -> Self {
            Self {
                evaluations: Arc::new(AtomicUsize::new(0)),
            }
        }

        fn get_evaluations(&self) -> usize {
            self.evaluations.load(Ordering::SeqCst)
        }
    }

    impl Challenge<TestPhenotype> for TestChallenge {
        fn score(&self, phenotype: &TestPhenotype) -> f64 {
            // Increment evaluation counter
            self.evaluations.fetch_add(1, Ordering::SeqCst);

            // Simple fitness function: smaller values are better
            -(phenotype.value as f64)
        }
    }

    #[test]
    fn test_with_global_cache() {
        let challenge = TestChallenge::new();
        let cached_challenge = challenge.with_global_cache();

        let phenotype1 = TestPhenotype { value: 5 };
        let phenotype2 = TestPhenotype { value: 10 };

        // First evaluation should compute
        let score1 = cached_challenge.score(&phenotype1);
        assert_eq!(score1, -5.0);
        assert_eq!(challenge.get_evaluations(), 1);

        // Second evaluation of the same phenotype should use cache
        let score1_again = cached_challenge.score(&phenotype1);
        assert_eq!(score1_again, -5.0);
        assert_eq!(challenge.get_evaluations(), 1); // Still 1

        // Different phenotype should compute
        let score2 = cached_challenge.score(&phenotype2);
        assert_eq!(score2, -10.0);
        assert_eq!(challenge.get_evaluations(), 2);
    }

    #[test]
    fn test_with_thread_local_cache() {
        let challenge = TestChallenge::new();
        let cached_challenge = challenge.with_thread_local_cache();

        let phenotype1 = TestPhenotype { value: 5 };
        let phenotype2 = TestPhenotype { value: 10 };

        // First evaluation should compute
        let score1 = cached_challenge.score(&phenotype1);
        assert_eq!(score1, -5.0);
        assert_eq!(challenge.get_evaluations(), 1);

        // Second evaluation of the same phenotype should use cache
        let score1_again = cached_challenge.score(&phenotype1);
        assert_eq!(score1_again, -5.0);
        assert_eq!(challenge.get_evaluations(), 1); // Still 1

        // Different phenotype should compute
        let score2 = cached_challenge.score(&phenotype2);
        assert_eq!(score2, -10.0);
        assert_eq!(challenge.get_evaluations(), 2);
    }

    #[test]
    fn test_with_cache() {
        let challenge = TestChallenge::new();

        // Test with global cache
        let cached_challenge = challenge.clone().with_cache(CacheType::Global);

        let phenotype = TestPhenotype { value: 5 };

        let score1 = cached_challenge.score(&phenotype);
        assert_eq!(score1, -5.0);

        let score2 = cached_challenge.score(&phenotype);
        assert_eq!(score2, -5.0);

        // The boxed trait object hides `get_evaluations`, but the clones share the same
        // atomic counter, so the original `challenge` handle can still verify the count.
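        assert_eq!(challenge.get_evaluations(), 1); // the second `score` call hit the cache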

        // Test with thread local cache
        let cached_challenge = challenge.clone().with_cache(CacheType::ThreadLocal);

        let score1 = cached_challenge.score(&phenotype);
        assert_eq!(score1, -5.0);

        let score2 = cached_challenge.score(&phenotype);
        assert_eq!(score2, -5.0);
    }

    #[test]
    fn test_caching_challenge_switcher() {
        let challenge = TestChallenge::new();

        // Without caching
        let switcher = CachingChallengeSwitcher::new(challenge.clone());

        let phenotype = TestPhenotype { value: 5 };

        let score1 = switcher.score(&phenotype);
        assert_eq!(score1, -5.0);
        assert_eq!(challenge.get_evaluations(), 1);

        let score2 = switcher.score(&phenotype);
        assert_eq!(score2, -5.0);
        assert_eq!(challenge.get_evaluations(), 2); // No caching, so 2 evaluations

        // With caching - note that the switcher currently ignores the cache type
        // and just delegates to the inner challenge directly
        let challenge = TestChallenge::new();
        let switcher =
            CachingChallengeSwitcher::new(challenge.clone()).with_cache(CacheType::Global);

        let score1 = switcher.score(&phenotype);
        assert_eq!(score1, -5.0);
        assert_eq!(challenge.get_evaluations(), 1);

        let score2 = switcher.score(&phenotype);
        assert_eq!(score2, -5.0);
        assert_eq!(challenge.get_evaluations(), 2); // The switcher doesn't implement caching yet
    }
}