Skip to main content

engine/
decay.rs

1//! Importance Decay Engine for Dakera AI Agent Memory Platform.
2//!
3//! Background task that periodically decays memory importance scores
4//! based on configurable strategies: Exponential, Linear, or StepFunction.
5//!
6//! On every recall hit, importance gets an access boost.
7//! Memories below `min_importance` are auto-deleted.
8
9use std::sync::Arc;
10
11use common::{DecayConfig, DecayStrategy, Memory, MemoryType, Vector};
12use serde::{Deserialize, Serialize};
13use storage::{RedisCache, VectorStorage};
14use tokio::sync::RwLock;
15use tracing;
16
/// Importance Decay Engine that runs as a background task.
///
/// Holds the decay parameters (strategy, half-life, deletion threshold) and
/// applies them to stored memories via `apply_decay`. Construct with
/// `DecayEngine::new`; run periodically via `DecayEngine::spawn`.
pub struct DecayEngine {
    /// Decay parameters (strategy, `half_life_hours`, `min_importance`)
    /// used by every calculation this engine performs.
    pub config: DecayConfig,
}
21
/// Configuration loaded from environment variables.
///
/// Bundles the per-memory decay parameters with the scheduler interval;
/// see `DecayEngineConfig::from_env` for the recognized variables.
pub struct DecayEngineConfig {
    /// Decay configuration (strategy, half_life, min_importance)
    pub decay_config: DecayConfig,
    /// How often to run decay (in seconds)
    pub interval_secs: u64,
}
29
30impl Default for DecayEngineConfig {
31    fn default() -> Self {
32        Self {
33            decay_config: DecayConfig {
34                strategy: DecayStrategy::Exponential,
35                half_life_hours: 168.0, // 1 week
36                min_importance: 0.01,
37            },
38            interval_secs: 3600, // 1 hour
39        }
40    }
41}
42
43impl DecayEngineConfig {
44    /// Load configuration from environment variables.
45    pub fn from_env() -> Self {
46        let half_life_hours: f64 = std::env::var("DAKERA_DECAY_HALF_LIFE_HOURS")
47            .ok()
48            .and_then(|v| v.parse().ok())
49            .unwrap_or(168.0);
50
51        let min_importance: f32 = std::env::var("DAKERA_DECAY_MIN_IMPORTANCE")
52            .ok()
53            .and_then(|v| v.parse().ok())
54            .unwrap_or(0.01);
55
56        let interval_secs: u64 = std::env::var("DAKERA_DECAY_INTERVAL_SECS")
57            .ok()
58            .and_then(|v| v.parse().ok())
59            .unwrap_or(3600);
60
61        let strategy_str =
62            std::env::var("DAKERA_DECAY_STRATEGY").unwrap_or_else(|_| "exponential".to_string());
63
64        let strategy = match strategy_str.to_lowercase().as_str() {
65            "linear" => DecayStrategy::Linear,
66            "step" | "stepfunction" | "step_function" => DecayStrategy::StepFunction,
67            _ => DecayStrategy::Exponential,
68        };
69
70        Self {
71            decay_config: DecayConfig {
72                strategy,
73                half_life_hours,
74                min_importance,
75            },
76            interval_secs,
77        }
78    }
79}
80
81impl DecayEngine {
    /// Create a new DecayEngine with the given configuration.
    ///
    /// The configuration is stored as-is; no validation is performed here.
    pub fn new(config: DecayConfig) -> Self {
        Self { config }
    }
86
87    /// Calculate decayed importance for a single memory.
88    ///
89    /// Memory type determines base decay speed:
90    /// - Working:    3× faster (temporary by design)
91    /// - Episodic:   1× normal (events fade naturally)
92    /// - Semantic:   0.5× slower (knowledge persists)
93    /// - Procedural: 0.3× slower (skills are durable)
94    ///
95    /// Usage pattern shields from decay (diminishing returns).
96    /// Never-accessed memories fade 50% faster.
97    pub fn calculate_decay(
98        &self,
99        current_importance: f32,
100        hours_elapsed: f64,
101        memory_type: &MemoryType,
102        access_count: u32,
103    ) -> f32 {
104        if hours_elapsed <= 0.0 {
105            return current_importance;
106        }
107
108        // Memory type determines base decay speed
109        let type_multiplier = match memory_type {
110            MemoryType::Working => 3.0,
111            MemoryType::Episodic => 1.0,
112            MemoryType::Semantic => 0.5,
113            MemoryType::Procedural => 0.3,
114        };
115
116        // Usage pattern shields from decay (diminishing returns)
117        let usage_shield = if access_count > 0 {
118            1.0 / (1.0 + (access_count as f64 * 0.1))
119        } else {
120            1.5 // never accessed = 50% faster decay
121        };
122
123        let effective_half_life = self.config.half_life_hours / (type_multiplier * usage_shield);
124
125        let decayed = match self.config.strategy {
126            DecayStrategy::Exponential => {
127                let decay_factor = (0.5_f64).powf(hours_elapsed / effective_half_life);
128                current_importance * decay_factor as f32
129            }
130            DecayStrategy::Linear => {
131                let decay_amount = (hours_elapsed / effective_half_life) as f32 * 0.5;
132                (current_importance - decay_amount).max(0.0)
133            }
134            DecayStrategy::StepFunction => {
135                let steps = (hours_elapsed / effective_half_life).floor() as u32;
136                let decay_factor = (0.5_f32).powi(steps as i32);
137                current_importance * decay_factor
138            }
139        };
140
141        decayed.clamp(0.0, 1.0)
142    }
143
144    /// Calculate access boost for a memory that was just recalled.
145    /// Scales with current importance — valuable memories get bigger boosts.
146    pub fn access_boost(current_importance: f32) -> f32 {
147        let boost = 0.05 + 0.05 * current_importance; // 0.05–0.10 range
148        (current_importance + boost).min(1.0)
149    }
150
    /// Apply decay to all memories across all agent namespaces.
    ///
    /// Iterates all namespaces prefixed with `_dakera_agent_`, loads memories,
    /// applies decay based on time since last access, and removes memories
    /// below the minimum importance threshold.
    ///
    /// Failures are non-fatal by design: a namespace that cannot be read,
    /// deleted from, or upserted to is logged and skipped, so one bad
    /// namespace cannot abort the whole cycle.
    pub async fn apply_decay(&self, storage: &Arc<dyn VectorStorage>) -> DecayResult {
        let mut result = DecayResult::default();

        // List all namespaces; if even this fails, give up on this cycle
        // and return the (empty) result.
        let namespaces = match storage.list_namespaces().await {
            Ok(ns) => ns,
            Err(e) => {
                tracing::error!(error = %e, "Failed to list namespaces for decay");
                return result;
            }
        };

        // Current wall-clock time as unix seconds (0 if the clock reads
        // before the epoch).
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();

        // Only process Dakera agent namespaces
        for namespace in namespaces {
            if !namespace.starts_with("_dakera_agent_") {
                continue;
            }

            result.namespaces_processed += 1;

            let vectors = match storage.get_all(&namespace).await {
                Ok(v) => v,
                Err(e) => {
                    tracing::warn!(
                        namespace = %namespace,
                        error = %e,
                        "Failed to get vectors for decay"
                    );
                    continue;
                }
            };

            // Batched per namespace: one delete call + one upsert call.
            let mut updated_vectors: Vec<Vector> = Vec::new();
            let mut ids_to_delete: Vec<String> = Vec::new();

            for vector in &vectors {
                let memory = match Memory::from_vector(vector) {
                    Some(m) => m,
                    None => continue, // Skip non-memory vectors
                };

                result.memories_processed += 1;

                // DECAY-3: Hard-delete memories whose explicit TTL has expired.
                // expires_at bypasses decay scoring — immediate removal.
                if let Some(exp) = memory.expires_at {
                    if exp <= now {
                        ids_to_delete.push(memory.id.clone());
                        result.memories_deleted += 1;
                        continue;
                    }
                }

                // Calculate hours since last access; guard against clock skew
                // (a future last_accessed_at yields 0 elapsed, i.e. no decay).
                let hours_elapsed = if now > memory.last_accessed_at {
                    (now - memory.last_accessed_at) as f64 / 3600.0
                } else {
                    0.0
                };

                let new_importance = self.calculate_decay(
                    memory.importance,
                    hours_elapsed,
                    &memory.memory_type,
                    memory.access_count,
                );

                // Check if below minimum importance
                if new_importance < self.config.min_importance {
                    ids_to_delete.push(memory.id.clone());
                    result.memories_deleted += 1;
                    continue;
                }

                // Only update if importance actually changed (epsilon avoids
                // rewriting vectors for negligible float drift).
                if (new_importance - memory.importance).abs() > 0.001 {
                    let mut updated_memory = memory;
                    updated_memory.importance = new_importance;

                    // Rebuild vector with updated metadata but same embedding
                    let mut updated_vector = vector.clone();
                    updated_vector.metadata = Some(updated_memory.to_vector_metadata());
                    updated_vectors.push(updated_vector);
                    result.memories_decayed += 1;
                }
            }

            // Delete memories below threshold (or TTL-expired). Counters were
            // already incremented above; a failed delete is logged only.
            if !ids_to_delete.is_empty() {
                if let Err(e) = storage.delete(&namespace, &ids_to_delete).await {
                    tracing::warn!(
                        namespace = %namespace,
                        count = ids_to_delete.len(),
                        error = %e,
                        "Failed to delete expired memories"
                    );
                }
            }

            // Upsert updated memories
            if !updated_vectors.is_empty() {
                if let Err(e) = storage.upsert(&namespace, updated_vectors).await {
                    tracing::warn!(
                        namespace = %namespace,
                        error = %e,
                        "Failed to upsert decayed memories"
                    );
                }
            }
        }

        tracing::info!(
            namespaces_processed = result.namespaces_processed,
            memories_processed = result.memories_processed,
            memories_decayed = result.memories_decayed,
            memories_deleted = result.memories_deleted,
            "Decay cycle completed"
        );

        result
    }
282
    /// Spawn the decay engine as a background tokio task.
    ///
    /// Takes a shared `Arc<RwLock<DecayConfig>>` so that config changes made at
    /// runtime via `PUT /admin/decay/config` take effect without a server restart.
    /// Each loop iteration re-reads the config before running, so strategy/half-life
    /// changes apply on the next cycle.
    ///
    /// With `redis` set, a distributed lock elects a single leader per cycle
    /// across replicas; without Redis the node always runs (single-node mode).
    /// Note the first cycle starts one full `interval_secs` after spawn —
    /// there is no immediate run at startup.
    pub fn spawn(
        config: Arc<RwLock<DecayConfig>>,
        interval_secs: u64,
        storage: Arc<dyn VectorStorage>,
        metrics: Arc<BackgroundMetrics>,
        redis: Option<RedisCache>,
        node_id: String,
    ) -> tokio::task::JoinHandle<()> {
        let interval = std::time::Duration::from_secs(interval_secs);
        // Lock TTL = interval + 5 min safety margin, so a stale lock never blocks
        // more than one missed cycle.
        let lock_ttl = interval_secs + 300;
        const LOCK_KEY: &str = "dakera:lock:decay";

        tokio::spawn(async move {
            tracing::info!(
                interval_secs,
                "Decay engine started (hot-reload config via PUT /admin/decay/config)"
            );

            loop {
                // Sleep first, then work — avoids a decay burst at boot.
                tokio::time::sleep(interval).await;

                // Leader election: acquire Redis lock before running decay.
                // Graceful degradation: if Redis is unavailable, run anyway (in-process fallback).
                let acquired = match redis {
                    Some(ref rc) => rc.try_acquire_lock(LOCK_KEY, &node_id, lock_ttl).await,
                    None => true, // No Redis configured — single-node mode, always run
                };

                if !acquired {
                    tracing::debug!("Decay skipped — another replica holds the leader lock");
                    continue;
                }

                // Re-read config before each cycle — picks up hot-reload changes
                let current_config = config.read().await.clone();
                let engine = DecayEngine::new(current_config);
                let result = engine.apply_decay(&storage).await;
                metrics.record_decay(&result);

                // Release the lock so another replica can acquire it next cycle
                // (or if this node crashes, the TTL covers cleanup).
                if let Some(ref rc) = redis {
                    rc.release_lock(LOCK_KEY, &node_id).await;
                }
            }
        })
    }
338}
339
/// Result of a decay cycle.
///
/// Serializable so it can be persisted and exposed through the metrics API.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DecayResult {
    /// `_dakera_agent_*` namespaces visited this cycle.
    pub namespaces_processed: usize,
    /// Vectors that parsed as memories and were examined.
    pub memories_processed: usize,
    /// Memories whose importance was lowered and re-upserted.
    pub memories_decayed: usize,
    /// Memories removed (TTL-expired or below `min_importance`).
    pub memories_deleted: usize,
}
348
/// Shared metrics for background activity tracking.
///
/// Updated by decay/autopilot spawn loops so the API can expose
/// what background jobs are doing to user memories.
/// Persisted to storage so metrics survive restarts.
///
/// Interior mutability (`Mutex` + atomic flag) lets it be shared as
/// `Arc<BackgroundMetrics>` across tasks without external locking.
#[derive(Debug, Default)]
pub struct BackgroundMetrics {
    /// All counters, snapshots, and the history ring buffer.
    inner: std::sync::Mutex<BackgroundMetricsInner>,
    /// Flag: dirty since last persist
    dirty: std::sync::atomic::AtomicBool,
}
360
/// Max history data points kept (7 days at 1-hour decay interval).
/// Oldest entries are evicted first; see `push_history`.
const MAX_HISTORY_POINTS: usize = 168;
363
/// Inner state of [`BackgroundMetrics`]: last-run snapshots, cumulative
/// counters, and the activity history ring buffer.
///
/// Every field is `#[serde(default)]` so snapshots persisted by older
/// versions (missing newer fields) still deserialize cleanly.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BackgroundMetricsInner {
    /// Last decay cycle result
    #[serde(default)]
    pub last_decay: Option<DecayResult>,
    /// Timestamp of last decay run (unix secs)
    #[serde(default)]
    pub last_decay_at: Option<u64>,
    /// Cumulative memories deleted by decay
    #[serde(default)]
    pub total_decay_deleted: u64,
    /// Cumulative memories decayed (importance lowered)
    #[serde(default)]
    pub total_decay_adjusted: u64,
    /// Total number of decay cycles completed
    #[serde(default)]
    pub decay_cycles_run: u64,

    /// Last dedup cycle result
    #[serde(default)]
    pub last_dedup: Option<DedupResultSnapshot>,
    /// Timestamp of last dedup run (unix secs)
    #[serde(default)]
    pub last_dedup_at: Option<u64>,
    /// Cumulative duplicates removed
    #[serde(default)]
    pub total_dedup_removed: u64,

    /// Last consolidation cycle result
    #[serde(default)]
    pub last_consolidation: Option<ConsolidationResultSnapshot>,
    /// Timestamp of last consolidation run (unix secs)
    #[serde(default)]
    pub last_consolidation_at: Option<u64>,
    /// Cumulative memories consolidated
    #[serde(default)]
    pub total_consolidated: u64,

    /// Historical data points for graphing (ring buffer, newest last)
    #[serde(default)]
    pub history: Vec<ActivityHistoryPoint>,
}
406
/// A single historical data point for the activity timeline graph.
///
/// Each `record_*` call appends one point with the relevant counter set
/// and the others zeroed, so the graph can overlay the three job types.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ActivityHistoryPoint {
    /// Unix timestamp (seconds)
    pub timestamp: u64,
    /// Memories deleted by decay in this cycle
    pub decay_deleted: u64,
    /// Memories adjusted by decay in this cycle
    pub decay_adjusted: u64,
    /// Duplicates removed in this cycle
    pub dedup_removed: u64,
    /// Memories consolidated in this cycle
    pub consolidated: u64,
}
421
/// Serializable snapshot of a dedup result (avoids coupling to autopilot module).
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DedupResultSnapshot {
    /// Namespaces visited by the dedup pass.
    pub namespaces_processed: usize,
    /// Memories examined for duplicates.
    pub memories_scanned: usize,
    /// Duplicate memories removed.
    pub duplicates_removed: usize,
}
429
/// Serializable snapshot of a consolidation result.
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct ConsolidationResultSnapshot {
    /// Namespaces visited by the consolidation pass.
    pub namespaces_processed: usize,
    /// Memories examined for clustering.
    pub memories_scanned: usize,
    /// Clusters that were merged.
    pub clusters_merged: usize,
    /// Memories folded into consolidated entries.
    pub memories_consolidated: usize,
}
438
439impl BackgroundMetrics {
440    pub fn new() -> Self {
441        Self::default()
442    }
443
444    /// Restore metrics from a previously persisted snapshot.
445    pub fn restore(inner: BackgroundMetricsInner) -> Self {
446        Self {
447            inner: std::sync::Mutex::new(inner),
448            dirty: std::sync::atomic::AtomicBool::new(false),
449        }
450    }
451
452    /// Whether metrics changed since last persist.
453    pub fn is_dirty(&self) -> bool {
454        self.dirty.load(std::sync::atomic::Ordering::Relaxed)
455    }
456
457    /// Clear the dirty flag after a successful persist.
458    pub fn clear_dirty(&self) {
459        self.dirty
460            .store(false, std::sync::atomic::Ordering::Relaxed);
461    }
462
463    /// Record a decay cycle result.
464    pub fn record_decay(&self, result: &DecayResult) {
465        let now = std::time::SystemTime::now()
466            .duration_since(std::time::UNIX_EPOCH)
467            .unwrap_or_default()
468            .as_secs();
469        let mut inner = self.inner.lock().unwrap_or_else(|e| e.into_inner());
470        inner.total_decay_deleted += result.memories_deleted as u64;
471        inner.total_decay_adjusted += result.memories_decayed as u64;
472        inner.decay_cycles_run += 1;
473        inner.last_decay = Some(result.clone());
474        inner.last_decay_at = Some(now);
475        // Append history point
476        push_history(
477            &mut inner.history,
478            ActivityHistoryPoint {
479                timestamp: now,
480                decay_deleted: result.memories_deleted as u64,
481                decay_adjusted: result.memories_decayed as u64,
482                dedup_removed: 0,
483                consolidated: 0,
484            },
485        );
486        self.dirty.store(true, std::sync::atomic::Ordering::Relaxed);
487    }
488
489    /// Record a dedup cycle result.
490    pub fn record_dedup(
491        &self,
492        namespaces_processed: usize,
493        memories_scanned: usize,
494        duplicates_removed: usize,
495    ) {
496        let now = std::time::SystemTime::now()
497            .duration_since(std::time::UNIX_EPOCH)
498            .unwrap_or_default()
499            .as_secs();
500        let mut inner = self.inner.lock().unwrap_or_else(|e| e.into_inner());
501        inner.total_dedup_removed += duplicates_removed as u64;
502        inner.last_dedup = Some(DedupResultSnapshot {
503            namespaces_processed,
504            memories_scanned,
505            duplicates_removed,
506        });
507        inner.last_dedup_at = Some(now);
508        push_history(
509            &mut inner.history,
510            ActivityHistoryPoint {
511                timestamp: now,
512                decay_deleted: 0,
513                decay_adjusted: 0,
514                dedup_removed: duplicates_removed as u64,
515                consolidated: 0,
516            },
517        );
518        self.dirty.store(true, std::sync::atomic::Ordering::Relaxed);
519    }
520
521    /// Record a consolidation cycle result.
522    pub fn record_consolidation(
523        &self,
524        namespaces_processed: usize,
525        memories_scanned: usize,
526        clusters_merged: usize,
527        memories_consolidated: usize,
528    ) {
529        let now = std::time::SystemTime::now()
530            .duration_since(std::time::UNIX_EPOCH)
531            .unwrap_or_default()
532            .as_secs();
533        let mut inner = self.inner.lock().unwrap_or_else(|e| e.into_inner());
534        inner.total_consolidated += memories_consolidated as u64;
535        inner.last_consolidation = Some(ConsolidationResultSnapshot {
536            namespaces_processed,
537            memories_scanned,
538            clusters_merged,
539            memories_consolidated,
540        });
541        inner.last_consolidation_at = Some(now);
542        push_history(
543            &mut inner.history,
544            ActivityHistoryPoint {
545                timestamp: now,
546                decay_deleted: 0,
547                decay_adjusted: 0,
548                dedup_removed: 0,
549                consolidated: memories_consolidated as u64,
550            },
551        );
552        self.dirty.store(true, std::sync::atomic::Ordering::Relaxed);
553    }
554
555    /// Replace the inner state with a restored snapshot (used on startup).
556    pub fn restore_into(&self, restored: BackgroundMetricsInner) {
557        let mut inner = self.inner.lock().unwrap_or_else(|e| e.into_inner());
558        *inner = restored;
559        // Don't set dirty — this was just loaded from storage
560    }
561
562    /// Get a snapshot of all metrics for API response.
563    pub fn snapshot(&self) -> BackgroundMetricsInner {
564        self.inner.lock().unwrap_or_else(|e| e.into_inner()).clone()
565    }
566}
567
568/// Append a history point, capping at MAX_HISTORY_POINTS.
569fn push_history(history: &mut Vec<ActivityHistoryPoint>, point: ActivityHistoryPoint) {
570    history.push(point);
571    if history.len() > MAX_HISTORY_POINTS {
572        let excess = history.len() - MAX_HISTORY_POINTS;
573        history.drain(..excess);
574    }
575}
576
577#[cfg(test)]
578mod tests {
579    use super::*;
580    use std::sync::Mutex;
581
    // Shared lock for every test that reads or writes DAKERA_DECAY_* env vars.
    // Function-local `static ENV_LOCK` creates a separate mutex per test function,
    // which means the locks are completely independent and offer no protection
    // across concurrent tests. All env-var tests must share this single lock.
    static ENV_LOCK: Mutex<()> = Mutex::new(());

    // Build an engine with the given strategy/half-life and the default
    // deletion threshold (0.01).
    fn make_engine(strategy: DecayStrategy, half_life: f64) -> DecayEngine {
        DecayEngine::new(DecayConfig {
            strategy,
            half_life_hours: half_life,
            min_importance: 0.01,
        })
    }

    // Default args: Episodic type, 0 access count (same as old behavior with multiplier=1.0, shield=1.5)
    const EPISODIC: MemoryType = MemoryType::Episodic;
598
    #[test]
    fn test_exponential_decay_at_half_life_episodic_no_access() {
        let engine = make_engine(DecayStrategy::Exponential, 168.0);
        // Effective half-life = 168 / (1.0 * 1.5) = 112h (faster for never-accessed)
        let result = engine.calculate_decay(1.0, 112.0, &EPISODIC, 0);
        assert!((result - 0.5).abs() < 0.01, "Expected ~0.5, got {}", result);
    }

    #[test]
    fn test_exponential_decay_zero_time() {
        // Zero elapsed time must return the input importance unchanged.
        let engine = make_engine(DecayStrategy::Exponential, 168.0);
        let result = engine.calculate_decay(0.8, 0.0, &EPISODIC, 0);
        assert!((result - 0.8).abs() < 0.001);
    }

    #[test]
    fn test_linear_decay_floors_at_zero() {
        // Linear decay subtracts an absolute amount; it must saturate at 0.
        let engine = make_engine(DecayStrategy::Linear, 168.0);
        let result = engine.calculate_decay(0.3, 168.0, &EPISODIC, 0);
        assert!(result >= 0.0, "Should not go below 0, got {}", result);
    }

    #[test]
    fn test_procedural_decays_slower_than_working() {
        // Type multipliers: Working 3.0 vs Procedural 0.3.
        let engine = make_engine(DecayStrategy::Exponential, 168.0);
        let working = engine.calculate_decay(1.0, 168.0, &MemoryType::Working, 0);
        let procedural = engine.calculate_decay(1.0, 168.0, &MemoryType::Procedural, 0);
        assert!(
            procedural > working,
            "Procedural ({}) should decay slower than Working ({})",
            procedural,
            working
        );
    }

    #[test]
    fn test_high_access_count_decays_slower() {
        // access_count=10 → shield 1/2.0 vs never-accessed → 1.5.
        let engine = make_engine(DecayStrategy::Exponential, 168.0);
        let no_access = engine.calculate_decay(1.0, 168.0, &EPISODIC, 0);
        let high_access = engine.calculate_decay(1.0, 168.0, &EPISODIC, 10);
        assert!(
            high_access > no_access,
            "High access ({}) should decay slower than no access ({})",
            high_access,
            no_access
        );
    }

    #[test]
    fn test_semantic_decays_slower_than_episodic() {
        // Same access count isolates the type multiplier (0.5 vs 1.0).
        let engine = make_engine(DecayStrategy::Exponential, 168.0);
        let episodic = engine.calculate_decay(1.0, 168.0, &EPISODIC, 5);
        let semantic = engine.calculate_decay(1.0, 168.0, &MemoryType::Semantic, 5);
        assert!(
            semantic > episodic,
            "Semantic ({}) should decay slower than Episodic ({})",
            semantic,
            episodic
        );
    }

    #[test]
    fn test_access_boost_scales_with_importance() {
        let low = DecayEngine::access_boost(0.2);
        let high = DecayEngine::access_boost(0.8);
        let boost_low = low - 0.2;
        let boost_high = high - 0.8;
        assert!(
            boost_high > boost_low,
            "High-importance boost ({}) should be larger than low-importance boost ({})",
            boost_high,
            boost_low
        );
        // Verify range: 0.05 + 0.05*importance
        assert!((boost_low - (0.05 + 0.05 * 0.2)).abs() < 0.001);
        assert!((boost_high - (0.05 + 0.05 * 0.8)).abs() < 0.001);
    }

    #[test]
    fn test_access_boost_caps_at_one() {
        // At or near the ceiling, the boosted value must clamp to exactly 1.0.
        assert!((DecayEngine::access_boost(1.0) - 1.0).abs() < 0.001);
        assert!((DecayEngine::access_boost(0.96) - 1.0).abs() < 0.001);
    }

    #[test]
    fn test_decay_clamps_to_range() {
        // Extreme elapsed time with a tiny half-life must stay within [0, 1].
        let engine = make_engine(DecayStrategy::Exponential, 1.0);
        let result = engine.calculate_decay(0.001, 100.0, &EPISODIC, 0);
        assert!(result >= 0.0 && result <= 1.0);
    }

    #[test]
    fn test_step_function_decay() {
        let engine = make_engine(DecayStrategy::StepFunction, 168.0);
        // Effective half-life for Episodic+0 access = 168/1.5 = 112h
        let eff_hl = 168.0 / 1.5;

        // Before first step - no decay
        let result = engine.calculate_decay(1.0, eff_hl * 0.5, &EPISODIC, 0);
        assert!((result - 1.0).abs() < 0.001);

        // At first step
        let result = engine.calculate_decay(1.0, eff_hl, &EPISODIC, 0);
        assert!((result - 0.5).abs() < 0.001);
    }
704
    // ── DecayEngine::new ─────────────────────────────────────────────────────

    #[test]
    fn test_decay_engine_new_stores_config() {
        let cfg = DecayConfig {
            strategy: DecayStrategy::Linear,
            half_life_hours: 48.0,
            min_importance: 0.05,
        };
        let engine = DecayEngine::new(cfg.clone());
        assert!(matches!(engine.config.strategy, DecayStrategy::Linear));
        assert!((engine.config.half_life_hours - 48.0).abs() < 1e-9);
        assert!((engine.config.min_importance - 0.05).abs() < 1e-6);
    }

    // ── DecayEngineConfig::default ────────────────────────────────────────────

    #[test]
    fn test_decay_engine_config_default_values() {
        let cfg = DecayEngineConfig::default();
        assert!(matches!(
            cfg.decay_config.strategy,
            DecayStrategy::Exponential
        ));
        assert!((cfg.decay_config.half_life_hours - 168.0).abs() < 1e-9);
        assert!((cfg.decay_config.min_importance - 0.01).abs() < 1e-6);
        assert_eq!(cfg.interval_secs, 3600);
    }

    // ── DecayEngineConfig::from_env ────────────────────────────────────────────

    #[test]
    fn test_decay_engine_config_from_env_defaults_without_vars() {
        let _guard = ENV_LOCK.lock().unwrap();
        // With no env vars set, from_env should return the same as default()
        std::env::remove_var("DAKERA_DECAY_HALF_LIFE_HOURS");
        std::env::remove_var("DAKERA_DECAY_MIN_IMPORTANCE");
        std::env::remove_var("DAKERA_DECAY_INTERVAL_SECS");
        std::env::remove_var("DAKERA_DECAY_STRATEGY");
        let cfg = DecayEngineConfig::from_env();
        assert!(matches!(
            cfg.decay_config.strategy,
            DecayStrategy::Exponential
        ));
        assert!((cfg.decay_config.half_life_hours - 168.0).abs() < 1e-9);
    }

    #[test]
    fn test_decay_engine_config_from_env_linear_strategy() {
        let _guard = ENV_LOCK.lock().unwrap();

        std::env::set_var("DAKERA_DECAY_STRATEGY", "linear");
        let cfg = DecayEngineConfig::from_env();
        std::env::remove_var("DAKERA_DECAY_STRATEGY");
        assert!(matches!(cfg.decay_config.strategy, DecayStrategy::Linear));
    }

    #[test]
    fn test_decay_engine_config_from_env_step_strategy() {
        let _guard = ENV_LOCK.lock().unwrap();

        // "step" is one of three accepted aliases for StepFunction.
        std::env::set_var("DAKERA_DECAY_STRATEGY", "step");
        let cfg = DecayEngineConfig::from_env();
        std::env::remove_var("DAKERA_DECAY_STRATEGY");
        assert!(matches!(
            cfg.decay_config.strategy,
            DecayStrategy::StepFunction
        ));
    }

    #[test]
    fn test_decay_engine_config_from_env_unknown_strategy_defaults_to_exponential() {
        let _guard = ENV_LOCK.lock().unwrap();

        // Unrecognized strategy strings must fall back, never panic.
        std::env::set_var("DAKERA_DECAY_STRATEGY", "bogus");
        let cfg = DecayEngineConfig::from_env();
        std::env::remove_var("DAKERA_DECAY_STRATEGY");
        assert!(matches!(
            cfg.decay_config.strategy,
            DecayStrategy::Exponential
        ));
    }
787
    // ── push_history capping ─────────────────────────────────────────────────

    #[test]
    fn test_push_history_caps_at_max() {
        let mut history: Vec<ActivityHistoryPoint> = Vec::new();
        // Fill past the cap
        for i in 0..(MAX_HISTORY_POINTS + 10) {
            push_history(
                &mut history,
                ActivityHistoryPoint {
                    timestamp: i as u64,
                    decay_deleted: 0,
                    decay_adjusted: 0,
                    dedup_removed: 0,
                    consolidated: 0,
                },
            );
        }
        assert_eq!(history.len(), MAX_HISTORY_POINTS);
        // Oldest entries are evicted; newest is last
        assert_eq!(
            history.last().unwrap().timestamp,
            (MAX_HISTORY_POINTS + 9) as u64
        );
    }

    #[test]
    fn test_push_history_below_cap_grows_normally() {
        // Under the cap, no eviction should occur.
        let mut history: Vec<ActivityHistoryPoint> = Vec::new();
        for i in 0..5 {
            push_history(
                &mut history,
                ActivityHistoryPoint {
                    timestamp: i,
                    decay_deleted: 0,
                    decay_adjusted: 0,
                    dedup_removed: 0,
                    consolidated: 0,
                },
            );
        }
        assert_eq!(history.len(), 5);
    }
831
832    // ── BackgroundMetrics ────────────────────────────────────────────────────
833
834    #[test]
835    fn test_background_metrics_new_not_dirty() {
836        let m = BackgroundMetrics::new();
837        assert!(!m.is_dirty());
838    }
839
840    #[test]
841    fn test_background_metrics_record_decay_sets_dirty() {
842        let m = BackgroundMetrics::new();
843        let result = DecayResult {
844            namespaces_processed: 1,
845            memories_processed: 10,
846            memories_decayed: 3,
847            memories_deleted: 1,
848        };
849        m.record_decay(&result);
850        assert!(m.is_dirty());
851    }
852
853    #[test]
854    fn test_background_metrics_clear_dirty() {
855        let m = BackgroundMetrics::new();
856        let result = DecayResult::default();
857        m.record_decay(&result);
858        assert!(m.is_dirty());
859        m.clear_dirty();
860        assert!(!m.is_dirty());
861    }
862
863    #[test]
864    fn test_background_metrics_snapshot_totals() {
865        let m = BackgroundMetrics::new();
866        m.record_decay(&DecayResult {
867            namespaces_processed: 2,
868            memories_processed: 20,
869            memories_decayed: 5,
870            memories_deleted: 2,
871        });
872        m.record_decay(&DecayResult {
873            namespaces_processed: 1,
874            memories_processed: 5,
875            memories_decayed: 1,
876            memories_deleted: 1,
877        });
878        let snap = m.snapshot();
879        assert_eq!(snap.total_decay_deleted, 3); // 2 + 1
880        assert_eq!(snap.decay_cycles_run, 2);
881    }
882
883    #[test]
884    fn test_background_metrics_record_dedup() {
885        let m = BackgroundMetrics::new();
886        m.record_dedup(2, 100, 5);
887        let snap = m.snapshot();
888        assert_eq!(snap.total_dedup_removed, 5);
889        assert!(snap.last_dedup.is_some());
890    }
891
892    #[test]
893    fn test_background_metrics_record_consolidation() {
894        let m = BackgroundMetrics::new();
895        m.record_consolidation(1, 30, 2, 6);
896        let snap = m.snapshot();
897        assert_eq!(snap.total_consolidated, 6);
898        assert!(snap.last_consolidation.is_some());
899    }
900
901    #[test]
902    fn test_background_metrics_restore() {
903        let inner = BackgroundMetricsInner {
904            total_decay_deleted: 42,
905            decay_cycles_run: 7,
906            ..Default::default()
907        };
908        let m = BackgroundMetrics::restore(inner);
909        assert!(!m.is_dirty()); // restore does not dirty
910        assert_eq!(m.snapshot().total_decay_deleted, 42);
911        assert_eq!(m.snapshot().decay_cycles_run, 7);
912    }
913
914    // ── additional calculate_decay branches ──────────────────────────────────
915
916    #[test]
917    fn test_linear_decay_formula() {
918        let engine = make_engine(DecayStrategy::Linear, 100.0);
919        // Effective half-life for Episodic+1 access = 100 / (1.0 * (1/(1+0.1))) = 110h
920        // decay_amount = (hours / eff_hl) * 0.5
921        // At hours=55: decay_amount = (55/110) * 0.5 = 0.25; result = 1.0 - 0.25 = 0.75
922        let eff_hl = 100.0 / (1.0 * (1.0 / (1.0 + 0.1)));
923        let decay_amount = (55.0 / eff_hl) * 0.5;
924        let expected = (1.0_f32 - decay_amount as f32).max(0.0);
925        let result = engine.calculate_decay(1.0, 55.0, &EPISODIC, 1);
926        assert!(
927            (result - expected).abs() < 0.01,
928            "expected ~{expected}, got {result}"
929        );
930    }
931
932    #[test]
933    fn test_working_memory_decays_fastest() {
934        let engine = make_engine(DecayStrategy::Exponential, 168.0);
935        let working = engine.calculate_decay(1.0, 168.0, &MemoryType::Working, 5);
936        let episodic = engine.calculate_decay(1.0, 168.0, &EPISODIC, 5);
937        let semantic = engine.calculate_decay(1.0, 168.0, &MemoryType::Semantic, 5);
938        let procedural = engine.calculate_decay(1.0, 168.0, &MemoryType::Procedural, 5);
939        assert!(working < episodic);
940        assert!(episodic < semantic);
941        assert!(semantic < procedural);
942    }
943
944    #[test]
945    fn test_access_boost_minimum_is_0_05() {
946        // access_boost(0) = 0.05 + 0.05*0 = 0.05 → result = 0.0 + 0.05 = 0.05
947        let result = DecayEngine::access_boost(0.0);
948        assert!((result - 0.05).abs() < 0.001, "expected 0.05, got {result}");
949    }
950}