//! lean_ctx/core/feedback.rs
1use std::collections::HashMap;
2
3use serde::{Deserialize, Serialize};
4
/// Feedback loop for learning optimal compression parameters.
///
/// Tracks compression outcomes per session and learns which
/// threshold combinations lead to fewer turns and higher success rates.
///
/// NOTE(review): the prose above reads like module-level docs but, as a
/// `///` comment, it attaches to this struct; consider `//!` at file top.
/// A `CompressionOutcome` is one recorded session result — the learning
/// sample consumed by `FeedbackStore`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct CompressionOutcome {
    // Opaque identifier of the session this outcome belongs to.
    pub session_id: String,
    // Language key the outcome applies to (tests use "rs" —
    // presumably an extension-like tag; confirm against callers).
    pub language: String,
    // Entropy threshold that was in effect during the session.
    pub entropy_threshold: f64,
    // Jaccard-similarity threshold that was in effect during the session.
    pub jaccard_threshold: f64,
    // Total conversation turns taken; fewer is treated as better.
    pub total_turns: u32,
    // Tokens saved by compression relative to the original.
    pub tokens_saved: u64,
    // Token count before compression (denominator of the ratio).
    pub tokens_original: u64,
    // Cache hits observed during the session (not used for learning here).
    pub cache_hits: u32,
    // Total reads during the session (not used for learning here).
    pub total_reads: u32,
    // Whether the task succeeded; only completed sessions inform learning.
    pub task_completed: bool,
    // Timestamp as a free-form string; format is not enforced here.
    pub timestamp: String,
}
24
/// Persistent store of compression outcomes plus per-language learned
/// thresholds. Serialized as JSON at the path given by `feedback_path()`.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct FeedbackStore {
    // Rolling history of session outcomes (capped by `record_outcome`).
    pub outcomes: Vec<CompressionOutcome>,
    // Learned threshold pairs, keyed by language.
    pub learned_thresholds: HashMap<String, LearnedThresholds>,
}
30
/// Thresholds learned for a single language, smoothed across sessions
/// by `FeedbackStore::update_learned_thresholds`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LearnedThresholds {
    // Smoothed entropy threshold.
    pub entropy: f64,
    // Smoothed Jaccard threshold.
    pub jaccard: f64,
    // Number of completed sessions that informed the current values.
    pub sample_count: u32,
    // Blended efficiency score for this language's completed sessions
    // (see `update_learned_thresholds` for how it is computed).
    pub avg_efficiency: f64,
}
38
39impl FeedbackStore {
40    pub fn load() -> Self {
41        let path = feedback_path();
42        if path.exists() {
43            if let Ok(content) = std::fs::read_to_string(&path) {
44                if let Ok(store) = serde_json::from_str(&content) {
45                    return store;
46                }
47            }
48        }
49        Self::default()
50    }
51
52    pub fn save(&self) {
53        let path = feedback_path();
54        if let Some(parent) = path.parent() {
55            let _ = std::fs::create_dir_all(parent);
56        }
57        if let Ok(json) = serde_json::to_string_pretty(self) {
58            let _ = std::fs::write(path, json);
59        }
60    }
61
62    pub fn record_outcome(&mut self, outcome: CompressionOutcome) {
63        let lang = outcome.language.clone();
64        self.outcomes.push(outcome);
65
66        // Keep last 200 outcomes to prevent unbounded growth
67        if self.outcomes.len() > 200 {
68            self.outcomes.drain(0..self.outcomes.len() - 200);
69        }
70
71        self.update_learned_thresholds(&lang);
72        self.save();
73    }
74
75    fn update_learned_thresholds(&mut self, language: &str) {
76        let relevant: Vec<&CompressionOutcome> = self
77            .outcomes
78            .iter()
79            .filter(|o| o.language == language && o.task_completed)
80            .collect();
81
82        if relevant.len() < 5 {
83            return; // not enough data to learn
84        }
85
86        // Find the threshold combination that maximizes efficiency
87        // Efficiency = tokens_saved / tokens_original * (1 / total_turns)
88        let mut best_entropy = 1.0;
89        let mut best_jaccard = 0.7;
90        let mut best_efficiency = 0.0;
91
92        for outcome in &relevant {
93            let compression_ratio = if outcome.tokens_original > 0 {
94                outcome.tokens_saved as f64 / outcome.tokens_original as f64
95            } else {
96                0.0
97            };
98            let turn_efficiency = 1.0 / (outcome.total_turns.max(1) as f64);
99            let efficiency = compression_ratio * 0.6 + turn_efficiency * 0.4;
100
101            if efficiency > best_efficiency {
102                best_efficiency = efficiency;
103                best_entropy = outcome.entropy_threshold;
104                best_jaccard = outcome.jaccard_threshold;
105            }
106        }
107
108        // Weighted average with current learned values for stability
109        let entry = self
110            .learned_thresholds
111            .entry(language.to_string())
112            .or_insert(LearnedThresholds {
113                entropy: best_entropy,
114                jaccard: best_jaccard,
115                sample_count: 0,
116                avg_efficiency: 0.0,
117            });
118
119        let momentum = 0.7; // favor existing values for stability
120        entry.entropy = entry.entropy * momentum + best_entropy * (1.0 - momentum);
121        entry.jaccard = entry.jaccard * momentum + best_jaccard * (1.0 - momentum);
122        entry.sample_count = relevant.len() as u32;
123        entry.avg_efficiency = best_efficiency;
124    }
125
126    pub fn get_learned_entropy(&self, language: &str) -> Option<f64> {
127        self.learned_thresholds.get(language).map(|t| t.entropy)
128    }
129
130    pub fn get_learned_jaccard(&self, language: &str) -> Option<f64> {
131        self.learned_thresholds.get(language).map(|t| t.jaccard)
132    }
133
134    pub fn format_report(&self) -> String {
135        let mut lines = vec![String::from("Feedback Loop Report")];
136        lines.push(format!("Total outcomes tracked: {}", self.outcomes.len()));
137        lines.push(String::new());
138
139        if self.learned_thresholds.is_empty() {
140            lines.push(
141                "No learned thresholds yet (need 5+ completed sessions per language).".to_string(),
142            );
143        } else {
144            lines.push("Learned Thresholds:".to_string());
145            for (lang, t) in &self.learned_thresholds {
146                lines.push(format!(
147                    "  {lang}: entropy={:.2} jaccard={:.2} (n={}, eff={:.1}%)",
148                    t.entropy,
149                    t.jaccard,
150                    t.sample_count,
151                    t.avg_efficiency * 100.0
152                ));
153            }
154        }
155
156        lines.join("\n")
157    }
158}
159
160fn feedback_path() -> std::path::PathBuf {
161    dirs::home_dir()
162        .unwrap_or_else(|| std::path::PathBuf::from("."))
163        .join(".lean-ctx")
164        .join("feedback.json")
165}
166
#[cfg(test)]
mod tests {
    use super::*;

    /// A default store starts with no history and no learned values.
    #[test]
    fn empty_store_loads() {
        let store = FeedbackStore::default();
        assert!(store.outcomes.is_empty());
        assert!(store.learned_thresholds.is_empty());
    }

    /// Fewer than 5 completed sessions must not produce learned thresholds.
    #[test]
    fn learned_thresholds_need_minimum_samples() {
        let mut store = FeedbackStore::default();
        // BUG FIX: push outcomes directly rather than via record_outcome(),
        // which calls save() and writes to the real user home directory —
        // a unit test must not touch the developer's filesystem.
        for i in 0..3 {
            store.outcomes.push(CompressionOutcome {
                session_id: format!("s{i}"),
                language: "rs".to_string(),
                entropy_threshold: 0.85,
                jaccard_threshold: 0.72,
                total_turns: 5,
                tokens_saved: 1000,
                tokens_original: 2000,
                cache_hits: 3,
                total_reads: 10,
                task_completed: true,
                timestamp: String::new(),
            });
        }
        store.update_learned_thresholds("rs");
        assert!(store.get_learned_entropy("rs").is_none()); // only 3, need 5
    }
}