Skip to main content

organism_simulation/
outcome.rs

1//! Outcome simulator.
2//!
3//! Evaluates candidate plans by analyzing their annotations (impacts, costs,
4//! risks) and producing probabilistic outcome estimates via Monte Carlo
5//! sampling.
6
7use crate::types::RiskLikelihood;
8use crate::{DimensionResult, Sample, SimulationDimension};
9
/// Configuration for the outcome simulator.
///
/// Defaults (see the `Default` impl below): 1000 samples, 0.6 confidence
/// threshold, 0.3 risk weight.
#[derive(Debug, Clone)]
pub struct OutcomeSimulatorConfig {
    /// Number of Monte Carlo samples to draw. Also sets the rounding
    /// granularity of reported bucket probabilities (multiples of 1/samples).
    pub samples: u32,
    /// Minimum confidence threshold to pass.
    /// Plans whose effective confidence falls below this fail the dimension.
    pub confidence_threshold: f64,
    /// Risk penalty weight (higher = more conservative).
    /// Multiplies the average risk likelihood before it is subtracted from
    /// the base confidence.
    pub risk_weight: f64,
}
20
21impl Default for OutcomeSimulatorConfig {
22    fn default() -> Self {
23        Self {
24            samples: 1000,
25            confidence_threshold: 0.6,
26            risk_weight: 0.3,
27        }
28    }
29}
30
/// Simulates outcome likelihood for candidate plans.
///
/// Extracts impact confidences and risk severities from plan annotations,
/// then runs Monte Carlo sampling to estimate the probability distribution
/// of success outcomes.
pub struct OutcomeSimulator {
    // Tunables: sample count, pass threshold, and risk weighting.
    config: OutcomeSimulatorConfig,
}
39
40impl OutcomeSimulator {
41    #[must_use]
42    pub fn new(config: OutcomeSimulatorConfig) -> Self {
43        Self { config }
44    }
45
46    /// Extract impact confidences from the plan JSON.
47    fn extract_impacts(plan: &serde_json::Value) -> Vec<f64> {
48        plan.get("annotation")
49            .and_then(|a| a.get("impacts"))
50            .and_then(|i| i.as_array())
51            .map(|arr| {
52                arr.iter()
53                    .filter_map(|v| v.get("confidence").and_then(serde_json::Value::as_f64))
54                    .collect()
55            })
56            .unwrap_or_default()
57    }
58
59    /// Extract risk likelihoods from the plan JSON.
60    fn extract_risks(plan: &serde_json::Value) -> Vec<f64> {
61        plan.get("annotation")
62            .and_then(|a| a.get("risks"))
63            .and_then(|r| r.as_array())
64            .map(|arr| {
65                arr.iter()
66                    .filter_map(|v| {
67                        v.get("likelihood").and_then(|l| l.as_str()).map(|s| {
68                            RiskLikelihood::from_str_lossy(s).map_or(0.5, |l| l.probability())
69                        })
70                    })
71                    .collect()
72            })
73            .unwrap_or_default()
74    }
75
76    /// Run Monte Carlo sampling given base confidence and risk factors.
77    fn sample(&self, base_confidence: f64, risk_penalty: f64) -> Vec<Sample> {
78        let effective = (base_confidence - risk_penalty).clamp(0.0, 1.0);
79        let n = self.config.samples;
80
81        // Produce a discrete probability distribution over outcome buckets.
82        // 5 buckets: [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
83        let buckets = 5;
84        let mut samples = Vec::with_capacity(buckets);
85
86        for i in 0..buckets {
87            let bucket_center = (f64::from(u32::try_from(i).unwrap_or(0)) + 0.5)
88                / f64::from(u32::try_from(buckets).unwrap_or(5));
89            // Beta-like distribution centered on effective confidence
90            let distance = (bucket_center - effective).abs();
91            let weight = (-distance * 4.0).exp();
92            samples.push(Sample {
93                value: bucket_center,
94                probability: weight,
95            });
96        }
97
98        // Normalize probabilities
99        let total: f64 = samples.iter().map(|s| s.probability).sum();
100        if total > 0.0 {
101            for s in &mut samples {
102                s.probability /= total;
103            }
104        }
105
106        // Scale sample counts for reporting
107        for s in &mut samples {
108            s.probability = (s.probability * f64::from(n)).round() / f64::from(n);
109        }
110
111        samples
112    }
113}
114
115impl OutcomeSimulator {
116    /// Simulate outcomes for a plan represented as JSON.
117    pub fn simulate(&self, plan: &serde_json::Value) -> DimensionResult {
118        let impacts = Self::extract_impacts(plan);
119        let risks = Self::extract_risks(plan);
120
121        // Base confidence: average of impact confidences, or 0.5 if none stated.
122        let impact_count = impacts.len();
123        let base_confidence = if impacts.is_empty() {
124            0.5
125        } else {
126            impacts.iter().sum::<f64>() / f64::from(u32::try_from(impact_count).unwrap_or(1))
127        };
128
129        // Risk penalty: weighted average of risk probabilities.
130        let risk_count = risks.len();
131        let risk_penalty = if risks.is_empty() {
132            0.0
133        } else {
134            let avg_risk =
135                risks.iter().sum::<f64>() / f64::from(u32::try_from(risk_count).unwrap_or(1));
136            avg_risk * self.config.risk_weight
137        };
138
139        let effective_confidence = (base_confidence - risk_penalty).clamp(0.0, 1.0);
140        let samples = self.sample(base_confidence, risk_penalty);
141        let passed = effective_confidence >= self.config.confidence_threshold;
142
143        let mut findings = Vec::new();
144        if impacts.is_empty() {
145            findings.push("no impact annotations — using neutral prior (0.5)".into());
146        } else {
147            findings.push(format!(
148                "{} impacts, avg confidence {:.2}",
149                impacts.len(),
150                base_confidence,
151            ));
152        }
153        if !risks.is_empty() {
154            findings.push(format!(
155                "{} risks identified, penalty {:.2}",
156                risks.len(),
157                risk_penalty,
158            ));
159        }
160        if !passed {
161            findings.push(format!(
162                "below threshold: {:.2} < {:.2}",
163                effective_confidence, self.config.confidence_threshold,
164            ));
165        }
166
167        DimensionResult {
168            dimension: SimulationDimension::Outcome,
169            passed,
170            confidence: effective_confidence,
171            findings,
172            samples,
173        }
174    }
175}
176
177// ── Suggestor Implementation ──────────────────────────────────────
178
179use crate::types::SimulationVerdict;
180use converge_pack::{AgentEffect, Context, ContextKey, ProposedFact, Suggestor};
181
/// Outcome simulation as a Suggestor — participates in the convergence loop.
///
/// Reads strategies from `ContextKey::Strategies`, simulates each, and
/// proposes constraints for strategies that fail the outcome threshold.
/// Strategies that pass get an approval fact in `ContextKey::Evaluations`.
pub struct OutcomeSimulationAgent {
    // Underlying simulator that scores each strategy's plan JSON.
    simulator: OutcomeSimulator,
}
190
191impl OutcomeSimulationAgent {
192    #[must_use]
193    pub fn new(config: OutcomeSimulatorConfig) -> Self {
194        Self {
195            simulator: OutcomeSimulator::new(config),
196        }
197    }
198
199    #[must_use]
200    pub fn default_config() -> Self {
201        Self {
202            simulator: OutcomeSimulator::new(OutcomeSimulatorConfig::default()),
203        }
204    }
205}
206
207#[async_trait::async_trait]
208#[allow(clippy::unnecessary_literal_bound)]
209impl Suggestor for OutcomeSimulationAgent {
210    fn name(&self) -> &str {
211        "outcome-simulation"
212    }
213
214    fn dependencies(&self) -> &[ContextKey] {
215        &[ContextKey::Strategies]
216    }
217
218    fn accepts(&self, ctx: &dyn Context) -> bool {
219        // Run when strategies exist and we haven't already evaluated them
220        ctx.has(ContextKey::Strategies) && !ctx.has(ContextKey::Evaluations)
221    }
222
223    async fn execute(&self, ctx: &dyn Context) -> AgentEffect {
224        let strategies = ctx.get(ContextKey::Strategies);
225        let mut proposals = Vec::new();
226
227        for fact in strategies {
228            let plan_json: serde_json::Value = serde_json::from_str(&fact.content)
229                .unwrap_or_else(|_| serde_json::json!({"description": fact.content}));
230
231            let result = self.simulator.simulate(&plan_json);
232
233            let verdict = SimulationVerdict {
234                strategy_id: fact.id.clone(),
235                dimension: SimulationDimension::Outcome,
236                passed: result.passed,
237                confidence: result.confidence,
238                findings: result.findings,
239                recommendation: if result.passed {
240                    None
241                } else {
242                    Some(crate::types::SimulationRecommendation::DoNotProceed)
243                },
244            };
245
246            let key = if result.passed {
247                ContextKey::Evaluations
248            } else {
249                ContextKey::Constraints
250            };
251
252            proposals.push(ProposedFact::new(
253                key,
254                verdict.fact_id(),
255                verdict.to_json(),
256                "outcome-simulation",
257            ));
258        }
259
260        AgentEffect::with_proposals(proposals)
261    }
262}
263
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    /// Builds a simulator with the stock configuration.
    fn default_simulator() -> OutcomeSimulator {
        OutcomeSimulator::new(OutcomeSimulatorConfig::default())
    }

    #[test]
    fn high_confidence_plan_passes() {
        let plan = json!({
            "annotation": {
                "impacts": [
                    {"description": "revenue increase", "confidence": 0.9},
                    {"description": "customer satisfaction", "confidence": 0.85}
                ],
                "risks": []
            }
        });

        let outcome = default_simulator().simulate(&plan);

        assert_eq!(outcome.dimension, SimulationDimension::Outcome);
        assert!(outcome.passed);
        assert!(outcome.confidence > 0.8);
    }

    #[test]
    fn low_confidence_plan_fails() {
        let plan = json!({
            "annotation": {
                "impacts": [
                    {"description": "speculative", "confidence": 0.3}
                ],
                "risks": [
                    {"likelihood": "likely", "description": "market shift"}
                ]
            }
        });

        let outcome = default_simulator().simulate(&plan);

        assert!(!outcome.passed);
        assert!(outcome.confidence < 0.6);
    }

    #[test]
    fn empty_plan_uses_neutral_prior() {
        let outcome = default_simulator().simulate(&json!({}));

        // The 0.5 prior sits below the default 0.6 threshold.
        assert!(!outcome.passed);
        assert!((outcome.confidence - 0.5).abs() < f64::EPSILON);
        assert!(outcome.findings[0].contains("neutral prior"));
    }

    #[test]
    fn risks_reduce_confidence() {
        let simulator = default_simulator();

        let plan_no_risk = json!({
            "annotation": {
                "impacts": [{"description": "growth", "confidence": 0.8}],
                "risks": []
            }
        });
        let plan_with_risk = json!({
            "annotation": {
                "impacts": [{"description": "growth", "confidence": 0.8}],
                "risks": [
                    {"likelihood": "very_likely", "description": "regulatory"},
                    {"likelihood": "likely", "description": "competition"}
                ]
            }
        });

        let clean = simulator.simulate(&plan_no_risk);
        let risky = simulator.simulate(&plan_with_risk);

        assert!(clean.confidence > risky.confidence);
    }

    #[test]
    fn samples_are_normalized() {
        let plan = json!({
            "annotation": {
                "impacts": [{"description": "ok", "confidence": 0.7}],
                "risks": []
            }
        });

        let outcome = default_simulator().simulate(&plan);
        assert!(!outcome.samples.is_empty());

        // Rounding to 1/n granularity may nudge the total slightly off 1.0.
        let total: f64 = outcome.samples.iter().map(|s| s.probability).sum();
        assert!((total - 1.0).abs() < 0.01);
    }

    #[test]
    fn custom_config() {
        let strict = OutcomeSimulator::new(OutcomeSimulatorConfig {
            samples: 100,
            confidence_threshold: 0.9,
            risk_weight: 0.5,
        });
        let plan = json!({
            "annotation": {
                "impacts": [{"description": "decent", "confidence": 0.8}],
                "risks": []
            }
        });

        // 0.8 confidence cannot clear the strict 0.9 threshold.
        assert!(!strict.simulate(&plan).passed);
    }

    #[test]
    fn likelihood_variants() {
        use crate::types::RiskLikelihood;
        assert!((RiskLikelihood::VeryLikely.probability() - 0.9).abs() < f64::EPSILON);
        assert!((RiskLikelihood::Unlikely.probability() - 0.15).abs() < f64::EPSILON);
        assert_eq!(RiskLikelihood::from_str_lossy("unknown"), None);
    }
}