
organism_simulation/causal.rs

//! Causal simulator.
//!
//! Evaluates candidate plans for causal reasoning quality: are the assumed
//! cause-effect relationships supported by evidence, or do they conflate
//! correlation with causation? Checks for confounders, missing links, and
//! circular reasoning.
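//!
//! Plans arrive as plain `serde_json::Value`s; the simulator only reads an
//! optional `annotation` object. As a sketch of the shape the extractors below
//! expect (the wider plan schema is assumed, not defined here):
//!
//! ```json
//! {
//!   "annotation": {
//!     "causal_claims": [
//!       {"cause": "training", "effect": "productivity", "evidence_count": 5, "confounders": []}
//!     ],
//!     "assumptions": ["stable market"]
//!   }
//! }
//! ```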

use crate::{DimensionResult, Sample, SimulationDimension};

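/// Tuning knobs for the causal simulator: how many evidence links a claim
/// needs before it is flagged as weak, how much confidence each confounded
/// claim costs, and the confidence level a plan must reach to pass.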
#[derive(Debug, Clone)]
pub struct CausalSimulatorConfig {
    pub min_evidence_links: u32,
    pub confounder_penalty: f64,
    pub confidence_threshold: f64,
}

impl Default for CausalSimulatorConfig {
    fn default() -> Self {
        Self {
            min_evidence_links: 1,
            confounder_penalty: 0.2,
            confidence_threshold: 0.5,
        }
    }
}

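/// Challenges the causal structure of a plan: claims with too few evidence
/// links, claims with declared confounders, and mutual (A → B plus B → A)
/// claims all lower the confidence score.
///
/// A minimal usage sketch, mirroring the tests below (paths assumed to be in
/// scope):
///
/// ```ignore
/// use serde_json::json;
///
/// let sim = CausalSimulator::new(CausalSimulatorConfig::default());
/// let plan = json!({
///     "annotation": {
///         "causal_claims": [
///             {"cause": "training", "effect": "productivity", "evidence_count": 5, "confounders": []}
///         ]
///     }
/// });
/// assert!(sim.simulate(&plan).passed);
/// ```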
pub struct CausalSimulator {
    config: CausalSimulatorConfig,
}

impl CausalSimulator {
    #[must_use]
    pub fn new(config: CausalSimulatorConfig) -> Self {
        Self { config }
    }

    fn extract_causal_claims(plan: &serde_json::Value) -> Vec<CausalClaim> {
        plan.get("annotation")
            .and_then(|a| a.get("causal_claims"))
            .and_then(|c| c.as_array())
            .map(|arr| {
                arr.iter()
                    .filter_map(|v| {
                        Some(CausalClaim {
                            cause: v.get("cause").and_then(|c| c.as_str())?.to_string(),
                            effect: v.get("effect").and_then(|e| e.as_str())?.to_string(),
                            evidence_count: v
                                .get("evidence_count")
                                .and_then(serde_json::Value::as_u64)
                                .map_or(0, |n| u32::try_from(n).unwrap_or(0)),
                            confounders: v
                                .get("confounders")
                                .and_then(|c| c.as_array())
                                .map(|arr| {
                                    arr.iter()
                                        .filter_map(|s| s.as_str().map(String::from))
                                        .collect()
                                })
                                .unwrap_or_default(),
                        })
                    })
                    .collect()
            })
            .unwrap_or_default()
    }

    fn extract_assumptions(plan: &serde_json::Value) -> Vec<String> {
        plan.get("annotation")
            .and_then(|a| a.get("assumptions"))
            .and_then(|a| a.as_array())
            .map(|arr| {
                arr.iter()
                    .filter_map(|v| v.as_str().map(String::from))
                    .collect()
            })
            .unwrap_or_default()
    }

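    /// Discretizes `confidence` into five evenly spaced buckets (centers
    /// 0.1, 0.3, 0.5, 0.7, 0.9), weights each bucket by
    /// `exp(-4 * |center - confidence|)`, and normalizes the weights into a
    /// probability distribution.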
    fn sample(confidence: f64) -> Vec<Sample> {
        let buckets = 5;
        let mut samples = Vec::with_capacity(buckets);

        for i in 0..buckets {
            let bucket_center = (f64::from(u32::try_from(i).unwrap_or(0)) + 0.5)
                / f64::from(u32::try_from(buckets).unwrap_or(5));
            let distance = (bucket_center - confidence).abs();
            let weight = (-distance * 4.0).exp();
            samples.push(Sample {
                value: bucket_center,
                probability: weight,
            });
        }

        let total: f64 = samples.iter().map(|s| s.probability).sum();
        if total > 0.0 {
            for s in &mut samples {
                s.probability /= total;
            }
        }

        samples
    }

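    /// Scores the plan's causal claims.
    ///
    /// Confidence is `clamp(1 - weak/total - confounded * confounder_penalty, 0, 1)`.
    /// For example, two claims where one is weak and one is confounded (default
    /// penalty 0.2) give `1.0 - 0.5 - 0.2 = 0.3`, below the default 0.5 threshold.
    /// A plan with no claims and no assumptions passes vacuously at confidence 0.5.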
    pub fn simulate(&self, plan: &serde_json::Value) -> DimensionResult {
        let claims = Self::extract_causal_claims(plan);
        let assumptions = Self::extract_assumptions(plan);

        let mut findings = Vec::new();
        let mut weak_claims = 0u32;
        let mut confounded_claims = 0u32;
        let total_claims = u32::try_from(claims.len()).unwrap_or(0);

        if claims.is_empty() && assumptions.is_empty() {
            findings.push("no causal claims or assumptions declared — cannot assess".into());
            return DimensionResult {
                dimension: SimulationDimension::Causal,
                passed: true, // no claims = nothing to challenge
                confidence: 0.5,
                findings,
                samples: Self::sample(0.5),
            };
        }

        for claim in &claims {
            if claim.evidence_count < self.config.min_evidence_links {
                findings.push(format!(
                    "weak: '{}' → '{}' has {} evidence link(s), need {}",
                    claim.cause, claim.effect, claim.evidence_count, self.config.min_evidence_links,
                ));
                weak_claims += 1;
            }

            if !claim.confounders.is_empty() {
                findings.push(format!(
                    "confounders on '{}' → '{}': {}",
                    claim.cause,
                    claim.effect,
                    claim.confounders.join(", "),
                ));
                confounded_claims += 1;
            }
        }

        if !assumptions.is_empty() {
            findings.push(format!("{} unstated assumptions noted", assumptions.len()));
        }

        // Check for circular reasoning (A→B and B→A)
        for (i, a) in claims.iter().enumerate() {
            for b in claims.iter().skip(i + 1) {
                if a.cause == b.effect && a.effect == b.cause {
                    findings.push(format!(
                        "circular: '{}' ↔ '{}' — mutual causation claimed",
                        a.cause, a.effect,
                    ));
                    weak_claims += 1;
                }
            }
        }

        let weakness_ratio = if total_claims == 0 {
            0.0
        } else {
            f64::from(weak_claims) / f64::from(total_claims)
        };
        let confounder_penalty = f64::from(confounded_claims) * self.config.confounder_penalty;

        let confidence = (1.0 - weakness_ratio - confounder_penalty).clamp(0.0, 1.0);
        let passed = confidence >= self.config.confidence_threshold;
        let samples = Self::sample(confidence);

        if !passed {
            findings.push(format!(
                "below threshold: {confidence:.2} < {:.2}",
                self.config.confidence_threshold,
            ));
        }

        DimensionResult {
            dimension: SimulationDimension::Causal,
            passed,
            confidence,
            findings,
            samples,
        }
    }
}

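/// A single cause → effect edge pulled from the plan's `causal_claims` annotation.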
struct CausalClaim {
    cause: String,
    effect: String,
    evidence_count: u32,
    confounders: Vec<String>,
}

// ── Suggestor Implementation ──────────────────────────────────────

use crate::types::{SimulationRecommendation, SimulationVerdict};
use converge_pack::{AgentEffect, Context, ContextKey, ProposedFact, Suggestor};

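/// [`Suggestor`] wrapper that runs the causal simulator over every `Strategies`
/// fact. Fact content is parsed as JSON, falling back to wrapping the raw text
/// in a `{"description": ...}` object; passing verdicts are proposed under
/// `Evaluations`, failing ones under `Constraints` with a `DoNotProceed`
/// recommendation.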
pub struct CausalSimulationAgent {
    simulator: CausalSimulator,
}

impl CausalSimulationAgent {
    #[must_use]
    pub fn new(config: CausalSimulatorConfig) -> Self {
        Self {
            simulator: CausalSimulator::new(config),
        }
    }

    #[must_use]
    pub fn default_config() -> Self {
        Self {
            simulator: CausalSimulator::new(CausalSimulatorConfig::default()),
        }
    }
}

#[async_trait::async_trait]
#[allow(clippy::unnecessary_literal_bound)]
impl Suggestor for CausalSimulationAgent {
    fn name(&self) -> &'static str {
        "causal-simulation"
    }

    fn dependencies(&self) -> &[ContextKey] {
        &[ContextKey::Strategies]
    }

    fn accepts(&self, ctx: &dyn Context) -> bool {
        ctx.has(ContextKey::Strategies) && !ctx.has(ContextKey::Evaluations)
    }

    async fn execute(&self, ctx: &dyn Context) -> AgentEffect {
        let strategies = ctx.get(ContextKey::Strategies);
        let mut proposals = Vec::new();

        for fact in strategies {
            let plan_json: serde_json::Value = serde_json::from_str(&fact.content)
                .unwrap_or_else(|_| serde_json::json!({"description": fact.content}));

            let result = self.simulator.simulate(&plan_json);

            let verdict = SimulationVerdict {
                strategy_id: fact.id.clone(),
                dimension: crate::SimulationDimension::Causal,
                passed: result.passed,
                confidence: result.confidence,
                findings: result.findings,
                recommendation: if result.passed {
                    None
                } else {
                    Some(SimulationRecommendation::DoNotProceed)
                },
            };

            let key = if result.passed {
                ContextKey::Evaluations
            } else {
                ContextKey::Constraints
            };

            proposals.push(ProposedFact::new(
                key,
                verdict.fact_id(),
                verdict.to_json(),
                "causal-simulation",
            ));
        }

        AgentEffect::with_proposals(proposals)
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    fn default_simulator() -> CausalSimulator {
        CausalSimulator::new(CausalSimulatorConfig::default())
    }

    #[test]
    fn strong_evidence_passes() {
        let sim = default_simulator();
        let plan = json!({
            "annotation": {
                "causal_claims": [{
                    "cause": "training",
                    "effect": "productivity",
                    "evidence_count": 5,
                    "confounders": []
                }]
            }
        });
        let result = sim.simulate(&plan);
        assert_eq!(result.dimension, SimulationDimension::Causal);
        assert!(result.passed);
        assert!(result.confidence > 0.8);
    }

    #[test]
    fn weak_evidence_penalized() {
        let sim = CausalSimulator::new(CausalSimulatorConfig {
            min_evidence_links: 3,
            ..CausalSimulatorConfig::default()
        });
        let plan = json!({
            "annotation": {
                "causal_claims": [{
                    "cause": "marketing",
                    "effect": "sales",
                    "evidence_count": 1,
                    "confounders": []
                }]
            }
        });
        let result = sim.simulate(&plan);
        assert!(!result.passed);
        assert!(result.findings.iter().any(|f| f.contains("weak")));
    }

    #[test]
    fn confounders_reduce_confidence() {
        let sim = default_simulator();
        let plan_clean = json!({
            "annotation": {
                "causal_claims": [{
                    "cause": "training",
                    "effect": "output",
                    "evidence_count": 5,
                    "confounders": []
                }]
            }
        });
        let plan_confounded = json!({
            "annotation": {
                "causal_claims": [{
                    "cause": "training",
                    "effect": "output",
                    "evidence_count": 5,
                    "confounders": ["seasonal_demand", "new_tools"]
                }]
            }
        });
        let clean = sim.simulate(&plan_clean);
        let confounded = sim.simulate(&plan_confounded);
        assert!(clean.confidence > confounded.confidence);
    }

    #[test]
    fn circular_reasoning_detected() {
        let sim = default_simulator();
        let plan = json!({
            "annotation": {
                "causal_claims": [
                    {"cause": "A", "effect": "B", "evidence_count": 2, "confounders": []},
                    {"cause": "B", "effect": "A", "evidence_count": 2, "confounders": []}
                ]
            }
        });
        let result = sim.simulate(&plan);
        assert!(result.findings.iter().any(|f| f.contains("circular")));
    }

    #[test]
    fn no_claims_passes_vacuously() {
        let sim = default_simulator();
        let plan = json!({});
        let result = sim.simulate(&plan);
        assert!(result.passed);
        assert!((result.confidence - 0.5).abs() < f64::EPSILON);
    }

    #[test]
    fn assumptions_noted() {
        let sim = default_simulator();
        let plan = json!({
            "annotation": {
                "causal_claims": [{
                    "cause": "X",
                    "effect": "Y",
                    "evidence_count": 3,
                    "confounders": []
                }],
                "assumptions": ["stable market", "no regulation changes"]
            }
        });
        let result = sim.simulate(&plan);
        assert!(result.findings.iter().any(|f| f.contains("assumptions")));
    }
}