use crate::cognition::belief_state::update_affect;
use crate::cognition::detector::{detect_response_strategy, extract_topics};
use crate::math::clamp;
use crate::types::belief::TopicBeliefs;
use crate::types::world::*;
// Multiplier applied to the learning rate in a high-volatility regime.
const VOLATILE_LEARNING_BOOST: f64 = 1.5;
// Multiplier applied to the learning rate in a low-volatility regime.
const STABLE_LEARNING_DAMPEN: f64 = 0.6;
// Volatility above this triggers the boost; below the dampen threshold
// triggers the dampening; in between the base rate is used unchanged.
const VOLATILITY_BOOST_THRESHOLD: f64 = 0.5;
const VOLATILITY_DAMPEN_THRESHOLD: f64 = 0.2;
// Per-turn body-budget costs and recovery amounts used by `perceive`.
const PE_DEPLETION_RATE: f64 = 0.02;
const AROUSAL_DEPLETION_RATE: f64 = 0.01;
const NATURAL_REPLENISHMENT_RATE: f64 = 0.005;
// Body-budget adjustment rates applied to the response RPE in `consolidate`.
const POSITIVE_RPE_REPLENISHMENT_RATE: f64 = 0.05;
const NEGATIVE_RPE_DEPLETION_RATE: f64 = 0.02;

/// Scale `base_rate` according to the recent prediction-error volatility:
/// boosted when volatile, dampened when stable, unchanged in between.
fn volatility_modulated_rate(base_rate: f64, pe_volatility: f64) -> f64 {
    let multiplier = if pe_volatility > VOLATILITY_BOOST_THRESHOLD {
        VOLATILE_LEARNING_BOOST
    } else if pe_volatility < VOLATILITY_DAMPEN_THRESHOLD {
        STABLE_LEARNING_DAMPEN
    } else {
        1.0
    };
    base_rate * multiplier
}
/// Ingest an incoming message: update affect and topic beliefs, derive the
/// sensory prediction error from the arousal delta, advance turn counters,
/// maintain the PE-volatility window, apply metabolic costs to the body
/// budget, and refresh the recommended strategy for the current topic cluster.
pub fn perceive(model: &WorldModel, message: &str) -> WorldModel {
    let mut next = model.clone();

    // Affect first — the sensory PE below is the arousal change it produced.
    next.belief.affect = update_affect(&model.belief, message);
    next.belief.topic = TopicBeliefs {
        current: extract_topics(message),
        predicted: model.belief.topic.predicted.clone(),
    };

    let arousal_delta = (next.belief.affect.arousal - model.belief.affect.arousal).abs();
    next.sensory_pe = clamp(arousal_delta, 0.0, 1.0);

    next.belief.turn += 1;
    next.turns_since_switch += 1;

    // Sliding window of recent sensory PEs feeds the volatility estimate.
    next.pe_history.push(next.sensory_pe);
    while next.pe_history.len() > PE_VOLATILITY_WINDOW {
        next.pe_history.remove(0);
    }
    next.pe_volatility = compute_pe_volatility(&next.pe_history);

    // Metabolic accounting: surprise and arousal cost energy; every turn
    // also restores a small natural amount.
    let drain = next.sensory_pe * PE_DEPLETION_RATE
        + next.belief.affect.arousal * AROUSAL_DEPLETION_RATE;
    next.body_budget = clamp(next.body_budget - drain + NATURAL_REPLENISHMENT_RATE, 0.0, 1.0);

    // Recommend a strategy only when the message yields a topic cluster;
    // otherwise the previously cloned recommendation is left as-is.
    let cluster = crate::cognition::detector::build_topic_cluster(&next.belief.topic.current);
    if !cluster.is_empty() {
        next.recommended_strategy =
            get_recommended_strategy(&cluster, &next.learned).map(|(s, _)| s);
    }
    next
}
/// Population standard deviation of the PE history, clamped to [0, 1].
/// Fewer than two samples give no meaningful spread, so 0.0 is returned.
fn compute_pe_volatility(history: &[f64]) -> f64 {
    let count = history.len();
    if count < 2 {
        return 0.0;
    }
    let n = count as f64;
    let mean = history.iter().sum::<f64>() / n;
    let sq_dev_sum: f64 = history.iter().map(|x| (x - mean) * (x - mean)).sum();
    clamp((sq_dev_sum / n).sqrt(), 0.0, 1.0)
}
/// Fold the outcome of a produced response back into the world model:
/// update predictions, compute the reward prediction error (RPE) against
/// the running success expectation, adjust the body budget, learn per-topic
/// and per-strategy success rates (volatility-modulated), and record how
/// the response complied with the recommended strategy.
pub fn consolidate(
    model: &WorldModel,
    response_content: &str,
    response_quality: f64,
) -> WorldModel {
    let mut next = model.clone();

    next.belief.predictions =
        crate::cognition::belief_state::update_predictions(response_content);

    // RPE against the prior expectation, then move the expectation toward
    // the observed quality with an exponential moving average.
    next.response_rpe = response_quality - model.last_response_prediction;
    next.last_response_prediction = clamp(
        (1.0 - RESPONSE_SUCCESS_EMA) * model.last_response_prediction
            + RESPONSE_SUCCESS_EMA * response_quality,
        0.0,
        1.0,
    );

    // Remember the turn on which each topic in the response was discussed.
    for topic in extract_topics(response_content) {
        next.discussed_topics.insert(topic, next.belief.turn);
    }

    // Positive surprises replenish the budget faster than negative ones
    // drain it (the RPE sign carries the direction of the adjustment).
    let rpe_rate = if next.response_rpe > 0.0 {
        POSITIVE_RPE_REPLENISHMENT_RATE
    } else {
        NEGATIVE_RPE_DEPLETION_RATE
    };
    next.body_budget = clamp(next.body_budget + next.response_rpe * rpe_rate, 0.0, 1.0);

    // The "safe" detector keys the learning tables; the plain detector
    // records what the response actually did for compliance checking.
    let detected_safe =
        crate::cognition::detector::detect_response_strategy_safe(response_content);
    next.last_response_strategy = Some(detect_response_strategy(response_content));
    next.last_response_length = Some(response_content.len());
    next.last_response_question_ratio =
        Some(crate::cognition::detector::compute_question_ratio(response_content));

    // Learn faster when recent PEs were volatile, slower when stable.
    let learning_rate = volatility_modulated_rate(RESPONSE_SUCCESS_EMA, model.pe_volatility);
    let blend = |prior: f64| (1.0 - learning_rate) * prior + learning_rate * response_quality;

    let cluster = crate::cognition::detector::build_topic_cluster(&model.belief.topic.current);
    if !cluster.is_empty() {
        // Overall success rate for this topic cluster.
        let entry = next
            .learned
            .response_success
            .entry(cluster.clone())
            .or_insert(SuccessEntry { success_rate: 0.5, count: 0 });
        entry.success_rate = blend(entry.success_rate);
        entry.count += 1;

        // Per-strategy success rate, keyed by the safely-detected strategy.
        if let Some(detected) = detected_safe {
            let strategy_entry = next
                .learned
                .response_strategies
                .entry(cluster)
                .or_default()
                .entry(format!("{:?}", detected))
                .or_insert(SuccessEntry { success_rate: 0.5, count: 0 });
            strategy_entry.success_rate = blend(strategy_entry.success_rate);
            strategy_entry.count += 1;
        }
    }

    // Did the response follow the strategy recommended at perception time,
    // and if it deviated, did the deviation pay off (quality > 0.6)?
    next.last_compliance = Some(
        match (&model.recommended_strategy, &next.last_response_strategy) {
            (Some(recommended), Some(actual)) if recommended == actual => {
                StrategyCompliance::Compliant
            }
            (Some(_), Some(_)) if response_quality > 0.6 => StrategyCompliance::DeviatedBetter,
            (Some(_), Some(_)) => StrategyCompliance::DeviatedWorse,
            _ => StrategyCompliance::NoRecommendation,
        },
    );
    next
}
// Passive body-budget recovery applied once per idle cycle.
const IDLE_REPLENISHMENT_RATE: f64 = 0.01;

/// Idle-cycle upkeep: replenish the body budget slightly (capped at 1.0)
/// and count the idle cycle.
pub fn maintain(model: &WorldModel) -> WorldModel {
    let mut next = model.clone();
    next.body_budget = clamp(next.body_budget + IDLE_REPLENISHMENT_RATE, 0.0, 1.0);
    next.idle_cycles += 1;
    next
}
#[cfg(test)]
mod tests {
// Unit tests for the perceive / consolidate / maintain cycle and its helpers.
use super::*;
use std::collections::HashMap;
// An emphatic message should raise arousal relative to the fresh model.
#[test]
fn perceive_updates_affect() {
let model = WorldModel::new("test".into());
let updated = perceive(&model, "This is amazing!!!");
assert!(updated.belief.affect.arousal > model.belief.affect.arousal);
}
// Sensory PE is the absolute arousal delta, so a charged message yields > 0.
#[test]
fn perceive_sensory_pe_from_arousal_delta() {
let model = WorldModel::new("test".into());
let updated = perceive(&model, "This is TERRIBLE and frustrating!!!");
assert!(updated.sensory_pe > 0.0);
}
// A neutral message should barely move arousal, keeping sensory PE small.
#[test]
fn perceive_sensory_pe_stable_when_calm() {
let model = WorldModel::new("test".into());
let updated = perceive(&model, "Hello world");
assert!(updated.sensory_pe < 0.1);
}
// Each perceive call advances the belief turn counter by one.
#[test]
fn perceive_increments_turn() {
let model = WorldModel::new("test".into());
let updated = perceive(&model, "test");
assert_eq!(updated.belief.turn, 1);
}
// A constant PE history has (near-)zero standard deviation.
#[test]
fn pe_volatility_stable() {
let history = vec![0.3, 0.3, 0.3, 0.3, 0.3];
let vol = compute_pe_volatility(&history);
assert!(vol < 0.01); }
// An oscillating PE history produces a clearly nonzero volatility.
#[test]
fn pe_volatility_unstable() {
let history = vec![0.1, 0.9, 0.1, 0.9, 0.1];
let vol = compute_pe_volatility(&history);
assert!(vol > 0.3); }
// Consolidation should derive next-topic predictions from the response text.
#[test]
fn consolidate_updates_predictions() {
let model = WorldModel::new("test".into());
let updated = consolidate(&model, "The Rust compiler is great for safety", 0.8);
assert!(!updated.belief.predictions.next_topics.is_empty());
}
// RPE = quality - prior prediction: 0.8 - 0.5 = 0.3 (within float tolerance).
#[test]
fn consolidate_computes_rpe() {
let mut model = WorldModel::new("test".into());
model.last_response_prediction = 0.5;
let updated = consolidate(&model, "Good response", 0.8);
assert!((updated.response_rpe - 0.3).abs() < f64::EPSILON);
}
// Topics mentioned in the response are recorded in discussed_topics.
#[test]
fn consolidate_tracks_discussed_topics() {
let model = WorldModel::new("test".into());
let updated = consolidate(&model, "Let's discuss Rust and async", 0.7);
assert!(!updated.discussed_topics.is_empty());
}
// Ordering check across the three volatility regimes: boosted > base > dampened.
#[test]
fn volatile_environment_learns_faster() {
let rate_volatile = volatility_modulated_rate(0.25, 0.7); let rate_normal = volatility_modulated_rate(0.25, 0.35); let rate_stable = volatility_modulated_rate(0.25, 0.1); assert!(
rate_volatile > rate_normal,
"Volatile environment should produce higher learning rate"
);
assert!(
rate_normal > rate_stable,
"Stable environment should produce lower learning rate"
);
}
// Both volatile and stable models should still move their prediction toward
// the observed quality; only the magnitude of the shift differs.
#[test]
fn consolidate_uses_volatility_for_learning() {
let mut volatile_model = WorldModel::new("test".into());
volatile_model.pe_volatility = 0.8;
volatile_model.last_response_prediction = 0.5;
let volatile_updated = consolidate(&volatile_model, "Step 1. Step 2. Step 3.", 0.9);
let mut stable_model = WorldModel::new("test".into());
stable_model.pe_volatility = 0.1;
stable_model.last_response_prediction = 0.5;
let stable_updated = consolidate(&stable_model, "Step 1. Step 2. Step 3.", 0.9);
let volatile_shift = (volatile_updated.last_response_prediction - 0.5).abs();
let stable_shift = (stable_updated.last_response_prediction - 0.5).abs();
assert!(volatile_shift > 0.0);
assert!(stable_shift > 0.0);
}
// Just past each threshold the exact multiplier must apply.
#[test]
fn volatility_modulated_rate_boundaries() {
let at_boost = volatility_modulated_rate(0.25, VOLATILITY_BOOST_THRESHOLD + 0.01);
assert!((at_boost - 0.25 * VOLATILE_LEARNING_BOOST).abs() < 0.01);
let at_dampen = volatility_modulated_rate(0.25, VOLATILITY_DAMPEN_THRESHOLD - 0.01);
assert!((at_dampen - 0.25 * STABLE_LEARNING_DAMPEN).abs() < 0.01);
}
// With a strong learned entry for the "async+rust" cluster, perceive should
// surface that strategy as the recommendation.
#[test]
fn perceive_sets_recommended_strategy_from_learned() {
let mut model = WorldModel::new("test".into());
let mut strategy_map = HashMap::new();
strategy_map.insert(
"StepByStep".into(),
SuccessEntry {
success_rate: 0.8,
count: 10,
},
);
model
.learned
.response_strategies
.insert("async+rust".into(), strategy_map);
let updated = perceive(&model, "How do I use async in Rust?");
assert_eq!(
updated.recommended_strategy,
Some(ResponseStrategy::StepByStep),
"Should recommend learned strategy for topic cluster"
);
}
// A fresh model with no learned data yields no recommendation.
#[test]
fn perceive_no_recommendation_without_learned() {
let model = WorldModel::new("test".into());
let updated = perceive(&model, "Hello world");
assert_eq!(updated.recommended_strategy, None);
}
// Among multiple learned strategies, the highest success rate wins.
#[test]
fn get_recommended_strategy_picks_highest_success() {
let mut learned = LearnedState::default();
let mut strategies = HashMap::new();
strategies.insert(
"DirectAnswer".into(),
SuccessEntry {
success_rate: 0.6,
count: 8,
},
);
strategies.insert(
"StepByStep".into(),
SuccessEntry {
success_rate: 0.85,
count: 10,
},
);
learned
.response_strategies
.insert("test+topic".into(), strategies);
let result = get_recommended_strategy("test+topic", &learned);
assert!(result.is_some());
let (strategy, _) = result.unwrap();
assert_eq!(strategy, ResponseStrategy::StepByStep);
}
// A strategy with a very low success rate should be skipped even if present.
#[test]
fn get_recommended_strategy_skips_avoid() {
let mut learned = LearnedState::default();
let mut strategies = HashMap::new();
strategies.insert(
"ClarifyFirst".into(),
SuccessEntry {
success_rate: 0.2, count: 10,
},
);
strategies.insert(
"DirectAnswer".into(),
SuccessEntry {
success_rate: 0.7,
count: 8,
},
);
learned
.response_strategies
.insert("test+topic".into(), strategies);
let result = get_recommended_strategy("test+topic", &learned);
assert!(result.is_some());
let (strategy, _) = result.unwrap();
assert_eq!(
strategy,
ResponseStrategy::DirectAnswer,
"Should skip avoided strategy"
);
}
// Too few observations (count: 2) should not produce a recommendation,
// even with a high success rate.
#[test]
fn get_recommended_strategy_none_for_weak_data() {
let mut learned = LearnedState::default();
let mut strategies = HashMap::new();
strategies.insert(
"StepByStep".into(),
SuccessEntry {
success_rate: 0.9,
count: 2, },
);
learned
.response_strategies
.insert("test+topic".into(), strategies);
let result = get_recommended_strategy("test+topic", &learned);
assert!(result.is_none(), "Should return None for weak data");
}
// Idle maintenance adds IDLE_REPLENISHMENT_RATE to a non-full budget.
#[test]
fn maintain_replenishes_body_budget() {
let mut model = WorldModel::new("test".into());
model.body_budget = 0.8; let updated = maintain(&model);
assert!(
updated.body_budget > model.body_budget,
"Idle maintenance should replenish body budget"
);
}
// idle_cycles accumulates across successive maintain calls.
#[test]
fn maintain_increments_idle_cycles() {
let model = WorldModel::new("test".into());
let updated = maintain(&model);
assert_eq!(updated.idle_cycles, 1);
let updated2 = maintain(&updated);
assert_eq!(updated2.idle_cycles, 2);
}
}