use crate::types::world::{ConversationRegime, DynamicsState, WorldModel};
/// Turns at or below this count are always classified as `Opening`.
pub const OPENING_MAX_TURNS: u32 = 2;
/// Minimum consecutive turns on one topic before deep regimes are considered.
pub const DEEP_DIVE_MIN_CONSECUTIVE: u32 = 3;
/// Accumulated per-turn prediction error that forces a regime reassessment.
pub const REGIME_REASSESS_PE_THRESHOLD: f64 = 1.5;
/// Depth gained per on-topic, low-surprise turn (depth is capped at 1.0).
pub const DEPTH_INCREMENT: f64 = 0.15;
/// Fraction of depth LOST on a topic switch (0.8 lost => 20% retained).
pub const DEPTH_DECAY_ON_SWITCH: f64 = 0.8;
/// Sensory PE below this counts as "familiar" for depth continuation.
pub const FAMILIAR_PE_THRESHOLD: f64 = 0.4;
/// Stricter familiarity bound used when grading sustained focus as problem-solving.
pub const DEEP_FAMILIAR_PE_THRESHOLD: f64 = 0.25;
pub fn create_dynamics_state() -> DynamicsState {
DynamicsState::default()
}
/// Determines the conversation regime for the current turn.
///
/// The first `OPENING_MAX_TURNS` turns are unconditionally `Opening`, and a
/// missing `previous` state falls back to `Exploration`. Otherwise per-turn
/// prediction error is accumulated and the regime is re-evaluated only when
/// that accumulator crosses `REGIME_REASSESS_PE_THRESHOLD`, when leaving
/// `Opening`, or immediately after a topic switch out of `DeepDive`; on all
/// other turns the previous regime is carried forward cheaply.
pub fn detect_regime(model: &WorldModel, previous: Option<&DynamicsState>) -> DynamicsState {
    // Early turns are classified purely by turn count.
    if model.belief.turn <= OPENING_MAX_TURNS {
        return DynamicsState {
            regime: ConversationRegime::Opening,
            depth: 0.0,
            turns_in_regime: model.belief.turn,
            accumulated_turn_pe: 0.0,
        };
    }

    // Without history there is nothing to carry forward: start exploring.
    let Some(prev) = previous else {
        return DynamicsState {
            regime: ConversationRegime::Exploration,
            depth: 0.0,
            turns_in_regime: 1,
            accumulated_turn_pe: 0.0,
        };
    };

    let accumulated_pe = prev.accumulated_turn_pe + model.sensory_pe;

    // Depth grows while the topic continues with low surprise, and collapses
    // (retaining only 1 - DEPTH_DECAY_ON_SWITCH of its value) otherwise.
    let staying_on_topic =
        model.turns_since_switch > 0 && model.sensory_pe < FAMILIAR_PE_THRESHOLD;
    let depth = if staying_on_topic {
        (prev.depth + DEPTH_INCREMENT).min(1.0)
    } else {
        prev.depth * (1.0 - DEPTH_DECAY_ON_SWITCH)
    };

    let must_reassess = accumulated_pe >= REGIME_REASSESS_PE_THRESHOLD
        || prev.regime == ConversationRegime::Opening
        || (prev.regime == ConversationRegime::DeepDive && model.turns_since_switch == 0);

    if !must_reassess {
        // Sparse update: keep the regime, advance the counters.
        return DynamicsState {
            regime: prev.regime,
            depth,
            turns_in_regime: prev.turns_in_regime + 1,
            accumulated_turn_pe: accumulated_pe,
        };
    }

    // Full reassessment: classify, and reset the PE accumulator.
    let regime = classify_regime(model, depth, prev);
    DynamicsState {
        regime,
        depth,
        turns_in_regime: if regime == prev.regime {
            prev.turns_in_regime + 1
        } else {
            1
        },
        accumulated_turn_pe: 0.0,
    }
}
/// Picks a regime during a full reassessment.
///
/// Priority order matters: a topic switch straight out of a deep dive reads
/// as divergent; otherwise sustained focus is graded into problem-solving
/// (deep + very familiar + calm) or a plain deep dive, before defaulting to
/// exploration.
fn classify_regime(
    model: &WorldModel,
    depth: f64,
    previous: &DynamicsState,
) -> ConversationRegime {
    let just_switched = model.turns_since_switch == 0;
    if just_switched && previous.regime == ConversationRegime::DeepDive {
        return ConversationRegime::Divergent;
    }

    if model.turns_since_switch >= DEEP_DIVE_MIN_CONSECUTIVE {
        let very_familiar = model.sensory_pe < DEEP_FAMILIAR_PE_THRESHOLD;
        let calm = model.belief.affect.arousal < 0.4;
        if depth > 0.4 && very_familiar && calm {
            return ConversationRegime::ProblemSolving;
        }
        if depth > 0.3 {
            return ConversationRegime::DeepDive;
        }
    }

    ConversationRegime::Exploration
}
/// Renders the dynamics state as a short phase line for the LLM prompt.
///
/// Returns `None` during the opening phase, when there is nothing useful to
/// report yet.
pub fn format_regime_for_llm(state: &DynamicsState) -> Option<String> {
    if state.regime == ConversationRegime::Opening {
        return None;
    }
    Some(format!(
        "Phase: {:?} (depth {:.1}, {} turns)",
        state.regime, state.depth, state.turns_in_regime
    ))
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a `WorldModel` with the three fields the detector reads.
    fn model_with(turn: u32, turns_since_switch: u32, sensory_pe: f64) -> WorldModel {
        let mut m = WorldModel::new("test".into());
        m.belief.turn = turn;
        m.turns_since_switch = turns_since_switch;
        m.sensory_pe = sensory_pe;
        m
    }

    /// Shorthand constructor so test bodies avoid repeated struct literals.
    fn state_of(
        regime: ConversationRegime,
        depth: f64,
        turns_in_regime: u32,
        accumulated_turn_pe: f64,
    ) -> DynamicsState {
        DynamicsState {
            regime,
            depth,
            turns_in_regime,
            accumulated_turn_pe,
        }
    }

    #[test]
    fn opening_regime() {
        let state = detect_regime(&model_with(1, 0, 0.3), None);
        assert_eq!(state.regime, ConversationRegime::Opening);
    }

    #[test]
    fn exploration_after_opening() {
        let prev = state_of(ConversationRegime::Opening, 0.0, 2, 0.0);
        let state = detect_regime(&model_with(3, 1, 0.5), Some(&prev));
        assert_eq!(state.regime, ConversationRegime::Exploration);
    }

    #[test]
    fn deep_dive_sustained_focus() {
        let mut model = model_with(8, 5, 0.2);
        model.belief.affect.arousal = 0.5;
        let prev = state_of(ConversationRegime::Exploration, 0.5, 3, 2.0);
        let state = detect_regime(&model, Some(&prev));
        assert_eq!(state.regime, ConversationRegime::DeepDive);
    }

    #[test]
    fn problem_solving_calm_deep_familiar() {
        let mut model = model_with(10, 5, 0.1);
        model.belief.affect.arousal = 0.2;
        let prev = state_of(ConversationRegime::DeepDive, 0.6, 4, 2.0);
        let state = detect_regime(&model, Some(&prev));
        assert_eq!(state.regime, ConversationRegime::ProblemSolving);
    }

    #[test]
    fn divergent_after_deep_dive_switch() {
        let prev = state_of(ConversationRegime::DeepDive, 0.6, 5, 0.5);
        let state = detect_regime(&model_with(10, 0, 0.8), Some(&prev));
        assert_eq!(state.regime, ConversationRegime::Divergent);
    }

    #[test]
    fn sparse_update_no_reassess() {
        let prev = state_of(ConversationRegime::Exploration, 0.3, 2, 0.2);
        let state = detect_regime(&model_with(5, 3, 0.1), Some(&prev));
        assert_eq!(state.regime, ConversationRegime::Exploration);
        assert_eq!(state.turns_in_regime, 3);
        assert!(state.accumulated_turn_pe > 0.2);
    }

    #[test]
    fn depth_increases_on_continuation() {
        let prev = state_of(ConversationRegime::Exploration, 0.3, 2, 0.1);
        let state = detect_regime(&model_with(5, 3, 0.1), Some(&prev));
        assert!(state.depth > 0.3);
    }

    #[test]
    fn format_opening_returns_none() {
        let state = state_of(ConversationRegime::Opening, 0.0, 1, 0.0);
        assert!(format_regime_for_llm(&state).is_none());
    }

    #[test]
    fn format_deep_dive_returns_some() {
        let state = state_of(ConversationRegime::DeepDive, 0.6, 5, 0.3);
        let formatted = format_regime_for_llm(&state).unwrap();
        assert!(formatted.contains("DeepDive"));
    }
}