use approx::assert_abs_diff_eq;
use karma::{HiddenMarkovModel, HmmError};
use proptest::prelude::*;
#[test]
fn test_original_smoke_test() {
    // End-to-end smoke test: construct, train twice (default and explicit
    // learning rate), then evaluate an unseen sequence.
    let mut hmm = HiddenMarkovModel::new(20, 10).unwrap();
    hmm.train(&(0..10).collect::<Vec<usize>>(), None).unwrap();
    hmm.train(&[0, 1, 4, 5, 7, 9, 0, 4, 7, 8, 2], Some(0.05))
        .unwrap();
    let prob = hmm.evaluate(&[5, 6, 7, 8]).unwrap();
    // A proper probability lies strictly inside (0, 1).
    assert!(prob > 0.0, "probability must be positive");
    assert!(prob < 1.0, "probability must be below 1");
}
#[test]
fn test_sequence_classification() {
    // Train one model on two distinct patterns (alternating vs. sequential)
    // and verify both patterns still evaluate to valid probabilities.
    let mut hmm = HiddenMarkovModel::new(10, 4).unwrap();
    let alternating = [0, 1, 0, 1, 0, 1];
    let sequential = [0, 1, 2, 3, 0, 1];
    for _ in 0..20 {
        hmm.train(&alternating, Some(0.1)).unwrap();
    }
    for _ in 0..20 {
        hmm.train(&sequential, Some(0.1)).unwrap();
    }
    // Probe with the 4-symbol prefix of each training pattern.
    for probe in [&alternating[..4], &sequential[..4]] {
        let prob = hmm.evaluate(probe).unwrap();
        assert!(prob > 0.0 && prob <= 1.0);
    }
}
#[test]
fn test_convergence_behavior() {
    // Repeatedly training on one fixed sequence must not lower the model's
    // probability estimate for that same sequence.
    let mut hmm = HiddenMarkovModel::new(5, 3).unwrap();
    let sequence = [0, 1, 2, 0, 1, 2, 0, 1, 2];
    let before = hmm.evaluate(&sequence).unwrap();
    for _round in 0..50 {
        hmm.train(&sequence, Some(0.05)).unwrap();
    }
    let after = hmm.evaluate(&sequence).unwrap();
    assert!(
        after >= before,
        "Training should increase probability of training sequences"
    );
}
#[test]
fn test_error_handling() {
    // Every failure mode must surface as a typed error, never a panic.
    let mut hmm = HiddenMarkovModel::new(3, 4).unwrap();

    // Training on an empty sequence is rejected outright.
    assert!(matches!(hmm.train(&[], None), Err(HmmError::EmptySequence)));

    // Out-of-range symbols are reported with the offending value and the bound.
    let train_result = hmm.train(&[0, 1, 5, 2], None);
    assert!(matches!(
        train_result,
        Err(HmmError::InvalidObservation(5, 4))
    ));
    let eval_result = hmm.evaluate(&[0, 4, 1]);
    assert!(matches!(
        eval_result,
        Err(HmmError::InvalidObservation(4, 4))
    ));

    // Learning rates outside the valid range (negative or above 1) fail.
    for bad_rate in [-0.1, 1.5] {
        assert!(matches!(
            hmm.train(&[0, 1, 2], Some(bad_rate)),
            Err(HmmError::InvalidLearningRate(_))
        ));
    }
}
#[test]
fn test_builder_validation() {
    // A builder with no dimensions configured must refuse to build.
    assert!(HiddenMarkovModel::builder().build().is_err());

    // Supplying both dimensions is sufficient for a successful build.
    assert!(HiddenMarkovModel::builder()
        .states(3)
        .observations(5)
        .build()
        .is_ok());

    // Extra options (randomize) still yield a model reporting the
    // requested dimensions.
    let hmm = HiddenMarkovModel::builder()
        .states(5)
        .observations(3)
        .randomize(true)
        .build()
        .unwrap();
    assert_eq!(hmm.n_states(), 5);
    assert_eq!(hmm.n_observations(), 3);
}
#[test]
fn test_probability_matrix_properties() {
    // A freshly constructed model must already be stochastically valid:
    // the initial distribution and every per-state row of the transition
    // and emission matrices sum to 1 (within floating-point tolerance).
    let hmm = HiddenMarkovModel::new(4, 3).unwrap();
    let init_sum: f64 = hmm.initial_probabilities().iter().sum();
    assert_abs_diff_eq!(init_sum, 1.0, epsilon = 1e-10);
    // Transition matrix is accessed row-major: [from_state * n_states + to_state].
    let transition = hmm.transition_probabilities();
    for i in 0..hmm.n_states() {
        let row_sum: f64 = (0..hmm.n_states())
            .map(|j| transition[i * hmm.n_states() + j])
            .sum();
        assert_abs_diff_eq!(row_sum, 1.0, epsilon = 1e-10);
    }
    // NOTE(review): emission is accessed as [i + j * n_states], i.e.
    // column-major (observation-major) — the opposite convention from the
    // row-major transition access above. Presumably this matches karma's
    // internal storage layout for the emission matrix; confirm against the
    // library, since a layout mismatch here would silently sum the wrong
    // elements (the test would still pass on a freshly-initialized uniform
    // model either way).
    let emission = hmm.emission_probabilities();
    for i in 0..hmm.n_states() {
        let row_sum: f64 = (0..hmm.n_observations())
            .map(|j| emission[i + j * hmm.n_states()])
            .sum();
        assert_abs_diff_eq!(row_sum, 1.0, epsilon = 1e-10);
    }
}
#[test]
fn test_multiple_training_sessions() {
    // Training on several different sequences back-to-back keeps the model
    // in a usable state for subsequent evaluation.
    let mut hmm = HiddenMarkovModel::new(5, 4).unwrap();
    for sequence in [[0, 1, 2, 3], [3, 2, 1, 0], [0, 2, 1, 3]] {
        hmm.train(&sequence, Some(0.1)).unwrap();
    }
    assert!(hmm.evaluate(&[1, 2, 3]).is_ok());
    assert!(hmm.evaluate(&[0, 0, 0]).is_ok());
}
#[test]
fn test_long_sequence_handling() {
    // A 1000-symbol sequence must train and evaluate without error
    // (e.g. no numeric underflow surfacing as a failure).
    let mut hmm = HiddenMarkovModel::new(5, 3).unwrap();
    let long_seq: Vec<usize> = [0usize, 1, 2].iter().copied().cycle().take(1000).collect();
    assert!(hmm.train(&long_seq, None).is_ok());
    assert!(hmm.evaluate(&long_seq).is_ok());
}
#[test]
fn test_single_observation_sequence() {
    // Length-1 sequences are a valid edge case for both training and
    // evaluation.
    let mut hmm = HiddenMarkovModel::new(3, 4).unwrap();
    for symbol in 0..3usize {
        assert!(hmm.train(&[symbol], None).is_ok());
    }
    let prob = hmm.evaluate(&[1]).unwrap();
    assert!(prob > 0.0 && prob <= 1.0);
}
#[test]
fn test_randomization_changes_probabilities() {
    // Randomizing a clone's initial distribution must move at least one
    // probability away from the original's values.
    let base = HiddenMarkovModel::new(10, 5).unwrap();
    let before = base.initial_probabilities().to_vec();
    let mut randomized = base.clone();
    randomized.randomize_initial_probabilities();
    // De Morgan of the "all equal" check: any pair differing by >= 1e-10.
    let changed = before
        .iter()
        .zip(randomized.initial_probabilities().iter())
        .any(|(a, b)| (a - b).abs() >= 1e-10);
    assert!(
        changed,
        "Randomization should change at least some probabilities"
    );
}
// Property-based tests (proptest): each case runs against many randomly
// generated inputs; `prop_assert*` reports the minimal failing input.
proptest! {
    #[test]
    // Any positive (states, observations) pair must construct successfully
    // and report back the requested dimensions.
    fn prop_valid_model_creation(states in 1usize..50, obs in 1usize..50) {
        let result = HiddenMarkovModel::new(states, obs);
        prop_assert!(result.is_ok());
        let hmm = result.unwrap();
        prop_assert_eq!(hmm.n_states(), states);
        prop_assert_eq!(hmm.n_observations(), obs);
    }
    #[test]
    // After training on any valid sequence, evaluation must yield a finite
    // value in [0, 1].
    fn prop_probability_bounds(
        states in 1usize..20,
        obs in 1usize..20,
        sequence in prop::collection::vec(0usize..10, 1..50)
    ) {
        let mut hmm = HiddenMarkovModel::new(states, obs).unwrap();
        // Fold raw symbols into range via modulo so the sequence is always valid.
        let valid_seq: Vec<usize> = sequence.iter().map(|&x| x % obs).collect();
        if hmm.train(&valid_seq, Some(0.05)).is_ok() {
            if let Ok(prob) = hmm.evaluate(&valid_seq) {
                prop_assert!(prob >= 0.0, "Probability must be non-negative");
                prop_assert!(prob <= 1.0, "Probability must not exceed 1.0");
                prop_assert!(prob.is_finite(), "Probability must be finite");
            }
        }
    }
    #[test]
    // Training always succeeds for in-range symbols and a learning rate in
    // (0, 1).
    fn prop_training_with_valid_data_succeeds(
        states in 1usize..20,
        obs in 1usize..10,
        seq_len in 1usize..100,
        learning_rate in 0.01f64..1.0
    ) {
        let mut hmm = HiddenMarkovModel::new(states, obs).unwrap();
        let sequence: Vec<usize> = (0..seq_len).map(|i| i % obs).collect();
        let result = hmm.train(&sequence, Some(learning_rate));
        prop_assert!(result.is_ok());
    }
    #[test]
    // Any symbol >= n_observations (invalid_obs >= 10 > obs here) must make
    // both train and evaluate fail.
    fn prop_out_of_bounds_observation_fails(
        states in 1usize..20,
        obs in 1usize..10,
        invalid_obs in 10usize..100
    ) {
        let mut hmm = HiddenMarkovModel::new(states, obs).unwrap();
        let sequence = vec![0, 1, invalid_obs];
        prop_assert!(hmm.train(&sequence, None).is_err());
        prop_assert!(hmm.evaluate(&sequence).is_err());
    }
    #[test]
    // After any number of training iterations the probability matrices stay
    // (approximately) valid: initial distribution sums to ~1, and no entry
    // goes meaningfully negative.
    fn prop_probability_matrices_valid_after_training(
        states in 2usize..10,
        obs in 2usize..10,
        n_iterations in 1usize..20
    ) {
        let mut hmm = HiddenMarkovModel::new(states, obs).unwrap();
        for _ in 0..n_iterations {
            let seq: Vec<usize> = (0..10).map(|i| i % obs).collect();
            let _ = hmm.train(&seq, Some(0.05));
        }
        // Loose 0.1 tolerance: training is allowed to drift slightly from a
        // perfectly normalized distribution.
        let init_sum: f64 = hmm.initial_probabilities().iter().sum();
        prop_assert!((init_sum - 1.0).abs() < 0.1,
            "Initial probabilities should sum to ~1, got {}", init_sum);
        for &p in hmm.initial_probabilities() {
            prop_assert!(p >= 0.0, "Probabilities must be non-negative");
        }
        // -1e-10 tolerance absorbs floating-point round-off below zero.
        for &p in hmm.transition_probabilities() {
            prop_assert!(p >= -1e-10, "Probabilities must be non-negative");
        }
        for &p in hmm.emission_probabilities() {
            prop_assert!(p >= -1e-10, "Probabilities must be non-negative");
        }
    }
    #[test]
    // The empty sequence has probability 1 by convention (empty product).
    fn prop_empty_sequence_returns_one(states in 1usize..20, obs in 1usize..20) {
        let mut hmm = HiddenMarkovModel::new(states, obs).unwrap();
        let prob = hmm.evaluate(&[]).unwrap();
        prop_assert_eq!(prob, 1.0);
    }
    #[test]
    // Clone must produce a model comparing equal to its source.
    fn prop_clone_creates_equal_model(states in 1usize..20, obs in 1usize..20) {
        let hmm1 = HiddenMarkovModel::new(states, obs).unwrap();
        let hmm2 = hmm1.clone();
        prop_assert_eq!(hmm1, hmm2);
    }
}
#[cfg(feature = "serde")]
#[test]
fn test_serialization() {
    use serde_json;
    // A trained model must survive a JSON round-trip with its parameters —
    // and therefore its evaluations — intact.
    let mut hmm = HiddenMarkovModel::new(5, 3).unwrap();
    hmm.train(&[0, 1, 2, 0, 1, 2], Some(0.1)).unwrap();
    let json = serde_json::to_string(&hmm).unwrap();
    let mut restored: HiddenMarkovModel = serde_json::from_str(&json).unwrap();
    assert_eq!(hmm, restored);
    // Also compare evaluation output directly, in case equality does not
    // cover all evaluation-relevant state.
    let original_prob = hmm.evaluate(&[0, 1, 2]).unwrap();
    let restored_prob = restored.evaluate(&[0, 1, 2]).unwrap();
    assert_abs_diff_eq!(original_prob, restored_prob, epsilon = 1e-10);
}