//! vom_rs 0.3.0
//!
//! A library for Probabilistic Finite Automata.
//!
//! Documentation: see the crate-level docs / README.
use rand::Rng;

/// Free up `diff` points of probability mass to make room for a new state.
///
/// Starting at a random index, one point at a time is taken from the
/// entries, cycling round-robin; entries already at zero are skipped.
/// Stops early if the vector does not hold `diff` points in total.
pub fn free_probability(mut probs: Vec<i32>, diff: i32) -> Vec<i32> {
    // empty input: nothing to free (and the modulo below would panic);
    // non-positive diff: nothing requested
    if probs.is_empty() || diff <= 0 {
        return probs;
    }

    let mut rng = rand::thread_rng();

    // random start index so repeated calls don't always drain index 0
    let mut idx: usize = rng.gen::<usize>() % probs.len();

    // can never free more mass than is actually there — this also
    // guarantees the loop below terminates
    let available: i32 = probs.iter().map(|&p| p.max(0)).sum();
    let mut remaining = diff.min(available);

    // NOTE: the old version ran exactly `diff` iterations and counted
    // skipped zero entries against the budget, so it could free fewer
    // than `diff` points; here a skipped entry costs nothing
    while remaining > 0 {
        if probs[idx] > 0 {
            probs[idx] -= 1;
            remaining -= 1;
        }
        idx = (idx + 1) % probs.len();
    }

    probs
}

/// Rebalance a vector of probabilities so that it sums to `target_sum`,
/// then apply [`blur`] with the given `blur_factor`.
///
/// Points are added or removed one at a time, cycling through the vector
/// from a random start index so no single entry is favoured. When
/// removing, entries already at zero are skipped.
pub fn rebalance(mut probs: Vec<i32>, target_sum: i32, blur_factor: f32) -> Vec<i32> {
    // empty input: nothing to distribute (and the modulo below would panic)
    if probs.is_empty() {
        return probs;
    }

    let total_prob: i32 = probs.iter().sum();
    let mut diff = target_sum - total_prob;

    let mut rng = rand::thread_rng();

    // random start index so repeated calls don't always favour index 0
    let mut idx: usize = rng.gen::<usize>() % probs.len();

    if diff > 0 {
        // falls short: hand out one point per entry until the target is met
        while diff > 0 {
            probs[idx] += 1;
            diff -= 1;
            idx = (idx + 1) % probs.len();
        }
    } else {
        // overshoots: take points back one at a time, skipping entries
        // that are already empty.
        // NOTE: the old version *added* a point to empty entries (moving
        // further from the target), which made termination questionable —
        // skipping converges as long as any positive entry remains.
        while diff < 0 {
            if probs.iter().all(|&p| p <= 0) {
                // nothing left to take; give up rather than loop forever
                break;
            }
            if probs[idx] > 0 {
                probs[idx] -= 1;
                diff += 1;
            }
            idx = (idx + 1) % probs.len();
        }
    }

    blur(probs, blur_factor)
}

/// Float variant of [`rebalance`]: probabilities are scaled to integers
/// (factor 1000 for resolution), rebalanced, and scaled back down.
pub fn rebalance_float(probs: Vec<f32>, target_sum: f32, blur: f32) -> Vec<f32> {
    // scale to integer space — 1000 should be enough resolution ?
    let scaled: Vec<i32> = probs.iter().map(|p| (p * 1000.0) as i32).collect();

    let rebalanced = rebalance(scaled, (target_sum * 1000.0) as i32, blur);

    // and back to floats
    rebalanced.into_iter().map(|p| p as f32 / 1000.0).collect()
}

/// Float variant of [`free_probability`]: probabilities are scaled to
/// integers (factor 1000 for resolution), processed, and scaled back down.
pub fn free_probability_float(probs: Vec<f32>, target: f32) -> Vec<f32> {
    // scale to integer space — 1000 should be enough resolution ?
    let scaled: Vec<i32> = probs.iter().map(|p| (p * 1000.0) as i32).collect();

    let freed = free_probability(scaled, (target * 1000.0) as i32);

    // and back to floats
    freed.into_iter().map(|p| p as f32 / 1000.0).collect()
}

/// Flatten ("blur") a probability vector: every entry below a threshold is
/// raised to that threshold, and the difference is taken from entries above
/// it, round-robin, one point at a time.
///
/// `blur` is clamped to `[0.0, 1.0]`; `0.0` leaves the vector untouched and
/// `1.0` pushes every entry towards the uniform share `100 / probs.len()`.
pub fn blur(mut probs: Vec<i32>, blur: f32) -> Vec<i32> {
    // clamp, otherwise the threshold below would be nonsense
    let blur_clamped = blur.clamp(0.0, 1.0);

    // empty input would make the threshold computation divide by zero;
    // zero blur is a no-op
    if probs.is_empty() || blur_clamped <= 0.0 {
        return probs;
    }

    // the threshold every entry should at least reach
    let min_points = ((100.0 / probs.len() as f32) * blur_clamped) as i32;

    // split indices into "needs points" and "can donate points"
    let mut below_idx: Vec<usize> = Vec::new();
    let mut above_idx: Vec<usize> = Vec::new();
    for (p, prob) in probs.iter().enumerate() {
        if *prob < min_points {
            below_idx.push(p);
        } else {
            above_idx.push(p);
        }
    }

    // no donors at all: nowhere to take points from
    // (the old code indexed into the empty donor list and panicked here)
    if above_idx.is_empty() {
        return probs;
    }

    // bring entries below the threshold up to it, and subtract the
    // difference from the donors — round-robin, one point at a time,
    // never pushing a donor below the threshold itself
    for b in below_idx.iter() {
        let diff = min_points - probs[*b];
        probs[*b] = min_points;
        let mut a_idx = 0;
        for _ in 0..diff {
            if probs[above_idx[a_idx]] > min_points {
                probs[above_idx[a_idx]] -= 1;
            }
            a_idx = (a_idx + 1) % above_idx.len();
        }
    }

    probs
}

/// Float variant of [`blur`]: probabilities are scaled to integers
/// (factor 100, matching blur's 100-point scale), blurred, and scaled back.
pub fn blur_float(probs: Vec<f32>, blurriness: f32) -> Vec<f32> {
    // scale to blur's integer 100-point space
    let scaled: Vec<i32> = probs.iter().map(|p| (p * 100.0) as i32).collect();

    let blurred = blur(scaled, blurriness);

    // and back to floats
    blurred.into_iter().map(|p| p as f32 / 100.0).collect()
}

/// sharpen .. assumes it's balanced, otherwise this might not work ...
pub fn sharpen(mut probs: Vec<i32>, sharpen: f32) -> Vec<i32> {
    let mut rng = rand::thread_rng();

    // clamp, otherwise we'll run into problems
    let sharpen_clamped = num::clamp(sharpen, 0.0, 1.0);

    let mut largest_idx: usize = 0;
    let mut largest_prob: i32 = 0;

    // linear search, vectors will be short ...
    for (p, prob) in probs.iter().enumerate() {
        if *prob > largest_prob {
            largest_prob = *prob;
            largest_idx = p;
        }
    }

    // nothing to sharpen in that case ...
    if largest_prob >= 100 {
        return probs;
    }

    let mut diff = ((100 - largest_prob) as f32 * sharpen_clamped) as i32;

    probs[largest_idx] += diff;

    // randomized start index ?
    let mut idx: usize = rng.gen::<usize>() % probs.len();

    while diff > 0 {
        if idx == largest_idx {
            idx += 1;
            if idx >= probs.len() {
                idx = 0;
            }
            continue;
        }

        if probs[idx] > 0 {
            probs[idx] -= 1;
        }

        diff -= 1;

        idx += 1;
        if idx >= probs.len() {
            idx = 0;
        }
    }

    probs
}

/// Float variant of [`sharpen`]: probabilities are scaled to integers
/// (factor 100, matching sharpen's 100-point scale), sharpened, and
/// scaled back.
pub fn sharpen_float(probs: Vec<f32>, sharpness: f32) -> Vec<f32> {
    // scale to sharpen's integer 100-point space
    let scaled: Vec<i32> = probs.iter().map(|p| (p * 100.0) as i32).collect();

    let sharpened = sharpen(scaled, sharpness);

    // and back to floats
    sharpened.into_iter().map(|p| p as f32 / 100.0).collect()
}