use crate::flac::constants::{FIXED_COEFFICIENTS, MAX_FIXED_ORDER, MAX_LPC_ORDER};
use crate::flac::error::FlacError;
/// Computes the residual signal for a fixed predictor of the given order.
///
/// Every sample past the `order`-sample warm-up region is replaced by the
/// difference between the actual sample and the fixed-coefficient prediction
/// built from its `order` predecessors. Order 0 returns the samples verbatim.
///
/// # Errors
/// * `FlacError::InvalidFixedOrder` — `order` exceeds `MAX_FIXED_ORDER`.
/// * `FlacError::InvalidBlockSize` — fewer samples than `order`.
pub fn fixed_predictor_residual(samples: &[i32], order: usize) -> Result<Vec<i32>, FlacError> {
    if order > MAX_FIXED_ORDER {
        return Err(FlacError::InvalidFixedOrder { order: order as u8 });
    }
    if samples.len() < order {
        return Err(FlacError::InvalidBlockSize {
            size: samples.len() as u32,
        });
    }
    let coeffs = &FIXED_COEFFICIENTS[order][..order];
    // Each window holds `order` history samples followed by the current one;
    // coefficient j multiplies the sample j+1 steps back.
    let residuals: Vec<i32> = samples
        .windows(order + 1)
        .map(|window| {
            let prediction: i64 = coeffs
                .iter()
                .enumerate()
                .map(|(j, &c)| c as i64 * window[order - 1 - j] as i64)
                .sum();
            (window[order] as i64 - prediction) as i32
        })
        .collect();
    Ok(residuals)
}
/// Reconstructs samples from fixed-predictor residuals.
///
/// Seeds the output with the first `order` warm-up samples, then adds each
/// residual to the fixed-coefficient prediction built from the most recent
/// reconstructed samples. Exact inverse of `fixed_predictor_residual`.
///
/// # Errors
/// * `FlacError::InvalidFixedOrder` — `order` exceeds `MAX_FIXED_ORDER`.
/// * `FlacError::InvalidBlockSize` — `warmup` holds fewer than `order` samples.
pub fn fixed_predictor_restore(
    warmup: &[i32],
    residuals: &[i32],
    order: usize,
) -> Result<Vec<i32>, FlacError> {
    if order > MAX_FIXED_ORDER {
        return Err(FlacError::InvalidFixedOrder { order: order as u8 });
    }
    if warmup.len() < order {
        return Err(FlacError::InvalidBlockSize {
            size: warmup.len() as u32,
        });
    }
    let coeffs = &FIXED_COEFFICIENTS[order];
    // Only the first `order` warm-up samples are stored, so reserve exactly
    // `order + residuals.len()`. (Was `warmup.len() + residuals.len()`, which
    // over-reserves when callers pass a longer warm-up slice; this also
    // matches `lpc_predictor_restore`.)
    let mut samples = Vec::with_capacity(order + residuals.len());
    samples.extend_from_slice(&warmup[..order]);
    for &residual in residuals {
        let i = samples.len();
        let mut prediction: i64 = 0;
        // Coefficient j multiplies the sample j+1 steps back.
        for (j, &coeff) in coeffs[..order].iter().enumerate() {
            prediction += coeff as i64 * samples[i - 1 - j] as i64;
        }
        samples.push((prediction + residual as i64) as i32);
    }
    Ok(samples)
}
/// Picks the fixed-predictor order (0..=MAX_FIXED_ORDER) whose residuals have
/// the smallest summed absolute magnitude; blocks of four or fewer samples
/// always use order 0. Ties keep the lowest order.
pub fn find_best_fixed_order(samples: &[i32]) -> usize {
    if samples.len() <= 4 {
        return 0;
    }
    let highest = MAX_FIXED_ORDER.min(samples.len() - 1);
    (0..=highest)
        .filter_map(|order| {
            // Orders that fail validation are simply skipped.
            let residuals = fixed_predictor_residual(samples, order).ok()?;
            let energy: u64 = residuals.iter().map(|&r| (r as i64).unsigned_abs()).sum();
            Some((energy, order))
        })
        // min_by_key keeps the first minimal element, preserving the
        // lowest-order-wins tie-break of a strict `<` comparison.
        .min_by_key(|&(energy, _)| energy)
        .map(|(_, order)| order)
        .unwrap_or(0)
}
/// Computes the autocorrelation of `samples` at lags `0..=max_order`.
///
/// Entry `lag` is `Σ x[i] * x[i - lag]` over all valid `i`; lags at or beyond
/// the signal length yield 0.0.
pub fn autocorrelation(samples: &[i32], max_order: usize) -> Vec<f64> {
    let x: Vec<f64> = samples.iter().map(|&s| s as f64).collect();
    (0..=max_order)
        .map(|lag| {
            // Pair the signal with a lag-shifted copy of itself; the pairs
            // are visited in ascending index order, so the floating-point
            // summation order is unchanged.
            x.iter()
                .skip(lag)
                .zip(x.iter())
                .map(|(&a, &b)| a * b)
                .sum()
        })
        .collect()
}
/// Solves the normal equations with the Levinson-Durbin recursion.
///
/// `r` is the autocorrelation sequence (`r[0]` = lag 0); `order` is the
/// requested predictor order, silently capped at `r.len() - 1`. Returns the
/// LPC coefficients (sign convention used by the predictors in this module:
/// prediction = Σ a[j] * x[n-1-j]) and the final prediction-error energy.
/// If the recursion terminates early, the remaining coefficients stay zero.
///
/// # Errors
/// Returns `FlacError::LpcCoefficientOverflow` when `r` is empty or the
/// zero-lag autocorrelation is non-positive (degenerate or silent signal).
pub fn levinson_durbin(r: &[f64], order: usize) -> Result<(Vec<f64>, f64), FlacError> {
    if r.is_empty() || r[0] <= 0.0 {
        return Err(FlacError::LpcCoefficientOverflow);
    }
    let order = order.min(r.len() - 1);
    let mut a = vec![0.0f64; order]; // coefficients being built this step
    let mut a_prev = vec![0.0f64; order]; // snapshot from the previous step
    let mut error = r[0]; // prediction-error energy; shrinks each step
    for i in 0..order {
        // Reflection numerator: r[i+1] minus the correlation already
        // explained by the previous-order coefficients.
        let mut sum = r[i + 1];
        for j in 0..i {
            sum -= a_prev[j] * r[i - j];
        }
        // Guard the division below: a vanishing error means the signal is
        // already (near-)perfectly predicted at this order.
        if error.abs() < 1e-10 {
            break;
        }
        let k = sum / error; // reflection (PARCOR) coefficient for step i
        a[i] = k;
        // Update the lower-order coefficients from the previous step's
        // values; must read a_prev, not a, hence the double buffer.
        for j in 0..i {
            a[j] = a_prev[j] - k * a_prev[i - 1 - j];
        }
        error *= 1.0 - k * k;
        // Publish this step's coefficients for the next iteration.
        a_prev[..=i].copy_from_slice(&a[..=i]);
    }
    Ok((a, error))
}
/// Computes real-valued LPC coefficients of the requested order for
/// `samples` via autocorrelation + Levinson-Durbin.
///
/// # Errors
/// * `FlacError::InvalidLpcOrder` — `order` is 0 or exceeds `MAX_LPC_ORDER`.
/// * `FlacError::InvalidBlockSize` — not enough samples for the order.
/// * Errors from `levinson_durbin` on a degenerate autocorrelation.
pub fn compute_lpc_coefficients(samples: &[i32], order: usize) -> Result<Vec<f64>, FlacError> {
    if order == 0 || order > MAX_LPC_ORDER {
        return Err(FlacError::InvalidLpcOrder { order: order as u8 });
    }
    if samples.len() <= order {
        return Err(FlacError::InvalidBlockSize {
            size: samples.len() as u32,
        });
    }
    let autocorr = autocorrelation(samples, order);
    // Discard the residual-error term; callers only need the coefficients.
    levinson_durbin(&autocorr, order).map(|(coeffs, _error)| coeffs)
}
pub fn quantize_lpc_coefficients(
coeffs: &[f64],
precision: u8,
) -> Result<(Vec<i32>, i8), FlacError> {
if coeffs.is_empty() {
return Ok((vec![], 0));
}
let max_coeff = coeffs.iter().map(|c| c.abs()).fold(0.0f64, f64::max);
if max_coeff < 1e-10 {
return Ok((vec![0i32; coeffs.len()], 0));
}
let log2_max = max_coeff.log2();
let shift = (precision as i32 - 1) - log2_max.ceil() as i32;
let shift = shift.clamp(-16, 15) as i8;
let scale = 2.0f64.powi(shift as i32);
let max_val = (1i64 << (precision - 1)) - 1;
let min_val = -(1i64 << (precision - 1));
let mut quantized = Vec::with_capacity(coeffs.len());
for &c in coeffs {
let q = (c * scale).round() as i64;
let clamped = q.clamp(min_val, max_val) as i32;
quantized.push(clamped);
}
Ok((quantized, shift))
}
/// Computes residuals for a quantized LPC predictor.
///
/// For every sample after the `order`-sample warm-up, subtracts the
/// shift-scaled integer prediction from the actual sample. The order is
/// implied by `qlp_coeffs.len()`.
///
/// # Errors
/// * `FlacError::InvalidLpcOrder` — zero coefficients or more than
///   `MAX_LPC_ORDER`.
/// * `FlacError::InvalidBlockSize` — fewer samples than the order.
/// * `FlacError::InvalidLpcShift` — negative `qlp_shift`.
pub fn lpc_predictor_residual(
    samples: &[i32],
    qlp_coeffs: &[i32],
    qlp_shift: i8,
) -> Result<Vec<i32>, FlacError> {
    let order = qlp_coeffs.len();
    if order == 0 || order > MAX_LPC_ORDER {
        return Err(FlacError::InvalidLpcOrder { order: order as u8 });
    }
    if samples.len() < order {
        return Err(FlacError::InvalidBlockSize {
            size: samples.len() as u32,
        });
    }
    if qlp_shift < 0 {
        return Err(FlacError::InvalidLpcShift { shift: qlp_shift });
    }
    // Each window is `order` history samples followed by the current sample;
    // reversing the history pairs coefficient j with the sample j+1 back.
    let residuals: Vec<i32> = samples
        .windows(order + 1)
        .map(|window| {
            let acc: i64 = qlp_coeffs
                .iter()
                .zip(window[..order].iter().rev())
                .map(|(&c, &s)| c as i64 * s as i64)
                .sum();
            // Arithmetic right shift implements the FLAC quantization scale.
            (window[order] as i64 - (acc >> qlp_shift)) as i32
        })
        .collect();
    Ok(residuals)
}
/// Reconstructs samples from quantized-LPC residuals.
///
/// Seeds the output with the first `order` warm-up samples, then adds each
/// residual to the shift-scaled prediction over the most recent
/// reconstructed samples. Exact inverse of `lpc_predictor_residual`.
///
/// # Errors
/// * `FlacError::InvalidLpcOrder` — zero coefficients or more than
///   `MAX_LPC_ORDER`.
/// * `FlacError::InvalidBlockSize` — `warmup` shorter than the order.
/// * `FlacError::InvalidLpcShift` — negative `qlp_shift`.
pub fn lpc_predictor_restore(
    warmup: &[i32],
    residuals: &[i32],
    qlp_coeffs: &[i32],
    qlp_shift: i8,
) -> Result<Vec<i32>, FlacError> {
    let order = qlp_coeffs.len();
    if order == 0 || order > MAX_LPC_ORDER {
        return Err(FlacError::InvalidLpcOrder { order: order as u8 });
    }
    if warmup.len() < order {
        return Err(FlacError::InvalidBlockSize {
            size: warmup.len() as u32,
        });
    }
    if qlp_shift < 0 {
        return Err(FlacError::InvalidLpcShift { shift: qlp_shift });
    }
    let mut samples = Vec::with_capacity(order + residuals.len());
    samples.extend_from_slice(&warmup[..order]);
    for &residual in residuals {
        // Predict from the `order` most recent reconstructed samples,
        // newest first, mirroring the encoder's pairing of coefficients.
        let history = &samples[samples.len() - order..];
        let acc: i64 = qlp_coeffs
            .iter()
            .zip(history.iter().rev())
            .map(|(&c, &s)| c as i64 * s as i64)
            .sum();
        samples.push(((acc >> qlp_shift) + residual as i64) as i32);
    }
    Ok(samples)
}
/// Searches for the best LPC order for `samples`.
///
/// Computes the autocorrelation once up to `max_order`, then evaluates
/// candidate orders — all of `1..=max_order` when `exhaustive`, otherwise a
/// sparse ladder of common orders — scoring each by summed absolute residual
/// magnitude plus the coefficient-storage overhead. Returns the winning
/// order with its quantized coefficients and shift, or `(0, vec![], 0)` when
/// the input is too short for any predictor.
///
/// # Errors
/// Propagates failures from the Levinson-Durbin solve, quantization, or
/// residual computation.
pub fn find_best_lpc_order(
    samples: &[i32],
    max_order: usize,
    qlp_precision: u8,
    exhaustive: bool,
) -> Result<(usize, Vec<i32>, i8), FlacError> {
    // Guard before `samples.len() - 1`: the previous code underflowed
    // (panicked) on an empty slice. One sample also supports no predictor.
    if samples.len() < 2 {
        return Ok((0, vec![], 0));
    }
    let max_order = max_order.min(MAX_LPC_ORDER).min(samples.len() - 1);
    if max_order == 0 {
        return Ok((0, vec![], 0));
    }
    let r = autocorrelation(samples, max_order);
    let orders: Vec<usize> = if exhaustive {
        (1..=max_order).collect()
    } else {
        // Sparse ladder of commonly useful orders, capped at max_order.
        [1, 2, 4, 6, 8, 10, 12]
            .iter()
            .copied()
            .filter(|&o| o <= max_order)
            .collect()
    };
    let mut best_order = 1;
    let mut best_coeffs = vec![];
    let mut best_shift = 0i8;
    let mut best_bits = u64::MAX;
    for order in orders {
        let (coeffs, _error) = levinson_durbin(&r, order)?;
        let (qlp_coeffs, qlp_shift) = quantize_lpc_coefficients(&coeffs, qlp_precision)?;
        let residuals = lpc_predictor_residual(samples, &qlp_coeffs, qlp_shift)?;
        // Crude cost model: residual magnitude stands in for encoded size,
        // plus the bits needed to store the coefficients themselves.
        let residual_energy: u64 = residuals.iter().map(|&r| (r as i64).unsigned_abs()).sum();
        let coeff_bits = order as u64 * qlp_precision as u64;
        let estimated_bits = residual_energy + coeff_bits;
        if estimated_bits < best_bits {
            best_bits = estimated_bits;
            best_order = order;
            best_coeffs = qlp_coeffs;
            best_shift = qlp_shift;
        }
    }
    Ok((best_order, best_coeffs, best_shift))
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_fixed_predictor_order_0() {
        // Order 0 predicts nothing: residuals are the samples themselves.
        let samples = vec![10, 20, 30, 40, 50];
        let residuals = fixed_predictor_residual(&samples, 0).unwrap();
        assert_eq!(residuals, samples);
    }

    #[test]
    fn test_fixed_predictor_order_1() {
        // Order 1 emits first differences.
        let samples = vec![10, 20, 30, 40, 50];
        let residuals = fixed_predictor_residual(&samples, 1).unwrap();
        assert_eq!(residuals, vec![10, 10, 10, 10]);
    }

    #[test]
    fn test_fixed_predictor_order_2() {
        // A linear ramp has zero second differences.
        let samples = vec![10, 15, 20, 25, 30];
        let residuals = fixed_predictor_residual(&samples, 2).unwrap();
        assert_eq!(residuals, vec![0, 0, 0]);
    }

    #[test]
    fn test_fixed_predictor_roundtrip() {
        let original = vec![100, 150, 180, 200, 250, 280, 320];
        for order in 0..=4 {
            let residuals = fixed_predictor_residual(&original, order).unwrap();
            let restored = fixed_predictor_restore(&original[..order], &residuals, order).unwrap();
            assert_eq!(restored, original, "Roundtrip failed for order {}", order);
        }
    }

    #[test]
    fn test_autocorrelation() {
        let samples = vec![1, 2, 3, 2, 1];
        let r = autocorrelation(&samples, 2);
        // Lag 0: 1+4+9+4+1 = 19; lag 1: 2+6+6+2 = 16; lag 2: 3+4+3 = 10.
        // (Previously only r[0] was asserted.)
        assert!((r[0] - 19.0).abs() < 1e-10);
        assert!((r[1] - 16.0).abs() < 1e-10);
        assert!((r[2] - 10.0).abs() < 1e-10);
    }

    #[test]
    fn test_levinson_durbin_basic() {
        let r = vec![1.0, 0.5, 0.25];
        let (coeffs, _error) = levinson_durbin(&r, 2).unwrap();
        assert_eq!(coeffs.len(), 2);
    }

    #[test]
    fn test_quantize_coefficients() {
        let coeffs = vec![0.5, -0.25, 0.125];
        let (quantized, shift) = quantize_lpc_coefficients(&coeffs, 12).unwrap();
        assert_eq!(quantized.len(), 3);
        assert!(shift >= 0, "shift must be usable by the predictors");
        for q in &quantized {
            // Signed 12-bit range (these small coefficients never hit -2048).
            assert!(q.abs() < (1 << 11));
        }
    }

    #[test]
    fn test_lpc_predictor_roundtrip() {
        let samples: Vec<i32> = (0..100)
            .map(|i| (100.0 * (i as f64 * 0.1).sin()) as i32)
            .collect();
        let order = 4;
        let coeffs = compute_lpc_coefficients(&samples, order).unwrap();
        let (qlp_coeffs, qlp_shift) = quantize_lpc_coefficients(&coeffs, 12).unwrap();
        let residuals = lpc_predictor_residual(&samples, &qlp_coeffs, qlp_shift).unwrap();
        let restored =
            lpc_predictor_restore(&samples[..order], &residuals, &qlp_coeffs, qlp_shift).unwrap();
        assert_eq!(restored, samples);
    }

    #[test]
    fn test_find_best_fixed_order() {
        let linear: Vec<i32> = (0..100).map(|i| i * 10).collect();
        let order = find_best_fixed_order(&linear);
        assert!(order >= 2, "Linear signal should use order >= 2");
        let constant: Vec<i32> = vec![42; 100];
        let order = find_best_fixed_order(&constant);
        assert!(order >= 1, "Constant signal should use order >= 1");
    }
}