use crate::audio::Frame;
use crate::metadata::{
Application, BlockList, BlockSize, Cuesheet, Picture, PortableMetadataBlock, SeekPoint,
Streaminfo, VorbisComment, write_blocks,
};
use crate::stream::{ChannelAssignment, FrameNumber, Independent, SampleRate};
use crate::{Counter, Error};
use arrayvec::ArrayVec;
use bitstream_io::{BigEndian, BitRecorder, BitWrite, BitWriter, SignedBitCount};
use std::fs::File;
use std::io::BufWriter;
use std::num::NonZero;
use std::path::Path;
// Maximum number of channels permitted by the FLAC format.
const MAX_CHANNELS: usize = 8;
// Maximum LPC predictor order permitted by the FLAC format.
const MAX_LPC_COEFFS: usize = 32;
// Builds an `ArrayVec` from a list of expressions, analogous to `vec![]`.
// Note that `push` panics if the items exceed the `ArrayVec`'s capacity.
macro_rules! arrayvec {
    ( $( $x:expr ),* ) => {
        {
            let mut v = ArrayVec::default();
            $( v.push($x); )*
            v
        }
    }
}
/// Writes a FLAC file from raw PCM bytes fed through [`std::io::Write`].
///
/// Incoming bytes are buffered until a whole block's worth of PCM frames
/// is available, then encoded.  `E` selects the byte order of the
/// incoming samples.
pub struct FlacByteWriter<W: std::io::Write + std::io::Seek, E: crate::byteorder::Endianness> {
    encoder: Encoder<W>,
    // Raw PCM bytes not yet encoded (less than one FLAC frame's worth).
    buf: Vec<u8>,
    // Reusable frame buffer filled from `buf` before each encode.
    frame: Frame,
    // Size of one sample in bytes (bits-per-sample rounded up to bytes).
    bytes_per_sample: usize,
    // Size of one PCM frame (one sample per channel) in bytes.
    pcm_frame_size: usize,
    // Size of one whole FLAC frame (block_size PCM frames) in bytes.
    frame_byte_size: usize,
    // Set once the stream has been finalized; guards double-finalize.
    finalized: bool,
    // Zero-sized marker tying the writer to the input endianness.
    endianness: std::marker::PhantomData<E>,
}
impl<W: std::io::Write + std::io::Seek, E: crate::byteorder::Endianness> FlacByteWriter<W, E> {
    /// Builds a new byte-oriented FLAC writer.
    ///
    /// `total_bytes`, when known, is converted to a total sample count
    /// and must divide evenly by the channel count and sample size.
    ///
    /// # Errors
    ///
    /// Returns an error if `bits_per_sample` is out of range, if
    /// `total_bytes` is zero or does not divide evenly into whole PCM
    /// frames, or if the underlying encoder rejects the parameters.
    pub fn new(
        writer: W,
        options: Options,
        sample_rate: u32,
        bits_per_sample: u32,
        channels: u8,
        total_bytes: Option<u64>,
    ) -> Result<Self, Error> {
        let bits_per_sample: SignedBitCount<32> = bits_per_sample
            .try_into()
            .map_err(|_| Error::InvalidBitsPerSample)?;
        // Samples occupy whole bytes, so round the bit count up.
        let bytes_per_sample = u32::from(bits_per_sample).div_ceil(8) as usize;
        let pcm_frame_size = bytes_per_sample * channels as usize;
        Ok(Self {
            buf: Vec::default(),
            frame: Frame::empty(channels.into(), bits_per_sample.into()),
            bytes_per_sample,
            pcm_frame_size,
            frame_byte_size: pcm_frame_size * options.block_size as usize,
            encoder: Encoder::new(
                writer,
                options,
                sample_rate,
                bits_per_sample,
                channels,
                // bytes -> samples: both divisions must be exact or the
                // byte count can't describe whole PCM frames.
                total_bytes
                    .map(|bytes| {
                        exact_div(bytes, channels.into())
                            .and_then(|s| exact_div(s, bytes_per_sample as u64))
                            .ok_or(Error::SamplesNotDivisibleByChannels)
                            .and_then(|b| NonZero::new(b).ok_or(Error::InvalidTotalBytes))
                    })
                    .transpose()?,
            )?,
            finalized: false,
            endianness: std::marker::PhantomData,
        })
    }

    /// Convenience constructor for CD-quality audio
    /// (44.1 kHz, 16 bits-per-sample, 2 channels).
    pub fn new_cdda(writer: W, options: Options, total_bytes: Option<u64>) -> Result<Self, Error> {
        Self::new(writer, options, 44100, 16, 2, total_bytes)
    }

    /// Like [`Self::new`], but takes an endianness value so `E` can be
    /// inferred from an argument instead of a type annotation.
    #[inline]
    pub fn endian(
        writer: W,
        _endianness: E,
        options: Options,
        sample_rate: u32,
        bits_per_sample: u32,
        channels: u8,
        total_bytes: Option<u64>,
    ) -> Result<Self, Error> {
        Self::new(
            writer,
            options,
            sample_rate,
            bits_per_sample,
            channels,
            total_bytes,
        )
    }

    /// Encodes any buffered leftovers as a final (short) frame and
    /// writes the stream's closing metadata.  Idempotent.
    fn finalize_inner(&mut self) -> Result<(), Error> {
        if !self.finalized {
            self.finalized = true;
            if !self.buf.is_empty() {
                use crate::byteorder::LittleEndian;
                let buf = self.buf.as_mut_slice();
                let buf_len = buf.len();
                // Drop trailing bytes that don't form a whole PCM frame.
                let buf = &mut buf[..(buf_len - buf_len % self.pcm_frame_size)];
                // Normalize to little-endian in place; the MD5 hash is
                // computed over the little-endian byte stream.
                E::bytes_to_le(buf, self.bytes_per_sample);
                self.encoder.md5.consume(&buf);
                self.encoder
                    .encode(self.frame.fill_from_buf::<LittleEndian>(buf))?;
            }
            self.encoder.finalize_inner()
        } else {
            Ok(())
        }
    }

    /// Finishes the stream, consuming the writer.
    ///
    /// # Errors
    ///
    /// Returns any error produced while flushing the final frame or
    /// rewriting the metadata blocks.
    pub fn finalize(mut self) -> Result<(), Error> {
        self.finalize_inner()?;
        Ok(())
    }
}
impl<E: crate::byteorder::Endianness> FlacByteWriter<BufWriter<File>, E> {
    /// Creates a FLAC file at `path` and returns a byte writer for it.
    ///
    /// Creation fails if the file already exists, unless
    /// [`Options::overwrite`] was requested.
    #[inline]
    pub fn create<P: AsRef<Path>>(
        path: P,
        options: Options,
        sample_rate: u32,
        bits_per_sample: u32,
        channels: u8,
        total_bytes: Option<u64>,
    ) -> Result<Self, Error> {
        let file = options.create(path)?;
        Self::new(
            BufWriter::new(file),
            options,
            sample_rate,
            bits_per_sample,
            channels,
            total_bytes,
        )
    }

    /// Creates a CD-quality (44.1 kHz / 16-bit / stereo) FLAC file at `path`.
    pub fn create_cdda<P: AsRef<Path>>(
        path: P,
        options: Options,
        total_bytes: Option<u64>,
    ) -> Result<Self, Error> {
        Self::create(path, options, 44100, 16, 2, total_bytes)
    }
}
impl<W: std::io::Write + std::io::Seek, E: crate::byteorder::Endianness> std::io::Write
    for FlacByteWriter<W, E>
{
    /// Buffers `buf` and encodes as many whole FLAC frames as the
    /// accumulated bytes allow; the remainder is kept for later calls.
    ///
    /// Always reports the full `buf.len()` as consumed.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        use crate::byteorder::LittleEndian;
        self.buf.extend(buf);
        let mut encoded_frames = 0;
        // Encode every complete frame-sized chunk currently buffered.
        for buf in self
            .buf
            .as_mut_slice()
            .chunks_exact_mut(self.frame_byte_size)
        {
            // Convert to little-endian in place before hashing/encoding.
            E::bytes_to_le(buf, self.bytes_per_sample);
            self.encoder.md5.consume(&buf);
            self.encoder
                .encode(self.frame.fill_from_buf::<LittleEndian>(buf))?;
            encoded_frames += 1;
        }
        // Discard the bytes that were just encoded, keeping the tail.
        self.buf.drain(0..self.frame_byte_size * encoded_frames);
        Ok(buf.len())
    }

    #[inline]
    fn flush(&mut self) -> std::io::Result<()> {
        self.encoder.writer.flush()
    }
}
impl<W: std::io::Write + std::io::Seek, E: crate::byteorder::Endianness> Drop
    for FlacByteWriter<W, E>
{
    /// Best-effort finalization on drop; errors are discarded.
    /// Call [`FlacByteWriter::finalize`] to observe them.
    fn drop(&mut self) {
        let _ = self.finalize_inner();
    }
}
/// Writes a FLAC file from channel-interleaved `i32` samples.
pub struct FlacSampleWriter<W: std::io::Write + std::io::Seek> {
    encoder: Encoder<W>,
    // Interleaved samples not yet encoded (less than one frame's worth).
    sample_buf: Vec<i32>,
    // Reusable frame buffer filled from `sample_buf` before each encode.
    frame: Frame,
    // Samples (across all channels) per whole FLAC frame.
    frame_sample_size: usize,
    // Samples per PCM frame, i.e. the channel count.
    pcm_frame_size: usize,
    // Bytes per sample, used for the byte-oriented MD5 hash.
    bytes_per_sample: usize,
    // Set once the stream has been finalized; guards double-finalize.
    finalized: bool,
}
impl<W: std::io::Write + std::io::Seek> FlacSampleWriter<W> {
    /// Builds a new sample-oriented FLAC writer.
    ///
    /// `total_samples`, when known, counts individual samples across
    /// all channels and must divide evenly by the channel count.
    ///
    /// # Errors
    ///
    /// Returns an error if `bits_per_sample` is out of range, if
    /// `total_samples` is zero or not divisible by `channels`, or if
    /// the underlying encoder rejects the parameters.
    pub fn new(
        writer: W,
        options: Options,
        sample_rate: u32,
        bits_per_sample: u32,
        channels: u8,
        total_samples: Option<u64>,
    ) -> Result<Self, Error> {
        let bits_per_sample: SignedBitCount<32> = bits_per_sample
            .try_into()
            .map_err(|_| Error::InvalidBitsPerSample)?;
        // Needed only for the MD5 hash, which is byte-oriented.
        let bytes_per_sample = u32::from(bits_per_sample).div_ceil(8) as usize;
        // One PCM frame = one sample per channel.
        let pcm_frame_size = usize::from(channels);
        Ok(Self {
            sample_buf: Vec::default(),
            frame: Frame::empty(channels.into(), bits_per_sample.into()),
            bytes_per_sample,
            pcm_frame_size,
            frame_sample_size: pcm_frame_size * options.block_size as usize,
            encoder: Encoder::new(
                writer,
                options,
                sample_rate,
                bits_per_sample,
                channels,
                // interleaved samples -> PCM frames per channel.
                total_samples
                    .map(|samples| {
                        exact_div(samples, channels.into())
                            .ok_or(Error::SamplesNotDivisibleByChannels)
                            .and_then(|s| NonZero::new(s).ok_or(Error::InvalidTotalSamples))
                    })
                    .transpose()?,
            )?,
            finalized: false,
        })
    }

    /// Convenience constructor for CD-quality audio
    /// (44.1 kHz, 16 bits-per-sample, 2 channels).
    pub fn new_cdda(
        writer: W,
        options: Options,
        total_samples: Option<u64>,
    ) -> Result<Self, Error> {
        Self::new(writer, options, 44100, 16, 2, total_samples)
    }

    /// Buffers interleaved `samples` and encodes as many whole frames
    /// as are available; the remainder is kept for later calls.
    pub fn write(&mut self, samples: &[i32]) -> Result<(), Error> {
        self.sample_buf.extend(samples);
        let mut encoded_frames = 0;
        for buf in self
            .sample_buf
            .as_mut_slice()
            .chunks_exact_mut(self.frame_sample_size)
        {
            update_md5(
                &mut self.encoder.md5,
                buf.iter().copied(),
                self.bytes_per_sample,
            );
            self.encoder.encode(self.frame.fill_from_samples(buf))?;
            encoded_frames += 1;
        }
        // Discard the samples that were just encoded.
        self.sample_buf
            .drain(0..self.frame_sample_size * encoded_frames);
        Ok(())
    }

    /// Encodes any buffered leftovers as a final (short) frame and
    /// writes the stream's closing metadata.  Idempotent.
    fn finalize_inner(&mut self) -> Result<(), Error> {
        if !self.finalized {
            self.finalized = true;
            if !self.sample_buf.is_empty() {
                let buf = self.sample_buf.as_mut_slice();
                let buf_len = buf.len();
                // Drop trailing samples not forming a whole PCM frame.
                let buf = &mut buf[..(buf_len - buf_len % self.pcm_frame_size)];
                update_md5(
                    &mut self.encoder.md5,
                    buf.iter().copied(),
                    self.bytes_per_sample,
                );
                self.encoder.encode(self.frame.fill_from_samples(buf))?;
            }
            self.encoder.finalize_inner()
        } else {
            Ok(())
        }
    }

    /// Finishes the stream, consuming the writer.
    ///
    /// # Errors
    ///
    /// Returns any error produced while flushing the final frame or
    /// rewriting the metadata blocks.
    pub fn finalize(mut self) -> Result<(), Error> {
        self.finalize_inner()?;
        Ok(())
    }
}
impl FlacSampleWriter<BufWriter<File>> {
#[inline]
pub fn create<P: AsRef<Path>>(
path: P,
options: Options,
sample_rate: u32,
bits_per_sample: u32,
channels: u8,
total_samples: Option<u64>,
) -> Result<Self, Error> {
FlacSampleWriter::new(
BufWriter::new(options.create(path)?),
options,
sample_rate,
bits_per_sample,
channels,
total_samples,
)
}
#[inline]
pub fn create_cdda<P: AsRef<Path>>(
path: P,
options: Options,
total_samples: Option<u64>,
) -> Result<Self, Error> {
Self::create(path, options, 44100, 16, 2, total_samples)
}
}
/// Writes a FLAC file from independent per-channel sample slices.
pub struct FlacChannelWriter<W: std::io::Write + std::io::Seek> {
    encoder: Encoder<W>,
    // Pending samples, one buffer per channel.
    channel_bufs: Vec<Vec<i32>>,
    // Reusable frame buffer filled from the channel buffers.
    frame: Frame,
    // Samples per channel per whole FLAC frame (the block size).
    frame_sample_size: usize,
    // Bytes per sample, used for the byte-oriented MD5 hash.
    bytes_per_sample: usize,
    // Set once the stream has been finalized; guards double-finalize.
    finalized: bool,
}
impl<W: std::io::Write + std::io::Seek> FlacChannelWriter<W> {
    /// Builds a new channel-oriented FLAC writer.
    ///
    /// `total_samples`, when known, counts samples per channel.
    ///
    /// # Errors
    ///
    /// Returns an error if `bits_per_sample` is out of range or the
    /// underlying encoder rejects the parameters.
    pub fn new(
        writer: W,
        options: Options,
        sample_rate: u32,
        bits_per_sample: u32,
        channels: u8,
        total_samples: Option<u64>,
    ) -> Result<Self, Error> {
        let bits_per_sample: SignedBitCount<32> = bits_per_sample
            .try_into()
            .map_err(|_| Error::InvalidBitsPerSample)?;
        // Needed only for the MD5 hash, which is byte-oriented.
        let bytes_per_sample = u32::from(bits_per_sample).div_ceil(8) as usize;
        Ok(Self {
            channel_bufs: vec![Vec::default(); channels.into()],
            frame: Frame::empty(channels.into(), bits_per_sample.into()),
            bytes_per_sample,
            frame_sample_size: options.block_size as usize,
            encoder: Encoder::new(
                writer,
                options,
                sample_rate,
                bits_per_sample,
                channels,
                // NOTE(review): unlike the other writers, Some(0) is
                // silently treated as "unknown" here rather than
                // rejected as an invalid total — confirm intentional.
                total_samples.and_then(NonZero::new),
            )?,
            finalized: false,
        })
    }

    /// Convenience constructor for CD-quality audio
    /// (44.1 kHz, 16 bits-per-sample, 2 channels).
    pub fn new_cdda(
        writer: W,
        options: Options,
        total_samples: Option<u64>,
    ) -> Result<Self, Error> {
        Self::new(writer, options, 44100, 16, 2, total_samples)
    }

    /// Buffers one slice of samples per channel and encodes as many
    /// whole frames as are available.
    ///
    /// # Errors
    ///
    /// Returns an error unless exactly one slice per stream channel is
    /// given and all slices are the same length.
    pub fn write<C, S>(&mut self, channels: C) -> Result<(), Error>
    where
        C: AsRef<[S]>,
        S: AsRef<[i32]>,
    {
        use crate::audio::MultiZip;
        let channels = channels.as_ref();
        // Validate shape: one equally-long slice per channel.
        match channels {
            whole @ [first, rest @ ..]
                if whole.len() == usize::from(self.encoder.channel_count().get()) =>
            {
                if rest
                    .iter()
                    .any(|c| c.as_ref().len() != first.as_ref().len())
                {
                    return Err(Error::ChannelLengthMismatch);
                }
            }
            _ => {
                return Err(Error::ChannelCountMismatch);
            }
        }
        for (buf, channel) in self.channel_bufs.iter_mut().zip(channels) {
            buf.extend(channel.as_ref());
        }
        let mut encoded_frames = 0;
        // Walk all channels' block-sized chunks in lockstep.
        for bufs in self
            .channel_bufs
            .iter_mut()
            .map(|v| v.as_mut_slice().chunks_exact_mut(self.frame_sample_size))
            .collect::<MultiZip<_>>()
        {
            // Interleave the channels (via MultiZip) for the MD5 hash,
            // which is computed over interleaved samples.
            update_md5(
                &mut self.encoder.md5,
                bufs.iter()
                    .map(|c| c.iter().copied())
                    .collect::<MultiZip<_>>()
                    .flatten(),
                self.bytes_per_sample,
            );
            self.encoder.encode(self.frame.fill_from_channels(&bufs))?;
            encoded_frames += 1;
        }
        // Drop the samples that were just encoded from every channel.
        for channel in self.channel_bufs.iter_mut() {
            channel.drain(0..self.frame_sample_size * encoded_frames);
        }
        Ok(())
    }

    /// Encodes any buffered leftovers as a final (short) frame and
    /// writes the stream's closing metadata.  Idempotent.
    fn finalize_inner(&mut self) -> Result<(), Error> {
        use crate::audio::MultiZip;
        if !self.finalized {
            self.finalized = true;
            // All buffers are kept the same length, so checking the
            // first channel suffices.
            if !self.channel_bufs[0].is_empty() {
                update_md5(
                    &mut self.encoder.md5,
                    self.channel_bufs
                        .iter()
                        .map(|c| c.iter().copied())
                        .collect::<MultiZip<_>>()
                        .flatten(),
                    self.bytes_per_sample,
                );
                self.encoder.encode(
                    self.frame.fill_from_channels(
                        self.channel_bufs
                            .iter_mut()
                            .map(|v| v.as_mut_slice())
                            .collect::<ArrayVec<_, MAX_CHANNELS>>()
                            .as_slice(),
                    ),
                )?;
            }
            self.encoder.finalize_inner()
        } else {
            Ok(())
        }
    }

    /// Finishes the stream, consuming the writer.
    ///
    /// # Errors
    ///
    /// Returns any error produced while flushing the final frame or
    /// rewriting the metadata blocks.
    pub fn finalize(mut self) -> Result<(), Error> {
        self.finalize_inner()?;
        Ok(())
    }
}
impl FlacChannelWriter<BufWriter<File>> {
#[inline]
pub fn create<P: AsRef<Path>>(
path: P,
options: Options,
sample_rate: u32,
bits_per_sample: u32,
channels: u8,
total_samples: Option<u64>,
) -> Result<Self, Error> {
FlacChannelWriter::new(
BufWriter::new(options.create(path)?),
options,
sample_rate,
bits_per_sample,
channels,
total_samples,
)
}
#[inline]
pub fn create_cdda<P: AsRef<Path>>(
path: P,
options: Options,
total_samples: Option<u64>,
) -> Result<Self, Error> {
Self::create(path, options, 44100, 16, 2, total_samples)
}
}
/// Writes bare FLAC frames with no surrounding metadata blocks.
///
/// Unlike the file-oriented writers, this requires neither `Seek` nor a
/// known stream length; each `write` call produces exactly one frame.
pub struct FlacStreamWriter<W> {
    writer: W,
    options: EncoderOptions,
    // Reusable per-frame scratch buffers.
    caches: EncodingCaches,
    frame: Frame,
    // Number of the next frame to write; wraps around on overflow.
    frame_number: FrameNumber,
}
impl<W: std::io::Write> FlacStreamWriter<W> {
    /// Wraps `writer` in a frame-level FLAC writer.
    ///
    /// Only the encoding-related options are carried over; file and
    /// metadata options (clobber, metadata blocks, block size) do not
    /// apply since this writer emits bare frames.
    pub fn new(writer: W, options: Options) -> Self {
        Self {
            writer,
            options: EncoderOptions {
                max_partition_order: options.max_partition_order,
                mid_side: options.mid_side,
                seektable_interval: options.seektable_interval,
                max_lpc_order: options.max_lpc_order,
                window: options.window,
                exhaustive_channel_correlation: options.exhaustive_channel_correlation,
                // Updated per `write` call from the actual bit depth.
                use_rice2: false,
            },
            caches: EncodingCaches::default(),
            frame: Frame::default(),
            frame_number: FrameNumber::default(),
        }
    }

    /// Encodes one FLAC frame from interleaved `samples`.
    ///
    /// Parameters must be expressible in the frame header itself
    /// ("subset" streams): sample rates or bit depths that would need
    /// a STREAMINFO block are rejected.
    ///
    /// # Errors
    ///
    /// Returns an error if the sample count is not a multiple of the
    /// channel count, the channel count is not 1..=8, the block size,
    /// sample rate, or bits-per-sample cannot be encoded, or writing
    /// to the underlying stream fails.
    pub fn write(
        &mut self,
        sample_rate: u32,
        channels: u8,
        bits_per_sample: u32,
        samples: &[i32],
    ) -> Result<(), Error> {
        use crate::crc::{Crc16, CrcWriter};
        use crate::stream::{BitsPerSample, FrameHeader, SampleRate};
        let bits_per_sample: SignedBitCount<32> = bits_per_sample
            .try_into()
            .map_err(|_| Error::NonSubsetBitsPerSample)?;
        if !samples.len().is_multiple_of(usize::from(channels)) {
            return Err(Error::SamplesNotDivisibleByChannels);
        } else if !(1..=8).contains(&channels) {
            return Err(Error::ExcessiveChannels);
        }
        // Rice2 residual parameters are used for samples wider than
        // 16 bits.
        self.options.use_rice2 = u32::from(bits_per_sample) > 16;
        self.frame
            .resize(bits_per_sample.into(), channels.into(), 0);
        self.frame.fill_from_samples(samples);
        let block_size: crate::stream::BlockSize<u16> = crate::stream::BlockSize::try_from(
            u16::try_from(self.frame.pcm_frames()).map_err(|_| Error::InvalidBlockSize)?,
        )
        .map_err(|_| Error::InvalidBlockSize)?;
        // Reject sample rates that only STREAMINFO could describe.
        let sample_rate: SampleRate<u32> = sample_rate.try_into().and_then(|rate| match rate {
            SampleRate::Streaminfo(_) => Err(Error::NonSubsetSampleRate),
            rate => Ok(rate),
        })?;
        let header_bits_per_sample = match BitsPerSample::from(bits_per_sample) {
            BitsPerSample::Streaminfo(_) => return Err(Error::NonSubsetBitsPerSample),
            bps => bps,
        };
        // Everything from here to the end of the frame is covered by
        // the trailing CRC-16.
        let mut w: CrcWriter<_, Crc16> = CrcWriter::new(&mut self.writer);
        let mut bw: BitWriter<CrcWriter<&mut W, Crc16>, BigEndian>;
        match self
            .frame
            .channels()
            .collect::<ArrayVec<&[i32], MAX_CHANNELS>>()
            .as_slice()
        {
            // Mono: a single independent subframe.
            [channel] => {
                FrameHeader {
                    blocking_strategy: false,
                    frame_number: self.frame_number,
                    // NOTE(review): recomputes the block size from the
                    // channel length instead of reusing `block_size`
                    // computed above — confirm this is intentional.
                    block_size: (channel.len() as u16)
                        .try_into()
                        .expect("frame cannot be empty"),
                    sample_rate,
                    bits_per_sample: header_bits_per_sample,
                    channel_assignment: ChannelAssignment::Independent(Independent::Mono),
                }
                .write_subset(&mut w)?;
                bw = BitWriter::new(w);
                self.caches.channels.resize_with(1, ChannelCache::default);
                encode_subframe(
                    &self.options,
                    &mut self.caches.channels[0],
                    CorrelatedChannel::independent(bits_per_sample, channel),
                )?
                .playback(&mut bw)?;
            }
            // Stereo, exhaustive mode: `correlate_channels_exhaustive`
            // picks the channel assignment and returns both subframes
            // already encoded.
            [left, right] if self.options.exhaustive_channel_correlation => {
                let Correlated {
                    channel_assignment,
                    channels: [channel_0, channel_1],
                } = correlate_channels_exhaustive(
                    &self.options,
                    &mut self.caches.correlated,
                    [left, right],
                    bits_per_sample,
                )?;
                FrameHeader {
                    blocking_strategy: false,
                    frame_number: self.frame_number,
                    block_size,
                    sample_rate,
                    bits_per_sample: header_bits_per_sample,
                    channel_assignment,
                }
                .write_subset(&mut w)?;
                bw = BitWriter::new(w);
                channel_0.playback(&mut bw)?;
                channel_1.playback(&mut bw)?;
            }
            // Stereo, heuristic mode: pick an assignment first, then
            // encode the two chosen channels via `join`.
            [left, right] => {
                let Correlated {
                    channel_assignment,
                    channels: [channel_0, channel_1],
                } = correlate_channels(
                    &self.options,
                    &mut self.caches.correlated,
                    [left, right],
                    bits_per_sample,
                );
                FrameHeader {
                    blocking_strategy: false,
                    frame_number: self.frame_number,
                    block_size,
                    sample_rate,
                    bits_per_sample: header_bits_per_sample,
                    channel_assignment,
                }
                .write_subset(&mut w)?;
                self.caches.channels.resize_with(2, ChannelCache::default);
                let [cache_0, cache_1] = self.caches.channels.get_disjoint_mut([0, 1]).unwrap();
                let (channel_0, channel_1) = join(
                    || encode_subframe(&self.options, cache_0, channel_0),
                    || encode_subframe(&self.options, cache_1, channel_1),
                );
                bw = BitWriter::new(w);
                channel_0?.playback(&mut bw)?;
                channel_1?.playback(&mut bw)?;
            }
            // 3..=8 channels: all encoded independently.
            channels => {
                FrameHeader {
                    blocking_strategy: false,
                    frame_number: self.frame_number,
                    block_size,
                    sample_rate,
                    bits_per_sample: header_bits_per_sample,
                    channel_assignment: ChannelAssignment::Independent(
                        channels.len().try_into().expect("invalid channel count"),
                    ),
                }
                .write_subset(&mut w)?;
                bw = BitWriter::new(w);
                self.caches
                    .channels
                    .resize_with(channels.len(), ChannelCache::default);
                vec_map(
                    self.caches.channels.iter_mut().zip(channels).collect(),
                    |(cache, channel)| {
                        encode_subframe(
                            &self.options,
                            cache,
                            CorrelatedChannel::independent(bits_per_sample, channel),
                        )
                    },
                )
                .into_iter()
                .try_for_each(|r| r.and_then(|r| r.playback(bw.by_ref()).map_err(Error::Io)))?;
            }
        }
        // The frame ends with a CRC-16 of everything written above.
        let crc16: u16 = bw.aligned_writer()?.checksum().into();
        bw.write_from(crc16)?;
        // Frame numbers wrap back to zero on overflow.
        if self.frame_number.try_increment().is_err() {
            self.frame_number = FrameNumber::default();
        }
        Ok(())
    }

    /// Encodes one CD-quality (44.1 kHz / 16-bit / stereo) frame.
    pub fn write_cdda(&mut self, samples: &[i32]) -> Result<(), Error> {
        self.write(44100, 2, 16, samples)
    }
}
/// Feeds `samples` into the running MD5 digest, serializing each as a
/// little-endian integer of `bytes_per_sample` bytes.
///
/// # Panics
///
/// Panics if `bytes_per_sample` is not 1, 2, 3, or 4.
fn update_md5(md5: &mut md5::Context, samples: impl Iterator<Item = i32>, bytes_per_sample: usize) {
    use crate::byteorder::{Endianness, LittleEndian};
    match bytes_per_sample {
        1 => samples.for_each(|s| md5.consume(LittleEndian::i8_to_bytes(s as i8))),
        2 => samples.for_each(|s| md5.consume(LittleEndian::i16_to_bytes(s as i16))),
        3 => samples.for_each(|s| md5.consume(LittleEndian::i24_to_bytes(s))),
        4 => samples.for_each(|s| md5.consume(LittleEndian::i32_to_bytes(s))),
        _ => panic!("unsupported number of bytes per sample"),
    }
}
/// How densely seek points are recorded in the generated SEEKTABLE.
#[derive(Copy, Clone, Debug)]
pub enum SeekTableInterval {
    /// One seek point per given number of seconds of audio.
    Seconds(NonZero<u8>),
    /// One seek point per given number of FLAC frames.
    Frames(NonZero<usize>),
}
impl Default for SeekTableInterval {
    /// Defaults to one seek point every ten seconds of audio.
    fn default() -> Self {
        // 10 is nonzero, so construction cannot fail.
        match NonZero::new(10) {
            Some(seconds) => Self::Seconds(seconds),
            None => unreachable!("10 is nonzero"),
        }
    }
}
impl SeekTableInterval {
    /// Thins a stream of per-frame seek points down to this interval.
    ///
    /// For `Seconds`, keeps each point whose sample range contains the
    /// next interval boundary; for `Frames`, keeps every Nth point.
    fn filter<'s>(
        self,
        sample_rate: u32,
        seekpoints: impl IntoIterator<Item = EncoderSeekPoint> + 's,
    ) -> Box<dyn Iterator<Item = EncoderSeekPoint> + 's> {
        match self {
            Self::Seconds(seconds) => {
                // Number of samples between retained seek points.
                let nth_sample = u64::from(u32::from(seconds.get()) * sample_rate);
                let mut offset = 0;
                Box::new(seekpoints.into_iter().filter(move |point| {
                    if point.range().contains(&offset) {
                        // Keep this point and target the next boundary.
                        offset += nth_sample;
                        true
                    } else {
                        false
                    }
                }))
            }
            Self::Frames(frames) => Box::new(seekpoints.into_iter().step_by(frames.get())),
        }
    }
}
/// Encoder configuration, built via its chained setter methods.
#[derive(Clone, Debug)]
pub struct Options {
    // Whether `create` may overwrite an existing file.
    clobber: bool,
    // Samples per channel per FLAC frame.
    block_size: u16,
    // Upper bound on the residual partition order (0..=15).
    max_partition_order: u32,
    // Whether mid-side stereo correlation may be attempted.
    mid_side: bool,
    // Metadata blocks to write ahead of the audio frames.
    metadata: BlockList,
    // Seek point density; `None` writes no seektable.
    seektable_interval: Option<SeekTableInterval>,
    // Maximum LPC predictor order (1..=32); `None` presumably
    // disables LPC subframes — see `encode_subframe`.
    max_lpc_order: Option<NonZero<u8>>,
    // Analysis window applied before LPC estimation.
    window: Window,
    // Try every stereo channel assignment instead of a heuristic.
    exhaustive_channel_correlation: bool,
}
impl Default for Options {
    /// Mid-level defaults: 4096-sample blocks, mid-side encoding,
    /// partition order up to 5, LPC order up to 8, exhaustive stereo
    /// correlation, 4096 bytes of padding, and a seek point every
    /// ten seconds.
    fn default() -> Self {
        // Placeholder STREAMINFO; the encoder overwrites every field
        // when the stream is opened.
        let mut metadata = BlockList::new(Streaminfo {
            minimum_block_size: 0,
            maximum_block_size: 0,
            minimum_frame_size: None,
            maximum_frame_size: None,
            sample_rate: 0,
            channels: NonZero::new(1).unwrap(),
            bits_per_sample: SignedBitCount::new::<4>(),
            total_samples: None,
            md5: None,
        });
        // Reserve padding so a seektable can be added at finalization
        // without rewriting the whole file.
        metadata.insert(crate::metadata::Padding {
            size: 4096u16.into(),
        });
        Self {
            clobber: false,
            block_size: 4096,
            mid_side: true,
            max_partition_order: 5,
            metadata,
            seektable_interval: Some(SeekTableInterval::default()),
            max_lpc_order: NonZero::new(8),
            window: Window::default(),
            exhaustive_channel_correlation: true,
        }
    }
}
impl Options {
    /// Sets the encoder's block size in samples per channel.
    ///
    /// # Errors
    ///
    /// Returns [`OptionsError::InvalidBlockSize`] for sizes below 16.
    pub fn block_size(self, block_size: u16) -> Result<Self, OptionsError> {
        if block_size < 16 {
            Err(OptionsError::InvalidBlockSize)
        } else {
            Ok(Self { block_size, ..self })
        }
    }

    /// Sets the maximum LPC predictor order, or disables LPC with `None`.
    ///
    /// # Errors
    ///
    /// Returns [`OptionsError::InvalidLpcOrder`] for `Some(0)` or
    /// orders above 32.
    pub fn max_lpc_order(mut self, max_lpc_order: Option<u8>) -> Result<Self, OptionsError> {
        self.max_lpc_order = match max_lpc_order {
            None => None,
            Some(order) => match NonZero::new(order) {
                Some(order) if order.get() <= 32 => Some(order),
                _ => return Err(OptionsError::InvalidLpcOrder),
            },
        };
        Ok(self)
    }

    /// Sets the maximum residual partition order.
    ///
    /// # Errors
    ///
    /// Returns [`OptionsError::InvalidMaxPartitions`] for orders above 15.
    pub fn max_partition_order(mut self, max_partition_order: u32) -> Result<Self, OptionsError> {
        if max_partition_order > 15 {
            return Err(OptionsError::InvalidMaxPartitions);
        }
        self.max_partition_order = max_partition_order;
        Ok(self)
    }

    /// Enables or disables mid-side stereo correlation.
    pub fn mid_side(mut self, mid_side: bool) -> Self {
        self.mid_side = mid_side;
        self
    }

    /// Sets the analysis window used for LPC estimation.
    pub fn window(mut self, window: Window) -> Self {
        self.window = window;
        self
    }

    /// Selects fast (heuristic) instead of exhaustive stereo
    /// channel-correlation.
    pub fn fast_channel_correlation(mut self, fast: bool) -> Self {
        self.exhaustive_channel_correlation = !fast;
        self
    }

    /// Sets the padding block to `size` bytes; a size of zero removes
    /// the padding block entirely.
    ///
    /// # Errors
    ///
    /// Returns [`OptionsError::ExcessivePadding`] when `size` doesn't
    /// fit in a metadata block.
    pub fn padding(mut self, size: u32) -> Result<Self, OptionsError> {
        use crate::metadata::Padding;
        match size.try_into() {
            Err(_) => Err(OptionsError::ExcessivePadding),
            Ok(BlockSize::ZERO) => {
                self.metadata.remove::<Padding>();
                Ok(self)
            }
            Ok(size) => {
                self.metadata.update::<Padding>(|p| {
                    p.size = size;
                });
                Ok(self)
            }
        }
    }

    /// Removes the padding block entirely.
    pub fn no_padding(mut self) -> Self {
        self.metadata.remove::<crate::metadata::Padding>();
        self
    }

    /// Adds a `field`/`value` pair to the Vorbis comment block,
    /// creating the block if necessary.
    pub fn tag<S>(mut self, field: &str, value: S) -> Self
    where
        S: std::fmt::Display,
    {
        self.metadata
            .update::<VorbisComment>(|comment| comment.insert(field, value));
        self
    }

    /// Replaces the stream's Vorbis comment block.
    pub fn comment(mut self, comment: VorbisComment) -> Self {
        self.metadata.insert(comment);
        self
    }

    /// Adds a picture block.
    pub fn picture(mut self, picture: Picture) -> Self {
        self.metadata.insert(picture);
        self
    }

    /// Adds a cuesheet block.
    pub fn cuesheet(mut self, cuesheet: Cuesheet) -> Self {
        self.metadata.insert(cuesheet);
        self
    }

    /// Adds an application block.
    pub fn application(mut self, application: Application) -> Self {
        self.metadata.insert(application);
        self
    }

    /// Requests one seek point per `seconds` of audio; zero disables
    /// the seektable.
    pub fn seektable_seconds(mut self, seconds: u8) -> Self {
        self.seektable_interval = match NonZero::new(seconds) {
            Some(seconds) => Some(SeekTableInterval::Seconds(seconds)),
            None => None,
        };
        self
    }

    /// Requests one seek point per `frames` FLAC frames; zero disables
    /// the seektable.
    pub fn seektable_frames(mut self, frames: usize) -> Self {
        self.seektable_interval = match NonZero::new(frames) {
            Some(frames) => Some(SeekTableInterval::Frames(frames)),
            None => None,
        };
        self
    }

    /// Disables seektable generation.
    pub fn no_seektable(mut self) -> Self {
        self.seektable_interval = None;
        self
    }

    /// Adds an arbitrary metadata block.
    pub fn add_block<B>(&mut self, block: B) -> &mut Self
    where
        B: PortableMetadataBlock,
    {
        self.metadata.insert(block);
        self
    }

    /// Adds several arbitrary metadata blocks.
    pub fn add_blocks<B>(&mut self, iter: impl IntoIterator<Item = B>) -> &mut Self
    where
        B: PortableMetadataBlock,
    {
        iter.into_iter().for_each(|block| {
            self.metadata.insert(block);
        });
        self
    }

    /// Permits `create` to overwrite an existing file.
    pub fn overwrite(self) -> Self {
        Self {
            clobber: true,
            ..self
        }
    }

    /// A preset tuned for speed: small blocks, no mid-side or LPC,
    /// few partitions, heuristic channel correlation.
    pub fn fast() -> Self {
        let mut options = Self::default();
        options.block_size = 1152;
        options.mid_side = false;
        options.max_partition_order = 3;
        options.max_lpc_order = None;
        options.exhaustive_channel_correlation = false;
        options
    }

    /// A preset tuned for compression: large blocks, mid-side, higher
    /// partition and LPC orders.
    pub fn best() -> Self {
        let mut options = Self::default();
        options.block_size = 4096;
        options.mid_side = true;
        options.max_partition_order = 6;
        options.max_lpc_order = NonZero::new(12);
        options
    }

    /// Opens `path` for writing, refusing to overwrite an existing
    /// file unless `overwrite` was requested.
    fn create<P: AsRef<Path>>(&self, path: P) -> std::io::Result<File> {
        use std::fs::OpenOptions;
        if self.clobber {
            File::create(path)
        } else {
            OpenOptions::new()
                .write(true)
                .create_new(true)
                .open(path.as_ref())
        }
    }
}
/// Validation failures produced by the `Options` builder methods.
#[derive(Debug)]
pub enum OptionsError {
    /// Block size was below the minimum of 16.
    InvalidBlockSize,
    /// Requested maximum LPC order was 0 or above 32.
    InvalidLpcOrder,
    /// Requested maximum partition order was above 15.
    InvalidMaxPartitions,
    /// Requested padding size does not fit in a metadata block.
    ExcessivePadding,
}

impl std::error::Error for OptionsError {}

impl std::fmt::Display for OptionsError {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let msg = match self {
            Self::InvalidBlockSize => "block size must be >= 16",
            Self::InvalidLpcOrder => "maximum LPC order must be <= 32",
            Self::InvalidMaxPartitions => "max partition order must be <= 15",
            Self::ExcessivePadding => "padding size is too large for block",
        };
        std::fmt::Display::fmt(msg, f)
    }
}
// Encoding parameters derived from `Options`, minus the file- and
// metadata-related settings.
struct EncoderOptions {
    // Upper bound on the residual partition order (0..=15).
    max_partition_order: u32,
    // Whether mid-side stereo correlation may be attempted.
    mid_side: bool,
    // Seek point density, if a seektable is wanted.
    seektable_interval: Option<SeekTableInterval>,
    // Maximum LPC predictor order; `None` presumably disables LPC
    // subframes — see `encode_subframe`.
    max_lpc_order: Option<NonZero<u8>>,
    // Analysis window applied before LPC estimation.
    window: Window,
    // Try every stereo channel assignment instead of a heuristic.
    exhaustive_channel_correlation: bool,
    // Whether Rice2 residual parameters are enabled (set when
    // bits-per-sample exceeds 16).
    use_rice2: bool,
}
/// Window function applied to samples before LPC analysis.
#[derive(Copy, Clone, Debug)]
pub enum Window {
    /// Rectangular (all-ones) window.
    Rectangle,
    /// Hann (raised-cosine) window.
    Hann,
    /// Tukey (tapered-cosine) window with the given taper parameter;
    /// values at or below 0 degenerate to rectangular, values at or
    /// above 1 to Hann.
    Tukey(f32),
}
impl Window {
    /// Fills `window` with this window function's coefficients.
    ///
    /// # Panics
    ///
    /// Panics if the window length does not fit in a `u16`.
    fn generate(&self, window: &mut [f64]) {
        use std::f64::consts::PI;
        match self {
            Self::Rectangle => window.fill(1.0),
            Self::Hann => {
                // w[n] = 0.5 - 0.5 * cos(2*pi*n / (N - 1))
                let np =
                    f64::from(u16::try_from(window.len()).expect("window size too large")) - 1.0;
                window.iter_mut().zip(0u16..).for_each(|(w, n)| {
                    *w = 0.5 - 0.5 * (2.0 * PI * f64::from(n) / np).cos();
                });
            }
            Self::Tukey(p) => match p {
                // Taper <= 0 degenerates to a rectangular window.
                ..=0.0 => {
                    window.fill(1.0);
                }
                // Taper >= 1 degenerates to a Hann window.
                1.0.. => {
                    Self::Hann.generate(window);
                }
                0.0..1.0 => {
                    // np = length of each tapered end, minus one;
                    // `None` means the taper spans under one sample.
                    match ((f64::from(*p) / 2.0 * window.len() as f64) as usize).checked_sub(1) {
                        Some(np) => match window.get_disjoint_mut([
                            0..np,
                            np..window.len() - np,
                            window.len() - np..window.len(),
                        ]) {
                            Ok([first, mid, last]) => {
                                let np = u16::try_from(np).expect("window size too large");
                                // Raised-cosine ramp up at the head,
                                // mirrored ramp down at the tail.
                                for ((x, y), n) in
                                    first.iter_mut().zip(last.iter_mut().rev()).zip(0u16..)
                                {
                                    *x = 0.5 - 0.5 * (PI * f64::from(n) / f64::from(np)).cos();
                                    *y = *x;
                                }
                                // Flat top between the two tapers.
                                mid.fill(1.0);
                            }
                            // Ranges overlap (window too short for the
                            // taper): fall back to all-ones.
                            Err(_) => {
                                window.fill(1.0);
                            }
                        },
                        None => {
                            window.fill(1.0);
                        }
                    }
                }
                // Remaining case is NaN; use the default taper.
                _ => {
                    Self::Tukey(0.5).generate(window);
                }
            },
        }
    }

    /// Applies this window to `samples`, regenerating the cached
    /// coefficients in `window` only when the sample count changes,
    /// and returns the windowed samples stored in `cache`.
    fn apply<'w>(
        &self,
        window: &mut Vec<f64>,
        cache: &'w mut Vec<f64>,
        samples: &[i32],
    ) -> &'w [f64] {
        if window.len() != samples.len() {
            window.resize(samples.len(), 0.0);
            self.generate(window);
        }
        cache.clear();
        cache.extend(samples.iter().zip(window).map(|(s, w)| f64::from(*s) * *w));
        cache.as_slice()
    }
}
impl Default for Window {
fn default() -> Self {
Self::Tukey(0.5)
}
}
// Reusable scratch state shared across frame encodes.
#[derive(Default)]
struct EncodingCaches {
    // One cache per independently-encoded channel.
    channels: Vec<ChannelCache>,
    // Scratch state for stereo channel-correlation trials.
    correlated: CorrelationCache,
}
// Scratch buffers for stereo channel-correlation trials.
#[derive(Default)]
struct CorrelationCache {
    // Presumably the mid (average of left/right) channel — confirm.
    average_samples: Vec<i32>,
    // Presumably the side (left-minus-right) channel — confirm.
    difference_samples: Vec<i32>,
    left_cache: ChannelCache,
    right_cache: ChannelCache,
    average_cache: ChannelCache,
    difference_cache: ChannelCache,
}
// Per-channel scratch state: one recorded bitstream per candidate
// subframe type (selection happens in `encode_subframe`).
#[derive(Default)]
struct ChannelCache {
    fixed: FixedCache,
    fixed_output: BitRecorder<u32, BigEndian>,
    lpc: LpcCache,
    lpc_output: BitRecorder<u32, BigEndian>,
    constant_output: BitRecorder<u32, BigEndian>,
    verbatim_output: BitRecorder<u32, BigEndian>,
    // Presumably samples with wasted bits stripped — confirm against
    // `encode_subframe`.
    wasted: Vec<i32>,
}
// Scratch buffers for fixed-predictor subframe trials.
#[derive(Default)]
struct FixedCache {
    // One residual buffer per fixed predictor order — TODO confirm
    // which orders map to which buffer.
    fixed_buffers: [Vec<i32>; 4],
}
// Scratch buffers for LPC subframe encoding.
#[derive(Default)]
struct LpcCache {
    // Cached window coefficients; regenerated by `Window::apply` when
    // the sample count changes.
    window: Vec<f64>,
    // Windowed input samples.
    windowed: Vec<f64>,
    // Prediction residuals.
    residuals: Vec<i32>,
}
// Shared frame-encoding core used by all the file-oriented writers.
struct Encoder<W: std::io::Write + std::io::Seek> {
    // Byte-counting wrapper so frame offsets/sizes can be recorded.
    writer: Counter<W>,
    // Stream position where the metadata blocks begin; finalization
    // seeks back here to rewrite them.
    start: u64,
    options: EncoderOptions,
    // Reusable per-frame scratch buffers.
    caches: EncodingCaches,
    // Metadata blocks, rewritten with final values at finalization.
    blocks: BlockList,
    sample_rate: SampleRate<u32>,
    // Number of the next frame to write.
    frame_number: FrameNumber,
    // Running total of PCM frames written so far.
    samples_written: u64,
    // One candidate seek point per encoded frame.
    seekpoints: Vec<EncoderSeekPoint>,
    // Running MD5 of the little-endian sample stream.
    md5: md5::Context,
    // Guards against double finalization (explicit call + drop).
    finalized: bool,
}
impl<W: std::io::Write + std::io::Seek> Encoder<W> {
    /// Exclusive upper bound on the total sample count: 2^36, the
    /// capacity of STREAMINFO's total-samples field.
    const MAX_SAMPLES: u64 = 68_719_476_736;

    /// Validates the stream parameters and writes the initial metadata
    /// blocks, leaving the writer positioned for the first frame.
    ///
    /// STREAMINFO statistics (frame sizes, MD5, total samples when
    /// unknown) start as placeholders and are rewritten by
    /// `finalize_inner`.  When the total length is known and a
    /// seektable interval is configured, a placeholder SEEKTABLE is
    /// reserved up front so it can be filled in at finalization.
    fn new(
        mut writer: W,
        options: Options,
        sample_rate: u32,
        bits_per_sample: SignedBitCount<32>,
        channels: u8,
        total_samples: Option<NonZero<u64>>,
    ) -> Result<Self, Error> {
        use crate::metadata::OptionalBlockType;
        let mut blocks = options.metadata;
        *blocks.streaminfo_mut() = Streaminfo {
            minimum_block_size: options.block_size,
            maximum_block_size: options.block_size,
            // Unknown until frames have been written.
            minimum_frame_size: None,
            maximum_frame_size: None,
            // The sample rate is stored in a 20-bit field.
            sample_rate: (0..1048576)
                .contains(&sample_rate)
                .then_some(sample_rate)
                .ok_or(Error::InvalidSampleRate)?,
            bits_per_sample,
            channels: (1..=8)
                .contains(&channels)
                .then_some(channels)
                .and_then(NonZero::new)
                .ok_or(Error::ExcessiveChannels)?,
            total_samples: match total_samples {
                None => None,
                total_samples @ Some(samples) => match samples.get() {
                    0..Self::MAX_SAMPLES => total_samples,
                    _ => return Err(Error::ExcessiveTotalSamples),
                },
            },
            // Computed while encoding; filled in at finalization.
            md5: None,
        };
        // Reserve a placeholder seektable now so the metadata area has
        // room for the real seek points at finalization.
        if let Some(total_samples) = total_samples
            && let Some(placeholders) = options.seektable_interval.map(|s| {
                s.filter(
                    sample_rate,
                    EncoderSeekPoint::placeholders(total_samples.get(), options.block_size),
                )
            })
        {
            use crate::metadata::SeekTable;
            blocks.insert(SeekTable {
                points: placeholders
                    .take(SeekTable::MAX_POINTS)
                    .map(|p| p.into())
                    .collect::<Vec<_>>()
                    .try_into()
                    .unwrap(),
            });
        }
        // Remember where the metadata starts so it can be rewritten.
        let start = writer.stream_position()?;
        blocks.sort_by(|block| match block {
            OptionalBlockType::VorbisComment => 0,
            OptionalBlockType::SeekTable => 1,
            OptionalBlockType::Picture => 2,
            OptionalBlockType::Application => 3,
            OptionalBlockType::Cuesheet => 4,
            // Padding goes last so a late seektable can be carved out
            // of it at finalization.
            OptionalBlockType::Padding => 5,
        });
        write_blocks(writer.by_ref(), blocks.blocks())?;
        Ok(Self {
            start,
            writer: Counter::new(writer),
            options: EncoderOptions {
                max_partition_order: options.max_partition_order,
                mid_side: options.mid_side,
                seektable_interval: options.seektable_interval,
                max_lpc_order: options.max_lpc_order,
                window: options.window,
                exhaustive_channel_correlation: options.exhaustive_channel_correlation,
                // Rice2 residual parameters are used for samples wider
                // than 16 bits.
                use_rice2: u32::from(bits_per_sample) > 16,
            },
            caches: EncodingCaches::default(),
            sample_rate: blocks
                .streaminfo()
                .sample_rate
                .try_into()
                .expect("invalid sample rate"),
            blocks,
            frame_number: FrameNumber::default(),
            samples_written: 0,
            seekpoints: Vec::new(),
            md5: md5::Context::new(),
            finalized: false,
        })
    }

    /// The stream's channel count, as recorded in STREAMINFO.
    fn channel_count(&self) -> NonZero<u8> {
        self.blocks.streaminfo().channels
    }

    /// Encodes one frame, records its seek point, and tracks the
    /// running sample total against the declared stream length.
    fn encode(&mut self, frame: &Frame) -> Result<(), Error> {
        self.seekpoints.push(EncoderSeekPoint {
            sample_offset: self.samples_written,
            byte_offset: Some(self.writer.count),
            frame_samples: frame.pcm_frames() as u16,
        });
        self.samples_written += frame.pcm_frames() as u64;
        // Writing more samples than declared up front is an error.
        if let Some(total_samples) = self.blocks.streaminfo().total_samples
            && self.samples_written > total_samples.get()
        {
            return Err(Error::ExcessiveTotalSamples);
        }
        encode_frame(
            &self.options,
            &mut self.caches,
            &mut self.writer,
            self.blocks.streaminfo_mut(),
            &mut self.frame_number,
            self.sample_rate,
            frame.channels().collect(),
        )
    }

    /// Completes the stream: installs the real seek points, verifies
    /// or fills in the total sample count, stores the MD5 hash, then
    /// seeks back and rewrites the metadata blocks.  Idempotent.
    fn finalize_inner(&mut self) -> Result<(), Error> {
        if !self.finalized {
            use crate::metadata::SeekTable;
            self.finalized = true;
            if let Some(encoded_points) = self
                .options
                .seektable_interval
                .map(|s| s.filter(self.sample_rate.into(), self.seekpoints.iter().cloned()))
            {
                match self.blocks.get_pair_mut() {
                    // A seektable was reserved up front: overwrite its
                    // points in place, padding with placeholders so
                    // the block keeps its original size.
                    (Some(SeekTable { points }), _) => {
                        let points_len = points.len();
                        points.clear();
                        points
                            .try_extend(
                                encoded_points
                                    .into_iter()
                                    .map(|p| p.into())
                                    .chain(std::iter::repeat(SeekPoint::Placeholder))
                                    .take(points_len),
                            )
                            .unwrap();
                    }
                    // No seektable, but padding exists: carve the new
                    // seektable out of the padding so the total
                    // metadata size stays unchanged.
                    (None, Some(crate::metadata::Padding { size: padding_size })) => {
                        use crate::metadata::MetadataBlock;
                        let seektable = SeekTable {
                            points: encoded_points
                                .map(|p| p.into())
                                .collect::<Vec<_>>()
                                .try_into()
                                .unwrap(),
                        };
                        // If the seektable doesn't fit in the padding,
                        // it is silently dropped.
                        if let Some(new_padding_size) = seektable
                            .total_size()
                            .and_then(|seektable_size| padding_size.checked_sub(seektable_size))
                        {
                            *padding_size = new_padding_size;
                            self.blocks.insert(seektable);
                        }
                    }
                    // Nowhere to put seek points: skip them.
                    (None, None) => { }
                }
            }
            match &mut self.blocks.streaminfo_mut().total_samples {
                // A declared length must match what was written.
                Some(expected) => {
                    if expected.get() != self.samples_written {
                        return Err(Error::SampleCountMismatch);
                    }
                }
                // Unknown length: record the actual count now.
                expected @ None => {
                    if self.samples_written < Self::MAX_SAMPLES {
                        *expected =
                            Some(NonZero::new(self.samples_written).ok_or(Error::NoSamples)?);
                    } else {
                        return Err(Error::ExcessiveTotalSamples);
                    }
                }
            }
            self.blocks.streaminfo_mut().md5 = Some(self.md5.clone().finalize().0);
            // Rewind to the metadata area and rewrite it with the
            // finalized STREAMINFO and seektable.
            let writer = self.writer.stream();
            writer.seek(std::io::SeekFrom::Start(self.start))?;
            write_blocks(writer.by_ref(), self.blocks.blocks())
        } else {
            Ok(())
        }
    }
}
impl<W: std::io::Write + std::io::Seek> Drop for Encoder<W> {
    /// Best-effort finalization on drop; errors are discarded.
    fn drop(&mut self) {
        let _ = self.finalize_inner();
    }
}
// A seek point candidate recorded for each encoded frame.
#[derive(Debug, Clone)]
struct EncoderSeekPoint {
    // Offset of the frame's first sample within the stream.
    sample_offset: u64,
    // Frame's byte offset past the metadata blocks, or `None` for a
    // placeholder point.
    byte_offset: Option<u64>,
    // Number of samples in the frame.
    frame_samples: u16,
}
impl EncoderSeekPoint {
    /// Yields one placeholder seek point per FLAC frame for a stream
    /// of `total_samples` samples split into `block_size`-sample frames.
    fn placeholders(total_samples: u64, block_size: u16) -> impl Iterator<Item = EncoderSeekPoint> {
        let step = usize::from(block_size);
        (0..total_samples).step_by(step).map(move |sample_offset| {
            let remaining = total_samples - sample_offset;
            // The final frame may be shorter than the block size; a
            // remainder too large for u16 means a full frame remains.
            let frame_samples = match u16::try_from(remaining) {
                Ok(samples) => samples.min(block_size),
                Err(_) => block_size,
            };
            EncoderSeekPoint {
                sample_offset,
                byte_offset: None,
                frame_samples,
            }
        })
    }

    /// The half-open range of sample offsets covered by this frame.
    fn range(&self) -> std::ops::Range<u64> {
        let start = self.sample_offset;
        start..start + u64::from(self.frame_samples)
    }
}
impl From<EncoderSeekPoint> for SeekPoint {
    /// A point with a known byte offset becomes a defined seek point;
    /// one without becomes a placeholder.
    fn from(p: EncoderSeekPoint) -> Self {
        if let Some(byte_offset) = p.byte_offset {
            Self::Defined {
                sample_offset: p.sample_offset,
                byte_offset,
                frame_samples: p.frame_samples,
            }
        } else {
            Self::Placeholder
        }
    }
}
/// Reads an existing FLAC stream and builds a seektable for it at the
/// given interval.
///
/// Seek point byte offsets are measured from the end of the metadata
/// blocks (i.e. from the first audio frame).
///
/// # Errors
///
/// Returns an error if the stream cannot be parsed.
pub fn generate_seektable<R: std::io::Read>(
    r: R,
    interval: SeekTableInterval,
) -> Result<crate::metadata::SeekTable, Error> {
    use crate::{
        metadata::{Metadata, SeekTable},
        stream::FrameIterator,
    };
    let iter = FrameIterator::new(r)?;
    let metadata_len = iter.metadata_len();
    let sample_rate = iter.sample_rate();
    let mut sample_offset = 0;
    // One candidate seek point per frame, then thinned by `interval`.
    iter.map(|r| {
        r.map(|(frame, offset)| EncoderSeekPoint {
            sample_offset,
            // Offsets are relative to the end of the metadata.
            byte_offset: Some(offset - metadata_len),
            frame_samples: frame.header.block_size.into(),
        })
        .inspect(|p| {
            sample_offset += u64::from(p.frame_samples);
        })
    })
    .collect::<Result<Vec<_>, _>>()
    .map(|seekpoints| SeekTable {
        points: interval
            .filter(sample_rate, seekpoints)
            .take(SeekTable::MAX_POINTS)
            .map(|p| p.into())
            .collect::<Vec<_>>()
            .try_into()
            .unwrap(),
    })
}
/// Encodes one complete FLAC frame — frame header, one subframe per
/// channel, and a CRC-16 footer — to `writer`, then advances
/// `frame_number` and folds the frame's byte size into the
/// minimum/maximum frame-size fields of `streaminfo`.
///
/// Channel handling:
/// - 1 channel:   independent mono subframe
/// - 2 channels:  stereo correlation (exhaustive or heuristic, depending
///   on `options.exhaustive_channel_correlation`)
/// - 3+ channels: independent subframes, one per channel
fn encode_frame<W>(
options: &EncoderOptions,
cache: &mut EncodingCaches,
mut writer: W,
streaminfo: &mut Streaminfo,
frame_number: &mut FrameNumber,
sample_rate: SampleRate<u32>,
frame: ArrayVec<&[i32], MAX_CHANNELS>,
) -> Result<(), Error>
where
W: std::io::Write,
{
use crate::Counter;
use crate::crc::{Crc16, CrcWriter};
use crate::stream::FrameHeader;
use bitstream_io::BigEndian;
debug_assert!(!frame.is_empty());
// Counter tracks total bytes written (for the frame-size stats below);
// CrcWriter accumulates the CRC-16 over the entire frame as it is written.
let size = Counter::new(writer.by_ref());
let mut w: CrcWriter<_, Crc16> = CrcWriter::new(size);
let mut bw: BitWriter<CrcWriter<Counter<&mut W>, Crc16>, BigEndian>;
match frame.as_slice() {
// single channel: one independent mono subframe
[channel] => {
FrameHeader {
blocking_strategy: false,
frame_number: *frame_number,
block_size: (channel.len() as u16)
.try_into()
.expect("frame cannot be empty"),
sample_rate,
bits_per_sample: streaminfo.bits_per_sample.into(),
channel_assignment: ChannelAssignment::Independent(Independent::Mono),
}
.write(&mut w, streaminfo)?;
bw = BitWriter::new(w);
cache.channels.resize_with(1, ChannelCache::default);
encode_subframe(
options,
&mut cache.channels[0],
CorrelatedChannel::independent(streaminfo.bits_per_sample, channel),
)?
.playback(&mut bw)?;
}
// stereo, exhaustive mode: every candidate channel was fully encoded,
// so the winning pair of recordings is simply played back
[left, right] if options.exhaustive_channel_correlation => {
let Correlated {
channel_assignment,
channels: [channel_0, channel_1],
} = correlate_channels_exhaustive(
options,
&mut cache.correlated,
[left, right],
streaminfo.bits_per_sample,
)?;
FrameHeader {
blocking_strategy: false,
frame_number: *frame_number,
block_size: (frame[0].len() as u16)
.try_into()
.expect("frame cannot be empty"),
sample_rate,
bits_per_sample: streaminfo.bits_per_sample.into(),
channel_assignment,
}
.write(&mut w, streaminfo)?;
bw = BitWriter::new(w);
channel_0.playback(&mut bw)?;
channel_1.playback(&mut bw)?;
}
// stereo, heuristic mode: assignment chosen by a magnitude estimate,
// then the two chosen subframes are encoded in parallel
[left, right] => {
let Correlated {
channel_assignment,
channels: [channel_0, channel_1],
} = correlate_channels(
options,
&mut cache.correlated,
[left, right],
streaminfo.bits_per_sample,
);
FrameHeader {
blocking_strategy: false,
frame_number: *frame_number,
block_size: (frame[0].len() as u16)
.try_into()
.expect("frame cannot be empty"),
sample_rate,
bits_per_sample: streaminfo.bits_per_sample.into(),
channel_assignment,
}
.write(&mut w, streaminfo)?;
cache.channels.resize_with(2, ChannelCache::default);
let [cache_0, cache_1] = cache.channels.get_disjoint_mut([0, 1]).unwrap();
let (channel_0, channel_1) = join(
|| encode_subframe(options, cache_0, channel_0),
|| encode_subframe(options, cache_1, channel_1),
);
bw = BitWriter::new(w);
channel_0?.playback(&mut bw)?;
channel_1?.playback(&mut bw)?;
}
// 3+ channels: no correlation defined — always independent subframes
channels => {
FrameHeader {
blocking_strategy: false,
frame_number: *frame_number,
block_size: (channels[0].len() as u16)
.try_into()
.expect("frame cannot be empty"),
sample_rate,
bits_per_sample: streaminfo.bits_per_sample.into(),
channel_assignment: ChannelAssignment::Independent(
frame.len().try_into().expect("invalid channel count"),
),
}
.write(&mut w, streaminfo)?;
bw = BitWriter::new(w);
cache
.channels
.resize_with(channels.len(), ChannelCache::default);
vec_map(
cache.channels.iter_mut().zip(channels).collect(),
|(cache, channel)| {
encode_subframe(
options,
cache,
CorrelatedChannel::independent(streaminfo.bits_per_sample, channel),
)
},
)
.into_iter()
.try_for_each(|r| r.and_then(|r| r.playback(bw.by_ref()).map_err(Error::Io)))?;
}
}
// byte-align the frame and append its CRC-16 footer
let crc16: u16 = bw.aligned_writer()?.checksum().into();
bw.write_from(crc16)?;
frame_number.try_increment()?;
// Record frame-size statistics only when the byte count is nonzero,
// fits in a u32, and is below the STREAMINFO field's maximum.
if let s @ Some(size) = u32::try_from(bw.into_writer().into_writer().count)
.ok()
.filter(|size| *size < Streaminfo::MAX_FRAME_SIZE)
.and_then(NonZero::new)
{
match &mut streaminfo.minimum_frame_size {
Some(min_size) => {
*min_size = size.min(*min_size);
}
min_size @ None => {
*min_size = s;
}
}
match &mut streaminfo.maximum_frame_size {
Some(max_size) => {
*max_size = size.max(*max_size);
}
max_size @ None => {
*max_size = s;
}
}
}
Ok(())
}
/// A pair of encoded (or to-be-encoded) channels together with the stereo
/// channel assignment that produced them.
struct Correlated<C> {
channel_assignment: ChannelAssignment,
/// channel 0 and channel 1 payloads, in stream order
channels: [C; 2],
}
/// One channel awaiting subframe encoding.
struct CorrelatedChannel<'c> {
samples: &'c [i32],
/// effective bit depth for this channel (a difference/side channel
/// carries one extra bit over the stream's bits-per-sample)
bits_per_sample: SignedBitCount<32>,
/// true when every sample is zero, enabling a CONSTANT subframe
all_0: bool,
}
impl<'c> CorrelatedChannel<'c> {
    /// Wraps an uncorrelated (independent) channel, pre-computing whether
    /// every sample is zero so the encoder can emit a CONSTANT subframe.
    fn independent(bits_per_sample: SignedBitCount<32>, samples: &'c [i32]) -> Self {
        let all_0 = !samples.iter().any(|sample| *sample != 0);
        Self {
            samples,
            bits_per_sample,
            all_0,
        }
    }
}
/// Chooses a stereo channel assignment with a fast heuristic: each
/// candidate assignment's cost is estimated from the summed sample
/// magnitudes of its two channels rather than by actually encoding every
/// candidate (contrast with `correlate_channels_exhaustive`).
///
/// A difference (side) channel needs one extra bit per sample; when
/// `bits_per_sample` is already 32 that extra bit is unavailable and the
/// channels are left independent.
fn correlate_channels<'c>(
options: &EncoderOptions,
CorrelationCache {
average_samples,
difference_samples,
..
}: &'c mut CorrelationCache,
[left, right]: [&'c [i32]; 2],
bits_per_sample: SignedBitCount<32>,
) -> Correlated<CorrelatedChannel<'c>> {
// a side (difference) channel needs bits_per_sample + 1 bits
match bits_per_sample.checked_add::<32>(1) {
Some(difference_bits_per_sample) if options.mid_side => {
let mut left_abs_sum = 0;
let mut right_abs_sum = 0;
let mut mid_abs_sum = 0;
let mut side_abs_sum = 0;
// Build the mid (average) and side (difference) channels in
// parallel, accumulating per-channel magnitude sums as we go.
join(
|| {
average_samples.clear();
average_samples.extend(
left.iter()
.inspect(|s| left_abs_sum += u64::from(s.unsigned_abs()))
.zip(
right
.iter()
.inspect(|s| right_abs_sum += u64::from(s.unsigned_abs())),
)
// mid = floor((l + r) / 2)
.map(|(l, r)| (l + r) >> 1)
.inspect(|s| mid_abs_sum += u64::from(s.unsigned_abs())),
);
},
|| {
difference_samples.clear();
difference_samples.extend(
left.iter()
.zip(right)
.map(|(l, r)| l - r)
.inspect(|s| side_abs_sum += u64::from(s.unsigned_abs())),
);
},
);
// pick the assignment whose two channels have the smallest
// combined magnitude estimate
match [
(
ChannelAssignment::Independent(Independent::Stereo),
left_abs_sum + right_abs_sum,
),
(ChannelAssignment::LeftSide, left_abs_sum + side_abs_sum),
(ChannelAssignment::SideRight, side_abs_sum + right_abs_sum),
(ChannelAssignment::MidSide, mid_abs_sum + side_abs_sum),
]
.into_iter()
.min_by_key(|(_, total)| *total)
.unwrap()
.0
{
channel_assignment @ ChannelAssignment::LeftSide => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: left,
bits_per_sample,
all_0: left_abs_sum == 0,
},
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: side_abs_sum == 0,
},
],
},
channel_assignment @ ChannelAssignment::SideRight => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: side_abs_sum == 0,
},
CorrelatedChannel {
samples: right,
bits_per_sample,
all_0: right_abs_sum == 0,
},
],
},
channel_assignment @ ChannelAssignment::MidSide => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: average_samples,
bits_per_sample,
all_0: mid_abs_sum == 0,
},
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: side_abs_sum == 0,
},
],
},
channel_assignment @ ChannelAssignment::Independent(_) => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: left,
bits_per_sample,
all_0: left_abs_sum == 0,
},
CorrelatedChannel {
samples: right,
bits_per_sample,
all_0: right_abs_sum == 0,
},
],
},
}
}
// mid/side disabled: only consider left/side, side/right, and
// independent (no mid channel is built)
Some(difference_bits_per_sample) => {
let mut left_abs_sum = 0;
let mut right_abs_sum = 0;
let mut side_abs_sum = 0;
difference_samples.clear();
difference_samples.extend(
left.iter()
.inspect(|s| left_abs_sum += u64::from(s.unsigned_abs()))
.zip(
right
.iter()
.inspect(|s| right_abs_sum += u64::from(s.unsigned_abs())),
)
.map(|(l, r)| l - r)
.inspect(|s| side_abs_sum += u64::from(s.unsigned_abs())),
);
match [
(ChannelAssignment::LeftSide, left_abs_sum + side_abs_sum),
(ChannelAssignment::SideRight, side_abs_sum + right_abs_sum),
(
ChannelAssignment::Independent(Independent::Stereo),
left_abs_sum + right_abs_sum,
),
]
.into_iter()
.min_by_key(|(_, total)| *total)
.unwrap()
.0
{
channel_assignment @ ChannelAssignment::LeftSide => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: left,
bits_per_sample,
all_0: left_abs_sum == 0,
},
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: side_abs_sum == 0,
},
],
},
channel_assignment @ ChannelAssignment::SideRight => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: side_abs_sum == 0,
},
CorrelatedChannel {
samples: right,
bits_per_sample,
all_0: right_abs_sum == 0,
},
],
},
// MidSide is not among the candidates above
ChannelAssignment::MidSide => unreachable!(),
channel_assignment @ ChannelAssignment::Independent(_) => Correlated {
channel_assignment,
channels: [
CorrelatedChannel {
samples: left,
bits_per_sample,
all_0: left_abs_sum == 0,
},
CorrelatedChannel {
samples: right,
bits_per_sample,
all_0: right_abs_sum == 0,
},
],
},
}
}
// bits_per_sample is already 32: a side channel cannot be widened,
// so leave the channels independent
None => {
Correlated {
channel_assignment: ChannelAssignment::Independent(Independent::Stereo),
channels: [
CorrelatedChannel::independent(bits_per_sample, left),
CorrelatedChannel::independent(bits_per_sample, right),
],
}
}
}
}
/// Chooses a stereo channel assignment by fully encoding every candidate
/// channel (left, right, and — when the extra side bit is available — mid
/// and side) and comparing the actual recorded bit counts, returning
/// borrowed recordings of the winning pair.
///
/// `all_0: false` is safe here: `encode_subframe`'s wasted-bits scan still
/// detects an all-zero channel and emits a CONSTANT subframe.
fn correlate_channels_exhaustive<'c>(
options: &EncoderOptions,
CorrelationCache {
average_samples,
difference_samples,
left_cache,
right_cache,
average_cache,
difference_cache,
..
}: &'c mut CorrelationCache,
[left, right]: [&'c [i32]; 2],
bits_per_sample: SignedBitCount<32>,
) -> Result<Correlated<&'c BitRecorder<u32, BigEndian>>, Error> {
// left and right are needed by every assignment, so encode them first
// (in parallel)
let (left_recorder, right_recorder) = try_join(
|| {
encode_subframe(
options,
left_cache,
CorrelatedChannel {
samples: left,
bits_per_sample,
all_0: false,
},
)
},
|| {
encode_subframe(
options,
right_cache,
CorrelatedChannel {
samples: right,
bits_per_sample,
all_0: false,
},
)
},
)?;
// a side (difference) channel needs bits_per_sample + 1 bits
match bits_per_sample.checked_add::<32>(1) {
Some(difference_bits_per_sample) if options.mid_side => {
// build and encode the mid and side channels in parallel
let (average_recorder, difference_recorder) = try_join(
|| {
average_samples.clear();
average_samples
.extend(left.iter().zip(right.iter()).map(|(l, r)| (l + r) >> 1));
encode_subframe(
options,
average_cache,
CorrelatedChannel {
samples: average_samples,
bits_per_sample,
all_0: false,
},
)
},
|| {
difference_samples.clear();
difference_samples.extend(left.iter().zip(right).map(|(l, r)| l - r));
encode_subframe(
options,
difference_cache,
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: false,
},
)
},
)?;
// pick the assignment with the smallest actual encoded size
match [
(
ChannelAssignment::Independent(Independent::Stereo),
left_recorder.written() + right_recorder.written(),
),
(
ChannelAssignment::LeftSide,
left_recorder.written() + difference_recorder.written(),
),
(
ChannelAssignment::SideRight,
difference_recorder.written() + right_recorder.written(),
),
(
ChannelAssignment::MidSide,
average_recorder.written() + difference_recorder.written(),
),
]
.into_iter()
.min_by_key(|(_, total)| *total)
.unwrap()
.0
{
channel_assignment @ ChannelAssignment::LeftSide => Ok(Correlated {
channel_assignment,
channels: [left_recorder, difference_recorder],
}),
channel_assignment @ ChannelAssignment::SideRight => Ok(Correlated {
channel_assignment,
channels: [difference_recorder, right_recorder],
}),
channel_assignment @ ChannelAssignment::MidSide => Ok(Correlated {
channel_assignment,
channels: [average_recorder, difference_recorder],
}),
channel_assignment @ ChannelAssignment::Independent(_) => Ok(Correlated {
channel_assignment,
channels: [left_recorder, right_recorder],
}),
}
}
// mid/side disabled: only the side channel is built and compared
Some(difference_bits_per_sample) => {
let difference_recorder = {
difference_samples.clear();
difference_samples.extend(left.iter().zip(right).map(|(l, r)| l - r));
encode_subframe(
options,
difference_cache,
CorrelatedChannel {
samples: difference_samples,
bits_per_sample: difference_bits_per_sample,
all_0: false,
},
)?
};
match [
(
ChannelAssignment::Independent(Independent::Stereo),
left_recorder.written() + right_recorder.written(),
),
(
ChannelAssignment::LeftSide,
left_recorder.written() + difference_recorder.written(),
),
(
ChannelAssignment::SideRight,
difference_recorder.written() + right_recorder.written(),
),
]
.into_iter()
.min_by_key(|(_, total)| *total)
.unwrap()
.0
{
channel_assignment @ ChannelAssignment::LeftSide => Ok(Correlated {
channel_assignment,
channels: [left_recorder, difference_recorder],
}),
channel_assignment @ ChannelAssignment::SideRight => Ok(Correlated {
channel_assignment,
channels: [difference_recorder, right_recorder],
}),
// MidSide is not among the candidates above
ChannelAssignment::MidSide => unreachable!(),
channel_assignment @ ChannelAssignment::Independent(_) => Ok(Correlated {
channel_assignment,
channels: [left_recorder, right_recorder],
}),
}
}
// bits_per_sample is already 32: no room for a side channel
None => {
Ok(Correlated {
channel_assignment: ChannelAssignment::Independent(Independent::Stereo),
channels: [left_recorder, right_recorder],
})
}
}
}
/// Encodes one channel into the best-fitting subframe, returning a
/// borrowed recording of the winner.
///
/// Strategy:
/// 1. all-zero channel             -> CONSTANT subframe
/// 2. strip shared trailing zeroes -> "wasted bits" (32 shared wasted
///    bits is another all-zero/CONSTANT case)
/// 3. try FIXED and (when an LPC order is configured) LPC in parallel,
///    keeping whichever recorded fewer bits
/// 4. fall back to VERBATIM when both fail, or when neither beats the raw
///    sample cost
fn encode_subframe<'c>(
options: &EncoderOptions,
ChannelCache {
fixed: fixed_cache,
fixed_output,
lpc: lpc_cache,
lpc_output,
constant_output,
verbatim_output,
wasted,
}: &'c mut ChannelCache,
CorrelatedChannel {
samples: channel,
bits_per_sample,
all_0,
}: CorrelatedChannel,
) -> Result<&'c BitRecorder<u32, BigEndian>, Error> {
const WASTED_MAX: NonZero<u32> = NonZero::new(32).unwrap();
debug_assert!(!channel.is_empty());
if all_0 {
constant_output.clear();
encode_constant_subframe(constant_output, channel[0], bits_per_sample, 0)?;
return Ok(constant_output);
}
// Fold the minimum shared trailing-zero count across all samples;
// any odd sample short-circuits to None (no wasted bits).
let (channel, bits_per_sample, wasted_bps) =
match channel.iter().try_fold(WASTED_MAX, |acc, sample| {
NonZero::new(sample.trailing_zeros()).map(|sample| sample.min(acc))
}) {
None => (channel, bits_per_sample, 0),
// trailing_zeros() of 0 is 32, so a shared count of 32 means
// every sample is zero: emit a CONSTANT subframe
Some(WASTED_MAX) => {
constant_output.clear();
encode_constant_subframe(constant_output, channel[0], bits_per_sample, 0)?;
return Ok(constant_output);
}
// shift out the shared zeroes and encode at a reduced bit depth
Some(wasted_bps) => {
let wasted_bps = wasted_bps.get();
wasted.clear();
wasted.extend(channel.iter().map(|sample| sample >> wasted_bps));
(
wasted.as_slice(),
bits_per_sample.checked_sub(wasted_bps).unwrap(),
wasted_bps,
)
}
};
fixed_output.clear();
let best = match options.max_lpc_order {
Some(max_lpc_order) => {
lpc_output.clear();
// encode FIXED and LPC candidates in parallel, then keep the
// smaller recording; if one side fails, use the other
match join(
|| {
encode_fixed_subframe(
options,
fixed_cache,
fixed_output,
channel,
bits_per_sample,
wasted_bps,
)
},
|| {
encode_lpc_subframe(
options,
max_lpc_order,
lpc_cache,
lpc_output,
channel,
bits_per_sample,
wasted_bps,
)
},
) {
(Ok(()), Ok(())) => [fixed_output, lpc_output]
.into_iter()
.min_by_key(|c| c.written())
.unwrap(),
(Err(_), Ok(())) => lpc_output,
(Ok(()), Err(_)) => fixed_output,
// both predictors failed: store the samples verbatim
(Err(_), Err(_)) => {
verbatim_output.clear();
encode_verbatim_subframe(
verbatim_output,
channel,
bits_per_sample,
wasted_bps,
)?;
return Ok(verbatim_output);
}
}
}
// LPC disabled: FIXED is the only predictor candidate
_ => {
match encode_fixed_subframe(
options,
fixed_cache,
fixed_output,
channel,
bits_per_sample,
wasted_bps,
) {
Ok(()) => fixed_output,
Err(_) => {
verbatim_output.clear();
encode_verbatim_subframe(
verbatim_output,
channel,
bits_per_sample,
wasted_bps,
)?;
return Ok(verbatim_output);
}
}
}
};
// raw cost of the (wasted-bit-reduced) samples alone; subframe header
// bits are not counted here
let verbatim_len = channel.len() as u32 * u32::from(bits_per_sample);
if best.written() < verbatim_len {
Ok(best)
} else {
verbatim_output.clear();
encode_verbatim_subframe(verbatim_output, channel, bits_per_sample, wasted_bps)?;
Ok(verbatim_output)
}
}
fn encode_constant_subframe<W: BitWrite>(
writer: &mut W,
sample: i32,
bits_per_sample: SignedBitCount<32>,
wasted_bps: u32,
) -> Result<(), Error> {
use crate::stream::{SubframeHeader, SubframeHeaderType};
writer.build(&SubframeHeader {
type_: SubframeHeaderType::Constant,
wasted_bps,
})?;
writer
.write_signed_counted(bits_per_sample, sample)
.map_err(Error::Io)
}
/// Writes a VERBATIM subframe: a subframe header followed by every sample
/// stored uncompressed at the given bit depth.
fn encode_verbatim_subframe<W: BitWrite>(
    writer: &mut W,
    channel: &[i32],
    bits_per_sample: SignedBitCount<32>,
    wasted_bps: u32,
) -> Result<(), Error> {
    use crate::stream::{SubframeHeader, SubframeHeaderType};
    let header = SubframeHeader {
        type_: SubframeHeaderType::Verbatim,
        wasted_bps,
    };
    writer.build(&header)?;
    for sample in channel {
        writer.write_signed_counted(bits_per_sample, *sample)?;
    }
    Ok(())
}
/// Writes a FIXED-predictor subframe, choosing the fixed order (0..=4)
/// whose residuals have the smallest total magnitude.
///
/// Order-k residuals are the successive differences of the order-(k-1)
/// residuals; the first k samples become the warm-up samples.
fn encode_fixed_subframe<W: BitWrite>(
options: &EncoderOptions,
FixedCache {
fixed_buffers: buffers,
}: &mut FixedCache,
writer: &mut W,
channel: &[i32],
bits_per_sample: SignedBitCount<32>,
wasted_bps: u32,
) -> Result<(), Error> {
use crate::stream::{SubframeHeader, SubframeHeaderType};
let (order, warm_up, residuals) = {
// fixed_orders[k] holds the order-k residuals (order 0 = the samples)
let mut fixed_orders = ArrayVec::<&[i32], 5>::new();
fixed_orders.push(channel);
'outer: for buf in buffers.iter_mut() {
let prev_order = fixed_orders.last().unwrap();
match prev_order.split_at_checked(1) {
Some((_, r)) => {
buf.clear();
for (n, p) in r.iter().zip(*prev_order) {
match n.checked_sub(*p) {
Some(v) => {
buf.push(v);
}
// i32 subtraction overflowed: abandon this and
// all deeper orders
None => break 'outer,
}
}
if buf.is_empty() {
break;
} else {
fixed_orders.push(buf.as_slice());
}
}
None => break,
}
}
// Compare each candidate order over equal-length tails (the length
// of the deepest order's residuals) so the magnitude sums are
// directly comparable.
let min_fixed = fixed_orders.last().unwrap().len();
fixed_orders
.into_iter()
.enumerate()
.min_by_key(|(_, residuals)| {
residuals[(residuals.len() - min_fixed)..]
.iter()
.map(|r| u64::from(r.unsigned_abs()))
.sum::<u64>()
})
// warm-up samples are the first `order` original samples
.map(|(order, residuals)| (order as u8, &channel[0..order], residuals))
.unwrap()
};
writer.build(&SubframeHeader {
type_: SubframeHeaderType::Fixed { order },
wasted_bps,
})?;
warm_up
.iter()
.try_for_each(|sample: &i32| writer.write_signed_counted(bits_per_sample, *sample))?;
write_residuals(options, writer, order.into(), residuals)
}
/// Writes an LPC subframe: computes the best linear-prediction model for
/// the channel, then emits the subframe header, warm-up samples,
/// coefficient precision (4 bits, stored as precision - 1), shift
/// (5 bits), quantized coefficients, and the Rice-coded residuals.
fn encode_lpc_subframe<W: BitWrite>(
options: &EncoderOptions,
max_lpc_order: NonZero<u8>,
cache: &mut LpcCache,
writer: &mut W,
channel: &[i32],
bits_per_sample: SignedBitCount<32>,
wasted_bps: u32,
) -> Result<(), Error> {
use crate::stream::{SubframeHeader, SubframeHeaderType};
let LpcSubframeParameters {
warm_up,
residuals,
parameters:
LpcParameters {
order,
precision,
shift,
coefficients,
},
} = LpcSubframeParameters::best(options, bits_per_sample, max_lpc_order, cache, channel)?;
writer.build(&SubframeHeader {
type_: SubframeHeaderType::Lpc { order },
wasted_bps,
})?;
for sample in warm_up {
writer.write_signed_counted(bits_per_sample, *sample)?;
}
// precision is stored as (precision - 1) in a 4-bit field
writer.write_count::<0b1111>(
precision
.count()
.checked_sub(1)
.ok_or(Error::InvalidQlpPrecision)?,
)?;
writer.write::<5, i32>(shift as i32)?;
for coeff in coefficients {
writer.write_signed_counted(precision, coeff)?;
}
write_residuals(options, writer, order.get().into(), residuals)
}
/// A fully-specified LPC subframe: the quantized model plus borrowed
/// warm-up samples and computed residuals.
struct LpcSubframeParameters<'w, 'r> {
parameters: LpcParameters,
/// the first `order` samples of the channel, stored verbatim
warm_up: &'w [i32],
residuals: &'r [i32],
}
impl<'w, 'r> LpcSubframeParameters<'w, 'r> {
/// Computes the best LPC model for `channel` and encodes its residuals,
/// borrowing warm-up samples from the channel and residual storage from
/// the cache.
fn best(
options: &EncoderOptions,
bits_per_sample: SignedBitCount<32>,
max_lpc_order: NonZero<u8>,
LpcCache {
residuals,
window,
windowed,
}: &'r mut LpcCache,
channel: &'w [i32],
) -> Result<Self, Error> {
let parameters = LpcParameters::best(
options,
bits_per_sample,
max_lpc_order,
window,
windowed,
channel,
)?;
Self::encode_residuals(&parameters, channel, residuals)
.map(|(warm_up, residuals)| Self {
warm_up,
residuals,
parameters,
})
.map_err(|ResidualOverflow| Error::ResidualOverflow)
}
/// Computes prediction residuals for every sample past the warm-up
/// region: `residual = sample - ((Σ prev[i] · coeff[i]) >> shift)`,
/// where `prev` is ordered most-recent-first (coefficients[0] applies
/// to the most recent sample).
///
/// Returns the warm-up slice and the residual slice, or
/// `ResidualOverflow` if any residual falls outside `i32` range.
fn encode_residuals(
parameters: &LpcParameters,
channel: &'w [i32],
residuals: &'r mut Vec<i32>,
) -> Result<(&'w [i32], &'r [i32]), ResidualOverflow> {
residuals.clear();
for split in usize::from(parameters.order.get())..channel.len() {
let (previous, current) = channel.split_at(split);
residuals.push(
current[0]
.checked_sub(
// prediction: dot product of the most recent
// `order` samples with the coefficients, then
// arithmetic right-shift
(previous
.iter()
.rev()
.zip(&parameters.coefficients)
.map(|(x, y)| *x as i64 * *y as i64)
.sum::<i64>()
>> parameters.shift) as i32,
)
.ok_or(ResidualOverflow)?,
);
}
Ok((
&channel[0..parameters.order.get().into()],
residuals.as_slice(),
))
}
}
/// Marker error: an LPC residual did not fit in an `i32`.
#[derive(Debug)]
struct ResidualOverflow;
impl From<ResidualOverflow> for Error {
#[inline]
fn from(_: ResidualOverflow) -> Self {
Error::ResidualOverflow
}
}
// Reference vectors for order-2 LPC residual encoding (rising sine quadrant).
#[test]
fn test_residual_encoding_1() {
let samples = [
0, 16, 31, 44, 54, 61, 64, 63, 58, 49, 38, 24, 8, -8, -24, -38, -49, -58, -63, -64, -61,
-54, -44, -31, -16,
];
let expected_residuals = [
2, 2, 2, 3, 3, 3, 2, 2, 3, 0, 0, 0, -1, -1, -1, -3, -2, -2, -2, -1, -1, 0, 0,
];
let mut actual_residuals = Vec::with_capacity(expected_residuals.len());
let (warm_up, residuals) = LpcSubframeParameters::encode_residuals(
&LpcParameters {
order: NonZero::new(2).unwrap(),
precision: SignedBitCount::new::<7>(),
shift: 5,
coefficients: arrayvec![59, -30],
},
&samples,
&mut actual_residuals,
)
.unwrap();
// warm-up is the first `order` (2) samples, stored verbatim
assert_eq!(warm_up, &samples[0..2]);
assert_eq!(residuals, &expected_residuals);
}
// Reference vectors for order-2 LPC residual encoding (cosine-like input).
#[test]
fn test_residual_encoding_2() {
let samples = [
64, 62, 56, 47, 34, 20, 4, -12, -27, -41, -52, -60, -63, -63, -60, -52, -41, -27, -12, 4,
20, 34, 47, 56, 62,
];
let expected_residuals = [
2, 2, 0, 1, -1, -1, -1, -2, -2, -2, -1, -3, -2, 0, -1, 1, 0, 2, 2, 2, 4, 2, 4,
];
let mut actual_residuals = Vec::with_capacity(expected_residuals.len());
let (warm_up, residuals) = LpcSubframeParameters::encode_residuals(
&LpcParameters {
order: NonZero::new(2).unwrap(),
precision: SignedBitCount::new::<7>(),
shift: 5,
coefficients: arrayvec![58, -29],
},
&samples,
&mut actual_residuals,
)
.unwrap();
// warm-up is the first `order` (2) samples, stored verbatim
assert_eq!(warm_up, &samples[0..2]);
assert_eq!(residuals, &expected_residuals);
}
/// A quantized LPC model: predictor order, coefficient precision, shift,
/// and the quantized coefficients (ordered most-recent-sample first).
#[derive(Debug)]
struct LpcParameters {
order: NonZero<u8>,
precision: SignedBitCount<15>,
/// arithmetic right-shift applied to the raw prediction sum
shift: u32,
coefficients: ArrayVec<i32, MAX_LPC_COEFFS>,
}
impl LpcParameters {
/// Computes the best LPC model for `channel`: windows the samples,
/// autocorrelates, runs Levinson-Durbin, picks the order with the
/// smallest estimated subframe size, and quantizes the coefficients.
///
/// Coefficient precision is chosen from the block size (larger blocks
/// get more precision).
fn best(
options: &EncoderOptions,
bits_per_sample: SignedBitCount<32>,
max_lpc_order: NonZero<u8>,
window: &mut Vec<f64>,
windowed: &mut Vec<f64>,
channel: &[i32],
) -> Result<Self, Error> {
// an order-N predictor needs more than N samples (some residuals)
if channel.len() <= usize::from(max_lpc_order.get()) {
return Err(Error::InsufficientLpcSamples);
}
let precision = match channel.len() {
0 => panic!("at least one sample required in channel"),
1..=192 => SignedBitCount::new::<7>(),
193..=384 => SignedBitCount::new::<8>(),
385..=576 => SignedBitCount::new::<9>(),
577..=1152 => SignedBitCount::new::<10>(),
1153..=2304 => SignedBitCount::new::<11>(),
2305..=4608 => SignedBitCount::new::<12>(),
4609.. => SignedBitCount::new::<13>(),
};
let (order, lp_coeffs) = compute_best_order(
bits_per_sample,
precision,
channel
.len()
.try_into()
.expect("excessive samples for subframe"),
lp_coefficients(autocorrelate(
options.window.apply(window, windowed, channel),
max_lpc_order,
)),
)?;
Self::quantize(order, lp_coeffs, precision)
}
/// Quantizes floating-point LP coefficients to integers at the given
/// precision, choosing a shift so the largest coefficient uses the
/// available range, and feeding each coefficient's rounding error into
/// the next (error-feedback quantization).
fn quantize(
order: NonZero<u8>,
coeffs: ArrayVec<f64, MAX_LPC_COEFFS>,
precision: SignedBitCount<15>,
) -> Result<Self, Error> {
const MAX_SHIFT: i32 = (1 << 4) - 1;
const MIN_SHIFT: i32 = -(1 << 4);
debug_assert!(coeffs.len() == usize::from(order.get()));
let max_coeff = (1 << (u32::from(precision) - 1)) - 1;
let min_coeff = -(1 << (u32::from(precision) - 1));
// largest |coefficient| determines how far the shift can scale up
let l = coeffs
.iter()
.map(|c| c.abs())
.max_by(|x, y| x.total_cmp(y))
.filter(|l| *l > 0.0)
.ok_or(Error::ZeroLpCoefficients)?;
let mut error = 0.0;
match ((u32::from(precision) - 1) as i32 - ((l.log2().floor()) as i32) - 1).min(MAX_SHIFT) {
shift @ 0.. => {
let shift = shift as u32;
Ok(Self {
order,
precision,
shift,
coefficients: coeffs
.into_iter()
.map(|lp_coeff| {
let sum: f64 = lp_coeff.mul_add((1 << shift) as f64, error);
let qlp_coeff = (sum.round() as i32).clamp(min_coeff, max_coeff);
// carry the rounding error into the next coefficient
error = sum - (qlp_coeff as f64);
qlp_coeff
})
.collect(),
})
}
// negative shift: scale the coefficients down instead and
// store a shift of zero in the stream
shift @ MIN_SHIFT..0 => {
let shift = -shift as u32;
Ok(Self {
order,
precision,
shift: 0,
coefficients: coeffs
.into_iter()
.map(|lp_coeff| {
let sum: f64 = (lp_coeff / (1 << shift) as f64) + error;
let qlp_coeff = (sum.round() as i32).clamp(min_coeff, max_coeff);
// carry the rounding error into the next coefficient
error = sum - (qlp_coeff as f64);
qlp_coeff
})
.collect(),
})
}
..MIN_SHIFT => Err(Error::LpNegativeShiftError),
}
}
}
// Exercises coefficient quantization: normal cases, all-zero rejection,
// a huge coefficient forcing shift 0, and a negative-shift error.
#[test]
fn test_quantization() {
let order = NonZero::new(4).unwrap();
let quantized = LpcParameters::quantize(
order,
arrayvec![0.797774, -0.045362, -0.050136, -0.054254],
SignedBitCount::new::<10>(),
)
.unwrap();
assert_eq!(quantized.order, order);
assert_eq!(quantized.precision, SignedBitCount::new::<10>());
assert_eq!(quantized.shift, 9);
assert_eq!(quantized.coefficients, arrayvec![408, -23, -25, -28]);
let quantized = LpcParameters::quantize(
order,
arrayvec![-0.054687, -0.953216, -0.027115, 0.033537],
SignedBitCount::new::<10>(),
)
.unwrap();
assert_eq!(quantized.order, order);
assert_eq!(quantized.precision, SignedBitCount::new::<10>());
assert_eq!(quantized.shift, 9);
assert_eq!(quantized.coefficients, arrayvec![-28, -488, -14, 17]);
// all-zero coefficients cannot be scaled and must be rejected
assert!(matches!(
LpcParameters::quantize(
order,
arrayvec![0.0, 0.0, 0.0, 0.0],
SignedBitCount::new::<10>(),
),
Err(Error::ZeroLpCoefficients)
));
let quantized = LpcParameters::quantize(
order,
arrayvec![-0.1, 0.1, 10000000.0, -0.2],
SignedBitCount::new::<10>(),
)
.unwrap();
assert_eq!(quantized.order, order);
assert_eq!(quantized.precision, SignedBitCount::new::<10>());
assert_eq!(quantized.shift, 0);
assert_eq!(quantized.coefficients, arrayvec![0, 0, 305, 0]);
// coefficient so large the required shift underflows MIN_SHIFT
assert!(matches!(
LpcParameters::quantize(
order,
arrayvec![-0.1, 0.1, 100000000.0, -0.2],
SignedBitCount::new::<10>(),
),
Err(Error::LpNegativeShiftError)
));
}
/// Computes autocorrelation values of `windowed` for lags
/// `0..=max_lpc_order`, stopping early once the lag exceeds the number of
/// available samples.
fn autocorrelate(
    windowed: &[f64],
    max_lpc_order: NonZero<u8>,
) -> ArrayVec<f64, { MAX_LPC_COEFFS + 1 }> {
    debug_assert!(usize::from(max_lpc_order.get()) < MAX_LPC_COEFFS);
    let mut autocorrelated = ArrayVec::default();
    for lag in 0..=usize::from(max_lpc_order.get()) {
        match windowed.get(lag..) {
            // dot product of the signal with itself shifted by `lag`
            Some(shifted) if !shifted.is_empty() => {
                autocorrelated.push(shifted.iter().zip(windowed).map(|(x, y)| x * y).sum());
            }
            // not enough samples for this lag: return what we have
            _ => return autocorrelated,
        }
    }
    autocorrelated
}
// Autocorrelation reference vectors, including the single-sample edge case.
#[test]
fn test_autocorrelation() {
assert_eq!(
autocorrelate(&[1.0], NonZero::new(1).unwrap()),
arrayvec![1.0]
);
assert_eq!(
autocorrelate(&[1.0, 2.0, 3.0, 4.0, 5.0], NonZero::new(4).unwrap()),
arrayvec![55.0, 40.0, 26.0, 14.0, 5.0],
);
assert_eq!(
autocorrelate(
&[
0.0, 16.0, 31.0, 44.0, 54.0, 61.0, 64.0, 63.0, 58.0, 49.0, 38.0, 24.0, 8.0, -8.0,
-24.0, -38.0, -49.0, -58.0, -63.0, -64.0, -61.0, -54.0, -44.0, -31.0, -16.0,
],
NonZero::new(4).unwrap()
),
arrayvec![51408.0, 49792.0, 45304.0, 38466.0, 29914.0],
)
}
/// One step of the Levinson-Durbin recursion: the LP coefficients at a
/// given order together with the model's remaining prediction error.
#[derive(Debug)]
struct LpCoeff {
coeffs: ArrayVec<f64, MAX_LPC_COEFFS>,
error: f64,
}
/// Levinson-Durbin recursion: converts autocorrelation values into LP
/// coefficients for every order from 1 to `autocorrelated.len() - 1`,
/// tracking the remaining model error at each order.
fn lp_coefficients(
autocorrelated: ArrayVec<f64, { MAX_LPC_COEFFS + 1 }>,
) -> ArrayVec<LpCoeff, MAX_LPC_COEFFS> {
match autocorrelated.len() {
0 | 1 => panic!("must have at least 2 autocorrelation values"),
_ => {
// order-1 base case: k = R[1] / R[0]
let k = autocorrelated[1] / autocorrelated[0];
let mut lp_coefficients = arrayvec![LpCoeff {
coeffs: arrayvec![k],
error: autocorrelated[0] * (1.0 - k.powi(2)),
}];
for i in 1..(autocorrelated.len() - 1) {
if let [prev @ .., next] = &autocorrelated[0..=i + 1] {
let LpCoeff { coeffs, error } = lp_coefficients.last().unwrap();
// reflection coefficient for this order:
// k = (R[i+1] - Σ R[i..1] · coeffs) / error
let q = next
- prev
.iter()
.rev()
.zip(coeffs)
.map(|(x, y)| x * y)
.sum::<f64>();
let k = q / error;
// new coeffs = old coeffs - k · reversed(old coeffs),
// with k appended as the final coefficient
lp_coefficients.push(LpCoeff {
coeffs: coeffs
.iter()
.zip(coeffs.iter().rev().map(|c| k * c))
.map(|(c1, c2)| c1 - c2)
.chain(std::iter::once(k))
.collect(),
error: error * (1.0 - k.powi(2)),
});
}
}
lp_coefficients
}
}
}
// Test helper: asserts two floats are within 1e-6 of each other.
#[allow(unused)]
macro_rules! assert_float_approx {
($a:expr, $b:expr) => {{
let a = $a;
let b = $b;
assert!((a - b).abs() < 1.0e-6, "{a} != {b}");
}};
}
// Levinson-Durbin reference vectors (matches test_autocorrelation case 2).
#[test]
fn test_lp_coefficients_1() {
let lp_coeffs = lp_coefficients(arrayvec![55.0, 40.0, 26.0, 14.0, 5.0]);
assert_eq!(lp_coeffs.len(), 4);
assert_float_approx!(lp_coeffs[0].error, 25.909091);
assert_float_approx!(lp_coeffs[1].error, 25.540351);
assert_float_approx!(lp_coeffs[2].error, 25.316142);
assert_float_approx!(lp_coeffs[3].error, 25.241623);
assert_eq!(lp_coeffs[0].coeffs.len(), 1);
assert_float_approx!(lp_coeffs[0].coeffs[0], 0.727273);
assert_eq!(lp_coeffs[1].coeffs.len(), 2);
assert_float_approx!(lp_coeffs[1].coeffs[0], 0.814035);
assert_float_approx!(lp_coeffs[1].coeffs[1], -0.119298);
assert_eq!(lp_coeffs[2].coeffs.len(), 3);
assert_float_approx!(lp_coeffs[2].coeffs[0], 0.802858);
assert_float_approx!(lp_coeffs[2].coeffs[1], -0.043028);
assert_float_approx!(lp_coeffs[2].coeffs[2], -0.093694);
assert_eq!(lp_coeffs[3].coeffs.len(), 4);
assert_float_approx!(lp_coeffs[3].coeffs[0], 0.797774);
assert_float_approx!(lp_coeffs[3].coeffs[1], -0.045362);
assert_float_approx!(lp_coeffs[3].coeffs[2], -0.050136);
assert_float_approx!(lp_coeffs[3].coeffs[3], -0.054254);
}
// Levinson-Durbin reference vectors (matches test_autocorrelation case 3).
#[test]
fn test_lp_coefficients_2() {
let lp_coeffs = lp_coefficients(arrayvec![51408.0, 49792.0, 45304.0, 38466.0, 29914.0]);
assert_eq!(lp_coeffs.len(), 4);
assert_float_approx!(lp_coeffs[0].error, 3181.201369);
assert_float_approx!(lp_coeffs[1].error, 495.815931);
assert_float_approx!(lp_coeffs[2].error, 495.161449);
assert_float_approx!(lp_coeffs[3].error, 494.604514);
assert_eq!(lp_coeffs[0].coeffs.len(), 1);
assert_float_approx!(lp_coeffs[0].coeffs[0], 0.968565);
assert_eq!(lp_coeffs[1].coeffs.len(), 2);
assert_float_approx!(lp_coeffs[1].coeffs[0], 1.858456);
assert_float_approx!(lp_coeffs[1].coeffs[1], -0.918772);
assert_eq!(lp_coeffs[2].coeffs.len(), 3);
assert_float_approx!(lp_coeffs[2].coeffs[0], 1.891837);
assert_float_approx!(lp_coeffs[2].coeffs[1], -0.986293);
assert_float_approx!(lp_coeffs[2].coeffs[2], 0.036332);
assert_eq!(lp_coeffs[3].coeffs.len(), 4);
assert_float_approx!(lp_coeffs[3].coeffs[0], 1.890618);
assert_float_approx!(lp_coeffs[3].coeffs[1], -0.953216);
assert_float_approx!(lp_coeffs[3].coeffs[2], -0.027115);
assert_float_approx!(lp_coeffs[3].coeffs[3], 0.033537);
}
/// Estimates, for each LPC order 1..=N, the total number of bits the
/// subframe would need at that order, yielding
/// `(estimated_bits, order, coefficients)` tuples for
/// `compute_best_order` to minimize over.
///
/// The residual cost uses the standard FLAC heuristic of
/// `0.5 * log2(error_scale * error)` bits per residual sample, clamped
/// to be non-negative (a residual can never cost negative bits).
fn subframe_bits_by_order(
    bits_per_sample: SignedBitCount<32>,
    precision: SignedBitCount<15>,
    sample_count: u16,
    coeffs: ArrayVec<LpCoeff, MAX_LPC_COEFFS>,
) -> impl Iterator<Item = (f64, u8, ArrayVec<f64, MAX_LPC_COEFFS>)> {
    debug_assert!(sample_count > 0);
    let error_scale = 0.5 / f64::from(sample_count);
    coeffs
        .into_iter()
        // a non-positive model error means the prediction is already
        // exact; higher orders cannot improve it
        .take_while(|coeffs| coeffs.error > 0.0)
        .zip(1..)
        .map(move |(LpCoeff { coeffs, error }, order)| {
            // fixed cost per order: warm-up samples + quantized coefficients
            let header_bits =
                u32::from(order) * (u32::from(bits_per_sample) + u32::from(precision));
            // 0.5 * log2(error_scale * error), clamped to >= 0 bits.
            // BUGFIX: the clamp previously applied to the constant divisor
            // `2 * ln 2` (a no-op), letting a small error produce a
            // negative bits-per-residual estimate.
            let bits_per_residual =
                ((error * error_scale).ln() / (2.0 * std::f64::consts::LN_2)).max(0.0);
            let subframe_bits = bits_per_residual.mul_add(
                f64::from(sample_count - u16::from(order)),
                f64::from(header_bits),
            );
            (subframe_bits, order, coeffs)
        })
}
fn compute_best_order(
bits_per_sample: SignedBitCount<32>,
precision: SignedBitCount<15>,
sample_count: u16,
coeffs: ArrayVec<LpCoeff, MAX_LPC_COEFFS>,
) -> Result<(NonZero<u8>, ArrayVec<f64, MAX_LPC_COEFFS>), Error> {
subframe_bits_by_order(bits_per_sample, precision, sample_count, coeffs)
.min_by(|(x, _, _), (y, _, _)| x.total_cmp(y))
.and_then(|(_, order, coeffs)| Some((NonZero::new(order)?, coeffs)))
.ok_or(Error::NoBestLpcOrder)
}
// Checks the per-order subframe-size estimates against reference values
// (the chosen order is the one with the smallest estimate).
#[test]
fn test_compute_best_order() {
let mut bits = subframe_bits_by_order(
SignedBitCount::new::<16>(),
SignedBitCount::new::<5>(),
20,
[3181.201369, 495.815931, 495.161449, 494.604514]
.into_iter()
.map(|error| LpCoeff {
coeffs: ArrayVec::default(),
error,
})
.collect(),
)
.map(|t| t.0);
assert_float_approx!(bits.next().unwrap(), 80.977565);
assert_float_approx!(bits.next().unwrap(), 74.685594);
assert_float_approx!(bits.next().unwrap(), 93.853530);
assert_float_approx!(bits.next().unwrap(), 113.025628);
let mut bits = subframe_bits_by_order(
SignedBitCount::new::<16>(),
SignedBitCount::new::<10>(),
4096,
[15000.0, 25000.0, 20000.0, 30000.0]
.into_iter()
.map(|error| LpCoeff {
coeffs: ArrayVec::default(),
error,
})
.collect(),
)
.map(|t| t.0);
assert_float_approx!(bits.next().unwrap(), 1812.801817);
assert_float_approx!(bits.next().unwrap(), 3346.934051);
assert_float_approx!(bits.next().unwrap(), 2713.303385);
assert_float_approx!(bits.next().unwrap(), 3935.492805);
}
fn write_residuals<W: BitWrite>(
options: &EncoderOptions,
writer: &mut W,
predictor_order: usize,
residuals: &[i32],
) -> Result<(), Error> {
use crate::stream::ResidualPartitionHeader;
use bitstream_io::{BitCount, ToBitStream};
const MAX_PARTITIONS: usize = 64;
/// One residual partition: its coding-method header and the residual
/// slice it covers.
#[derive(Debug)]
struct Partition<'r, const RICE_MAX: u32> {
header: ResidualPartitionHeader<RICE_MAX>,
residuals: &'r [i32],
}
impl<'r, const RICE_MAX: u32> Partition<'r, RICE_MAX> {
/// Selects a coding method (Rice, escaped, or constant) for one
/// partition and adds an estimate of its encoded size in bits to
/// `estimated_bits`.
///
/// Returns `None` for an empty partition or when a size computation
/// overflows, causing the caller to discard this partition count.
fn new(partition: &'r [i32], estimated_bits: &mut u32) -> Option<Self> {
let partition_samples = partition.len() as u16;
if partition_samples == 0 {
return None;
}
let partition_sum = partition
.iter()
.map(|i| u64::from(i.unsigned_abs()))
.sum::<u64>();
if partition_sum > 0 {
let rice = if partition_sum > partition_samples.into() {
// mean residual magnitude sets the Rice parameter estimate
let bits_needed = ((partition_sum as f64) / f64::from(partition_samples))
.log2()
.ceil() as u32;
match BitCount::try_from(bits_needed).ok().filter(|rice| {
u32::from(*rice) < u32::from(BitCount::<RICE_MAX>::new::<RICE_MAX>())
}) {
Some(rice) => rice,
// Rice parameter exceeds the field maximum: fall back to
// escaped (fixed-width) residual storage
None => {
let escape_size = (partition
.iter()
.map(|i| u64::from(i.unsigned_abs()))
.sum::<u64>()
.ilog2()
+ 2)
.try_into()
.ok()?;
*estimated_bits +=
u32::from(escape_size) * u32::from(partition_samples);
return Some(Self {
header: ResidualPartitionHeader::Escaped { escape_size },
residuals: partition,
});
}
}
} else {
BitCount::new::<0>()
};
// size estimate: 4-bit header + per-sample (1 stop bit + `rice`
// LSBs) + an estimated unary MSB cost from the magnitude sum
let partition_size: u32 = 4u32
+ ((1 + u32::from(rice)) * u32::from(partition_samples))
+ if u32::from(rice) > 0 {
u32::try_from(partition_sum >> (u32::from(rice) - 1)).ok()?
} else {
u32::try_from(partition_sum << 1).ok()?
}
- (u32::from(partition_samples) / 2);
*estimated_bits += partition_size;
Some(Partition {
header: ResidualPartitionHeader::Standard { rice },
residuals: partition,
})
} else {
// all-zero partition: a constant partition stores no residual
// data (and contributes nothing to the estimate)
Some(Partition {
header: ResidualPartitionHeader::Constant,
residuals: partition,
})
}
}
}
impl<const RICE_MAX: u32> ToBitStream for Partition<'_, RICE_MAX> {
type Error = std::io::Error;
/// Writes the partition header followed by its residuals in the
/// selected coding method.
#[inline]
fn to_writer<W: BitWrite + ?Sized>(&self, w: &mut W) -> Result<(), Self::Error> {
w.build(&self.header)?;
match self.header {
ResidualPartitionHeader::Standard { rice } => {
let mask = rice.mask_lsb();
self.residuals.iter().try_for_each(|s| {
// zigzag-map the signed residual to unsigned, then
// split into unary MSBs and `rice` binary LSBs
let (msb, lsb) = mask(if s.is_negative() {
((-*s as u32 - 1) << 1) + 1
} else {
(*s as u32) << 1
});
w.write_unary::<1>(msb)?;
w.write_checked(lsb)
})?;
}
ResidualPartitionHeader::Escaped { escape_size } => {
// escaped residuals are stored as fixed-width signed values
self.residuals
.iter()
.try_for_each(|s| w.write_signed_counted(escape_size, *s))?;
}
// constant partitions store no residual data
ResidualPartitionHeader::Constant => { }
}
Ok(())
}
}
/// Tries every allowed power-of-two partition count and returns the
/// partition set with the smallest estimated total size, falling back to
/// a single maximally-escaped partition when no candidate is valid.
fn best_partitions<'r, const RICE_MAX: u32>(
options: &EncoderOptions,
block_size: usize,
residuals: &'r [i32],
) -> ArrayVec<Partition<'r, RICE_MAX>, MAX_PARTITIONS> {
// partition counts must evenly divide the block size, so the order is
// bounded by the block size's trailing zeroes
(0..=block_size.trailing_zeros().min(options.max_partition_order))
.map(|partition_order| 1 << partition_order)
.take_while(|partition_count: &usize| partition_count.is_power_of_two())
.filter_map(|partition_count| {
let mut estimated_bits = 0;
// rchunks + rev keeps any short chunk at the front, matching
// the first partition (which is short by the predictor order)
let partitions = residuals
.rchunks(block_size / partition_count)
.rev()
.map(|partition| Partition::new(partition, &mut estimated_bits))
.collect::<Option<ArrayVec<_, MAX_PARTITIONS>>>()
.filter(|p| !p.is_empty() && p.len().is_power_of_two())?;
Some((partitions, estimated_bits))
})
.min_by_key(|(_, estimated_bits)| *estimated_bits)
.map(|(partitions, _)| partitions)
.unwrap_or_else(|| {
// no valid partitioning: store everything in one escaped
// partition at the maximum escape size
std::iter::once(Partition {
header: ResidualPartitionHeader::Escaped {
escape_size: SignedBitCount::new::<0b11111>(),
},
residuals,
})
.collect()
})
}
// Writes the 4-bit partition-order field followed by every partition body.
//
// The partition count is always a power of two, so its log2 is exactly
// the partition order stored in the stream.
fn write_partitions<const RICE_MAX: u32, W: BitWrite>(
    writer: &mut W,
    partitions: ArrayVec<Partition<'_, RICE_MAX>, MAX_PARTITIONS>,
) -> Result<(), Error> {
    // partition order = log2(partition count)
    writer.write::<4, u32>(partitions.len().ilog2())?;
    partitions
        .iter()
        .try_for_each(|partition| writer.build(partition))?;
    Ok(())
}
// Attempts to narrow a partition header's Rice parameter into a smaller
// maximum range; `Escaped` and `Constant` headers carry no Rice parameter
// and always convert, while `Standard` converts only when the parameter
// fits below `RICE_NEW_MAX`.
#[inline]
fn try_shrink_header<const RICE_MAX: u32, const RICE_NEW_MAX: u32>(
    header: ResidualPartitionHeader<RICE_MAX>,
) -> Option<ResidualPartitionHeader<RICE_NEW_MAX>> {
    match header {
        ResidualPartitionHeader::Standard { rice } => rice
            .try_map(|r| (r < RICE_NEW_MAX).then_some(r))
            .map(|rice| ResidualPartitionHeader::Standard { rice }),
        ResidualPartitionHeader::Escaped { escape_size } => {
            Some(ResidualPartitionHeader::Escaped { escape_size })
        }
        ResidualPartitionHeader::Constant => Some(ResidualPartitionHeader::Constant),
    }
}
// Residual coding method chosen for the partitions:
// `Rice` uses 4-bit Rice parameters (written as method 0), while
// `Rice2` uses 5-bit Rice parameters (written as method 1).
enum CodingMethod<'p> {
    Rice(ArrayVec<Partition<'p, 0b1111>, MAX_PARTITIONS>),
    Rice2(ArrayVec<Partition<'p, 0b11111>, MAX_PARTITIONS>),
}
// Attempts to narrow 5-bit (RICE2) partition headers down to 4-bit (RICE)
// ones; the cheaper method is used only when EVERY partition's Rice
// parameter fits in 4 bits, otherwise the originals are kept.
fn try_reduce_rice(
    partitions: ArrayVec<Partition<'_, 0b11111>, MAX_PARTITIONS>,
) -> CodingMethod<'_> {
    let narrowed: Option<ArrayVec<Partition<'_, 0b1111>, MAX_PARTITIONS>> = partitions
        .iter()
        .map(|partition| {
            let header = try_shrink_header(partition.header)?;
            Some(Partition {
                header,
                residuals: partition.residuals,
            })
        })
        .collect();
    match narrowed {
        // Every header fits the smaller parameter range.
        Some(narrowed) => CodingMethod::Rice(narrowed),
        // At least one partition needs the 5-bit encoding.
        None => CodingMethod::Rice2(partitions),
    }
}
let block_size = predictor_order + residuals.len();
if options.use_rice2 {
match try_reduce_rice(best_partitions(options, block_size, residuals)) {
CodingMethod::Rice(partitions) => {
writer.write::<2, u8>(0)?; write_partitions(writer, partitions)
}
CodingMethod::Rice2(partitions) => {
writer.write::<2, u8>(1)?; write_partitions(writer, partitions)
}
}
} else {
let partitions = best_partitions::<0b1111>(options, block_size, residuals);
writer.write::<2, u8>(0)?; write_partitions(writer, partitions)
}
}
// Runs both fallible closures via `join` (in parallel when the "rayon"
// feature is enabled, serially otherwise) and returns both results, or
// the first error — `oper_a`'s error takes precedence when both fail.
fn try_join<A, B, RA, RB, E>(oper_a: A, oper_b: B) -> Result<(RA, RB), E>
where
    A: FnOnce() -> Result<RA, E> + Send,
    B: FnOnce() -> Result<RB, E> + Send,
    RA: Send,
    RB: Send,
    E: Send,
{
    match join(oper_a, oper_b) {
        (Ok(a), Ok(b)) => Ok((a, b)),
        (Err(e), _) | (Ok(_), Err(e)) => Err(e),
    }
}
#[cfg(feature = "rayon")]
use rayon::join;
// Serial stand-in for `rayon::join` when the "rayon" feature is off:
// evaluates `oper_a` first, then `oper_b`, both on the current thread.
#[cfg(not(feature = "rayon"))]
fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
where
    A: FnOnce() -> RA + Send,
    B: FnOnce() -> RB + Send,
    RA: Send,
    RB: Send,
{
    let first = oper_a();
    let second = oper_b();
    (first, second)
}
// Maps `f` over the Vec using rayon's thread pool; output order matches
// input order (rayon's indexed `collect` preserves it — see rayon docs).
#[cfg(feature = "rayon")]
fn vec_map<T, U, F>(src: Vec<T>, f: F) -> Vec<U>
where
    T: Send,
    U: Send,
    F: Fn(T) -> U + Send + Sync,
{
    use rayon::iter::{IntoParallelIterator, ParallelIterator};
    src.into_par_iter().map(f).collect()
}
// Serial fallback for `vec_map` when the "rayon" feature is off:
// applies `f` to each element in order, producing a new Vec.
#[cfg(not(feature = "rayon"))]
fn vec_map<T, U, F>(src: Vec<T>, f: F) -> Vec<U>
where
    T: Send,
    U: Send,
    F: Fn(T) -> U + Send + Sync,
{
    let mut out = Vec::with_capacity(src.len());
    for item in src {
        out.push(f(item));
    }
    out
}
/// Returns `n / rhs` when `rhs` evenly divides `n`, or `None` otherwise.
///
/// Also returns `None` when `rhs` is zero (`N::default()`) instead of
/// panicking: the original `n % rhs` would panic on a zero divisor, which
/// could be reached via a zero channel count in the caller. The division
/// is performed lazily (`then`, not `then_some`) so it never executes
/// unless the divisor is non-zero and divides evenly.
fn exact_div<N>(n: N, rhs: N) -> Option<N>
where
    N: std::ops::Div<Output = N> + std::ops::Rem<Output = N> + std::cmp::PartialEq + Copy + Default,
{
    (rhs != N::default() && n % rhs == N::default()).then(|| n / rhs)
}