Trait constriction::stream::model::IterableEntropyModel

source ·
pub trait IterableEntropyModel<'m, const PRECISION: usize>: EntropyModel<PRECISION> {
    // Required method
    fn symbol_table(
        &'m self,
    ) -> impl Iterator<Item = (Self::Symbol, Self::Probability, <Self::Probability as BitArray>::NonZero)>;

    // Provided methods
    fn floating_point_symbol_table<F>(
        &'m self,
    ) -> impl Iterator<Item = (Self::Symbol, F, F)>
       where F: FloatCore + From<Self::Probability> + 'm,
             Self::Probability: Into<F> { ... }
    fn entropy_base2<F>(&'m self) -> F
       where F: Float + Sum,
             Self::Probability: Into<F> { ... }
    fn cross_entropy_base2<F>(&'m self, p: impl IntoIterator<Item = F>) -> F
       where F: Float + Sum,
             Self::Probability: Into<F> { ... }
    fn reverse_cross_entropy_base2<F>(
        &'m self,
        p: impl IntoIterator<Item = F>,
    ) -> F
       where F: Float + Sum,
             Self::Probability: Into<F> { ... }
    fn kl_divergence_base2<F>(&'m self, p: impl IntoIterator<Item = F>) -> F
       where F: Float + Sum,
             Self::Probability: Into<F> { ... }
    fn reverse_kl_divergence_base2<F>(
        &'m self,
        p: impl IntoIterator<Item = F>,
    ) -> F
       where F: Float + Sum,
             Self::Probability: Into<F> { ... }
    fn to_generic_encoder_model(
        &'m self,
    ) -> NonContiguousCategoricalEncoderModel<Self::Symbol, Self::Probability, PRECISION>
       where Self::Symbol: Hash + Eq { ... }
    fn to_generic_decoder_model(
        &'m self,
    ) -> NonContiguousCategoricalDecoderModel<Self::Symbol, Self::Probability, Vec<(Self::Probability, Self::Symbol)>, PRECISION>
       where Self::Symbol: Clone { ... }
    fn to_generic_lookup_decoder_model(
        &'m self,
    ) -> NonContiguousLookupDecoderModel<Self::Symbol, Self::Probability, Vec<(Self::Probability, Self::Symbol)>, Box<[Self::Probability]>, PRECISION>
       where Self::Probability: Into<usize>,
             usize: AsPrimitive<Self::Probability>,
             Self::Symbol: Clone + Default { ... }
}
Expand description

A trait for EntropyModels that can be serialized into a common format.

The method symbol_table iterates over all symbols with nonzero probability under the entropy model. The iteration occurs in a uniquely defined order of increasing left-sided cumulative probability of the symbols. All EntropyModels for which such iteration can be implemented efficiently should implement this trait. EntropyModels for which such iteration would require extra work (e.g., sorting symbols by left-sided cumulative distribution) should not implement this trait so that callers can assume that calling symbol_table is cheap.

The main advantage of implementing this trait is that it provides default implementations of conversions to various other EncoderModels and DecoderModels, see to_generic_encoder_model, to_generic_decoder_model, and to_generic_lookup_decoder_model.

Required Methods§

source

fn symbol_table( &'m self, ) -> impl Iterator<Item = (Self::Symbol, Self::Probability, <Self::Probability as BitArray>::NonZero)>

Iterates over all symbols in the unique order that is consistent with the cumulative distribution.

The iterator yields symbols in order of increasing left-sided cumulative probability.

This method may be used, e.g., to export the model into a serializable format. It is also used internally by constructors that create a different but equivalent representation of the same entropy model (e.g., to construct a ContiguousLookupDecoderModel or NonContiguousLookupDecoderModel from some EncoderModel).

§Example
use constriction::stream::model::{
    IterableEntropyModel, SmallNonContiguousCategoricalDecoderModel
};

let symbols = vec!['a', 'b', 'x', 'y'];
let probabilities = vec![0.125, 0.5, 0.25, 0.125]; // Can all be represented without rounding.
let model = SmallNonContiguousCategoricalDecoderModel
    ::from_symbols_and_floating_point_probabilities_fast(
        symbols.iter().cloned(),
        &probabilities,
        None
    ).unwrap();

// Print a table representation of this entropy model (e.g., for debugging).
dbg!(model.symbol_table().collect::<Vec<_>>());

// Create a lookup model. This method is provided by the trait `IterableEntropyModel`.
let lookup_decoder_model = model.to_generic_lookup_decoder_model();
§See also

Provided Methods§

source

fn floating_point_symbol_table<F>( &'m self, ) -> impl Iterator<Item = (Self::Symbol, F, F)>
where F: FloatCore + From<Self::Probability> + 'm, Self::Probability: Into<F>,

Similar to symbol_table, but yields both cumulatives and probabilities in floating point representation.

The conversion to floats is guaranteed to be lossless due to the trait bound F: From<Self::Probability>.

TODO: test

source

fn entropy_base2<F>(&'m self) -> F
where F: Float + Sum, Self::Probability: Into<F>,

Returns the entropy in units of bits (i.e., base 2).

The entropy is the expected amortized bit rate per symbol of an optimal lossless entropy coder, assuming that the data is indeed distributed according to the model.

Note that calling this method on a LeakilyQuantizedDistribution will return the entropy after quantization, not the differential entropy of the underlying continuous probability distribution.

§See also
source

fn cross_entropy_base2<F>(&'m self, p: impl IntoIterator<Item = F>) -> F
where F: Float + Sum, Self::Probability: Into<F>,

Returns the cross entropy between argument p and this model in units of bits (i.e., base 2).

This is the expected amortized bit rate per symbol that an optimal coder will achieve when using this model on a data source that draws symbols from the provided probability distribution p.

The cross entropy is defined as H(p, self) = - sum_i p[i] * log2(self[i]) where p is provided as an argument and self[i] denotes the corresponding probabilities of the model. Note that self[i] is never zero for models in the constriction library, so the logarithm in the (forward) cross entropy can never be infinite.

The argument p must yield a sequence of probabilities (nonnegative values that sum to 1) with the correct length and order to be compatible with the model.

§See also
source

fn reverse_cross_entropy_base2<F>(&'m self, p: impl IntoIterator<Item = F>) -> F
where F: Float + Sum, Self::Probability: Into<F>,

Returns the cross entropy between this model and argument p in units of bits (i.e., base 2).

This method is provided mostly for completeness. You’re more likely to want to calculate cross_entropy_base2.

The reverse cross entropy is defined as H(self, p) = - sum_i self[i] * log2(p[i]) where p is provided as an argument and self[i] denotes the corresponding probabilities of the model.

The argument p must yield a sequence of nonzero probabilities (that sum to 1) with the correct length and order to be compatible with the model.

§See also
source

fn kl_divergence_base2<F>(&'m self, p: impl IntoIterator<Item = F>) -> F
where F: Float + Sum, Self::Probability: Into<F>,

Returns Kullback-Leibler divergence D_KL(p || self)

This is the expected overhead (due to model quantization) in bit rate per symbol that an optimal coder will incur when using this model on a data source that draws symbols from the provided probability distribution p (which this model is supposed to approximate).

The KL-divergence is defined as D_KL(p || self) = - sum_i p[i] * log2(self[i] / p[i]), where p is provided as an argument and self[i] denotes the corresponding probabilities of the model. Any term in the sum where p[i] is exactly zero does not contribute (regardless of whether or not self[i] would also be zero).

The argument p must yield a sequence of probabilities (nonnegative values that sum to 1) with the correct length and order to be compatible with the model.

§See also
source

fn reverse_kl_divergence_base2<F>(&'m self, p: impl IntoIterator<Item = F>) -> F
where F: Float + Sum, Self::Probability: Into<F>,

Returns reverse Kullback-Leibler divergence, i.e., D_KL(self || p)

This method is provided mostly for completeness. You’re more likely to want to calculate kl_divergence_base2.

The reverse KL-divergence is defined as D_KL(self || p) = - sum_i self[i] * log2(p[i] / self[i]) where p is provided as an argument and self[i] denotes the corresponding probabilities of the model.

The argument p must yield a sequence of nonzero probabilities (that sum to 1) with the correct length and order to be compatible with the model.

§See also
source

fn to_generic_encoder_model( &'m self, ) -> NonContiguousCategoricalEncoderModel<Self::Symbol, Self::Probability, PRECISION>
where Self::Symbol: Hash + Eq,

Creates an EncoderModel from this EntropyModel

This is a fallback method that should only be used if no more specialized conversions are available. It generates a NonContiguousCategoricalEncoderModel with the same probabilities and left-sided cumulatives as self. Note that a NonContiguousCategoricalEncoderModel is very generic and therefore not particularly optimized. Thus, before calling this method first check:

  • if the original Self type already implements EncoderModel (some types implement both EncoderModel and DecoderModel); or
  • if the Self type has some inherent method with a name like to_encoder_model; if it does, that method probably returns an implementation of EncoderModel that is better optimized for your use case.
source

fn to_generic_decoder_model( &'m self, ) -> NonContiguousCategoricalDecoderModel<Self::Symbol, Self::Probability, Vec<(Self::Probability, Self::Symbol)>, PRECISION>
where Self::Symbol: Clone,

Creates a DecoderModel from this EntropyModel

This is a fallback method that should only be used if no more specialized conversions are available. It generates a NonContiguousCategoricalDecoderModel with the same probabilities and left-sided cumulatives as self. Note that a NonContiguousCategoricalDecoderModel is very generic and therefore not particularly optimized. Thus, before calling this method first check:

  • if the original Self type already implements DecoderModel (some types implement both EncoderModel and DecoderModel); or
  • if the Self type has some inherent method with a name like to_decoder_model; if it does, that method probably returns an implementation of DecoderModel that is better optimized for your use case.
source

fn to_generic_lookup_decoder_model( &'m self, ) -> NonContiguousLookupDecoderModel<Self::Symbol, Self::Probability, Vec<(Self::Probability, Self::Symbol)>, Box<[Self::Probability]>, PRECISION>

Creates a DecoderModel from this EntropyModel

This is a fallback method that should only be used if no more specialized conversions are available. It generates a NonContiguousLookupDecoderModel, which makes no assumption about contiguity of the support. Thus, before calling this method first check if the Self type has some inherent method with a name like to_lookup_decoder_model. If it does, that method probably returns a LookupDecoderModel that is better optimized for your use case.

Object Safety§

This trait is not object safe.

Implementations on Foreign Types§

source§

impl<'m, M, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for &'m M
where M: IterableEntropyModel<'m, PRECISION>,

source§

fn symbol_table( &'m self, ) -> impl Iterator<Item = (Self::Symbol, Self::Probability, <Self::Probability as BitArray>::NonZero)>

source§

fn entropy_base2<F>(&'m self) -> F
where F: Float + Sum, Self::Probability: Into<F>,

source§

fn to_generic_encoder_model( &'m self, ) -> NonContiguousCategoricalEncoderModel<Self::Symbol, Self::Probability, PRECISION>
where Self::Symbol: Hash + Eq,

source§

fn to_generic_decoder_model( &'m self, ) -> NonContiguousCategoricalDecoderModel<Self::Symbol, Self::Probability, Vec<(Self::Probability, Self::Symbol)>, PRECISION>
where Self::Symbol: Clone,

Implementors§

source§

impl<'m, Probability, Cdf, LookupTable, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for ContiguousLookupDecoderModel<Probability, Cdf, LookupTable, PRECISION>
where Probability: BitArray + Into<usize>, usize: AsPrimitive<Probability>, Cdf: AsRef<[Probability]>, LookupTable: AsRef<[Probability]>,

source§

impl<'m, Probability, Cdf, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for ContiguousCategoricalEntropyModel<Probability, Cdf, PRECISION>
where Probability: BitArray, Cdf: AsRef<[Probability]>,

source§

impl<'m, Probability, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for UniformModel<Probability, PRECISION>
where Probability: AsPrimitive<usize> + BitArray, usize: AsPrimitive<Probability>,

source§

impl<'m, Symbol, Probability, Cdf, LookupTable, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for NonContiguousLookupDecoderModel<Symbol, Probability, Cdf, LookupTable, PRECISION>
where Symbol: Clone + 'm, Probability: BitArray + Into<usize>, usize: AsPrimitive<Probability>, Cdf: AsRef<[(Probability, Symbol)]>, LookupTable: AsRef<[Probability]>,

source§

impl<'m, Symbol, Probability, Cdf, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for NonContiguousCategoricalDecoderModel<Symbol, Probability, Cdf, PRECISION>
where Symbol: Clone + 'm, Probability: BitArray, Cdf: AsRef<[(Probability, Symbol)]>,

source§

impl<'m, Symbol, Probability, D, const PRECISION: usize> IterableEntropyModel<'m, PRECISION> for LeakilyQuantizedDistribution<f64, Symbol, Probability, D, PRECISION>
where f64: AsPrimitive<Probability>, Symbol: PrimInt + AsPrimitive<Probability> + AsPrimitive<usize> + Into<f64> + WrappingSub, Probability: BitArray + Into<f64>, D: Distribution + 'm, D::Value: AsPrimitive<Symbol>,