Trait exr::block::writer::ChunksWriter

pub trait ChunksWriter: Sized {
    fn total_chunks_count(&self) -> usize;

    fn write_chunk(
        &mut self,
        index_in_header_increasing_y: usize,
        chunk: Chunk
    ) -> UnitResult;

    fn on_progress<F>(
        &mut self,
        on_progress: F
    ) -> OnProgressChunkWriter<'_, Self, F>
    where
        F: FnMut(f64),
    { ... }

    fn sequential_blocks_compressor<'w>(
        &'w mut self,
        meta: &'w MetaData
    ) -> SequentialBlocksCompressor<'w, Self> { ... }

    fn parallel_blocks_compressor<'w>(
        &'w mut self,
        meta: &'w MetaData
    ) -> Option<ParallelBlocksCompressor<'w, Self>> { ... }

    fn compress_all_blocks_sequential(
        self,
        meta: &MetaData,
        blocks: impl Iterator<Item = (usize, UncompressedBlock)>
    ) -> UnitResult { ... }

    fn compress_all_blocks_parallel(
        self,
        meta: &MetaData,
        blocks: impl Iterator<Item = (usize, UncompressedBlock)>
    ) -> UnitResult { ... }
}

Write chunks to a byte destination. Then write each chunk with writer.write_chunk(index_in_header_increasing_y, chunk).
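For orientation, a minimal sketch of driving this trait directly, assuming the compressed chunks and their header-local indices have already been produced elsewhere; the write_all_chunks helper and its arguments are made up for this example:

use exr::block::chunk::Chunk;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;

// hypothetical helper: writes already-compressed chunks through any `ChunksWriter`
fn write_all_chunks(
    mut chunk_writer: impl ChunksWriter,
    chunks: impl Iterator<Item = (usize, Chunk)>,
) -> UnitResult {
    // the writer knows how many chunks the finished file must contain
    let expected_chunk_count = chunk_writer.total_chunks_count();

    for (index_in_header_increasing_y, chunk) in chunks.take(expected_chunk_count) {
        chunk_writer.write_chunk(index_in_header_increasing_y, chunk)?;
    }

    Ok(())
}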

Required methods

fn total_chunks_count(&self) -> usize

The total number of chunks that the complete file will contain.

fn write_chunk(
    &mut self,
    index_in_header_increasing_y: usize,
    chunk: Chunk
) -> UnitResult

Writes a single chunk to the byte destination. Errors when the chunk at this index was already written. Any more calls will result in an error and have no effect. If writing results in an error, the file and the writer may remain in an invalid state and should not be used further.
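A small sketch of honoring that contract; the helper name and variables are made up, and the point is only that the writer is not used again after a failed write:

use exr::block::chunk::Chunk;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;

// hypothetical helper: attempts one write and gives up on the writer after any error
fn try_write_one(mut writer: impl ChunksWriter, index: usize, chunk: Chunk) -> UnitResult {
    if let Err(error) = writer.write_chunk(index, chunk) {
        // the partially written file and the writer may now be invalid,
        // so do not call `write_chunk` on this writer again
        eprintln!("aborting exr write: {:?}", error);
        return Err(error);
    }
    Ok(())
}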

Provided methods

fn on_progress<F>(
    &mut self,
    on_progress: F
) -> OnProgressChunkWriter<'_, Self, F> where
    F: FnMut(f64), 

Obtain a new writer that calls the specified closure for each block that is written to this writer.
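For example, the wrapped writer can print progress while chunks are written. This sketch assumes the f64 passed to the closure is the completed fraction between 0.0 and 1.0 (check the implementation if the exact semantics matter), and the helper name is made up:

use exr::block::chunk::Chunk;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;

// hypothetical helper: wraps the given writer so every written chunk reports progress
fn write_with_progress(
    mut chunk_writer: impl ChunksWriter,
    chunks: impl Iterator<Item = (usize, Chunk)>,
) -> UnitResult {
    // assumption: `progress` is the completed fraction in 0.0 ..= 1.0
    let mut progress_writer = chunk_writer.on_progress(|progress| {
        eprintln!("writing exr: {:.0}%", progress * 100.0);
    });

    // the wrapper itself implements `ChunksWriter` (see the implementors below)
    for (index_in_header_increasing_y, chunk) in chunks {
        progress_writer.write_chunk(index_in_header_increasing_y, chunk)?;
    }

    Ok(())
}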

fn sequential_blocks_compressor<'w>(
    &'w mut self,
    meta: &'w MetaData
) -> SequentialBlocksCompressor<'w, Self>

Obtain a new writer that can compress blocks to chunks, which are then passed to this writer.
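A sketch of feeding uncompressed blocks through this borrowed compressor. The compress_block method name is an assumption made for this example, so check the documentation of SequentialBlocksCompressor for the exact call; the helper name is made up as well:

use exr::block::UncompressedBlock;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;
use exr::meta::MetaData;

// hypothetical helper: compresses and writes blocks on the current thread
fn compress_blocks_sequentially(
    mut chunk_writer: impl ChunksWriter,
    meta: &MetaData,
    blocks: impl Iterator<Item = (usize, UncompressedBlock)>,
) -> UnitResult {
    // the compressor borrows the writer and forwards each finished chunk to it
    let mut compressor = chunk_writer.sequential_blocks_compressor(meta);

    for (index_in_header_increasing_y, block) in blocks {
        // assumed method name: compresses the block and writes the resulting chunk
        compressor.compress_block(index_in_header_increasing_y, block)?;
    }

    Ok(())
}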

fn parallel_blocks_compressor<'w>(
    &'w mut self,
    meta: &'w MetaData
) -> Option<ParallelBlocksCompressor<'w, Self>>

Obtain a new writer that can compress blocks to chunks on multiple threads, which are then passed to this writer. Returns None if the sequential compressor should be used instead, for example because the thread pool could not be created or the threading overhead would outweigh the benefit.
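The returned Option makes it possible to decide up front which strategy will be used; a minimal sketch with a made-up helper name:

use exr::block::UncompressedBlock;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;
use exr::meta::MetaData;

// hypothetical helper: prefers multi-threaded compression when it is available
fn compress_blocks_adaptively(
    mut chunk_writer: impl ChunksWriter,
    meta: &MetaData,
    blocks: impl Iterator<Item = (usize, UncompressedBlock)>,
) -> UnitResult {
    // `None` means the sequential path should be used; the probe compressor is
    // dropped again at the end of this statement, releasing the borrow
    let threads_available = chunk_writer.parallel_blocks_compressor(meta).is_some();

    if threads_available {
        chunk_writer.compress_all_blocks_parallel(meta, blocks)
    } else {
        chunk_writer.compress_all_blocks_sequential(meta, blocks)
    }
}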

fn compress_all_blocks_sequential(
    self,
    meta: &MetaData,
    blocks: impl Iterator<Item = (usize, UncompressedBlock)>
) -> UnitResult

Compresses all blocks and writes the resulting chunks to the byte destination. The index of each block must be in increasing line order within the header. Obtain the iterator with MetaData::collect_ordered_blocks(...) or similar methods.
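A minimal sketch, assuming the ordered (index, block) pairs were collected beforehand; the helper name is made up:

use exr::block::UncompressedBlock;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;
use exr::meta::MetaData;

// hypothetical helper: single-threaded compression of pre-collected, ordered blocks
fn write_blocks_sequentially(
    chunk_writer: impl ChunksWriter,
    meta: &MetaData,
    ordered_blocks: Vec<(usize, UncompressedBlock)>,
) -> UnitResult {
    // the indices must increase in y within each header, as described above
    chunk_writer.compress_all_blocks_sequential(meta, ordered_blocks.into_iter())
}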

fn compress_all_blocks_parallel(
    self,
    meta: &MetaData,
    blocks: impl Iterator<Item = (usize, UncompressedBlock)>
) -> UnitResult

Compresses all blocks, using multiple threads where available, and writes the resulting chunks to the byte destination. The index of each block must be in increasing line order within the header. Obtain the iterator with MetaData::collect_ordered_blocks(...) or similar methods.
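The parallel variant is called the same way as the sequential one; only the compression work is distributed over threads (sketch with a made-up helper name):

use exr::block::UncompressedBlock;
use exr::block::writer::ChunksWriter;
use exr::error::UnitResult;
use exr::meta::MetaData;

// hypothetical helper: multi-threaded compression of pre-collected, ordered blocks
fn write_blocks_in_parallel(
    chunk_writer: impl ChunksWriter,
    meta: &MetaData,
    ordered_blocks: Vec<(usize, UncompressedBlock)>,
) -> UnitResult {
    // same ordering requirement as the sequential variant
    chunk_writer.compress_all_blocks_parallel(meta, ordered_blocks.into_iter())
}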

Implementors

impl<'w, W, F> ChunksWriter for OnProgressChunkWriter<'w, W, F> where
    W: 'w + ChunksWriter,
    F: FnMut(f64), 

fn total_chunks_count(&self) -> usize

fn write_chunk(
    &mut self,
    index_in_header_increasing_y: usize,
    chunk: Chunk
) -> UnitResult

impl<W> ChunksWriter for ChunkWriter<W> where
    W: Write + Seek

fn total_chunks_count(&self) -> usize

The total number of chunks that the complete file will contain.

fn write_chunk(
    &mut self,
    index_in_header_increasing_y: usize,
    chunk: Chunk
) -> UnitResult

Writes a single chunk to the byte destination. Errors when the chunk at this index was already written. Any more calls will result in an error and have no effect. If writing results in an error, the file and the writer may remain in an invalid state and should not be used further.