//! Read and write lines of pixel bytes.
//! A line is one contiguous row of samples of a single channel,
//! located at a specific position inside one layer of the image.

use crate::meta::attribute::*;
use crate::compression::Compression;
use crate::math::*;
use std::io::{Read, Seek, Write, Cursor};
use crate::error::{Result, Error, UnitResult, usize_to_u64};
use crate::meta::{MetaData, TileIndices};
use crate::io::Tracking;
use crate::io::Data;
use smallvec::SmallVec;
use std::ops::Range;
use crate::block::{BlockIndex, UncompressedBlock};
use crate::image::*;
use crate::meta::header::Header;
use crate::prelude::common::meta::Headers;
/// A byte slice, interpreted as one horizontal line of pixel samples,
/// together with the position of that line inside the image.
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct LineSlice<T> {

    /// Where this line is located inside the image.
    pub location: LineIndex,

    /// The raw bytes of the pixel line, either `&[u8]` or `&mut [u8]`.
    pub value: T,
}
/// An immutable slice of bytes, interpreted as one line of samples.
pub type LineRef<'s> = LineSlice<&'s [u8]>;

/// A mutable slice of bytes, interpreted as one line of samples.
pub type LineRefMut<'s> = LineSlice<&'s mut [u8]>;
/// Specifies where a line of pixel bytes is located inside an image:
/// which layer, which channel, which resolution level, and which pixel position.
#[derive(Clone, Copy, Eq, PartialEq, Debug, Hash)]
pub struct LineIndex {

    /// Index of the layer this line belongs to.
    pub layer: usize,

    /// Index of the channel this line belongs to.
    pub channel: usize,

    /// Which resolution level this line belongs to.
    pub level: Vec2<usize>,

    /// Position of the leftmost sample of this line.
    pub position: Vec2<usize>,

    /// How many samples this line contains.
    pub sample_count: usize,
}
#[inline]
#[must_use]
pub fn read_all_lines_from_buffered<T>(
    read: impl Read + Send,
    new: impl Fn(&[Header]) -> Result<T>,
    mut insert: impl FnMut(&mut T, &[Header], LineRef<'_>) -> UnitResult,
    options: ReadOptions<impl OnReadProgress>,
) -> Result<T>
{
    let insert_block = |value: &mut T, headers: &[Header], decompressed: UncompressedBlock| {
        let header = headers.get(decompressed.index.layer)
            .ok_or(Error::invalid("chunk index"))?;

        // split the decompressed block into its lines and insert them one by one
        for (bytes, line) in LineIndex::lines_in_block(decompressed.index, header) {
            insert(value, headers, LineSlice { location: line, value: &decompressed.data[bytes] })?;
        }

        Ok(())
    };

    crate::block::read_all_blocks_from_buffered(read, new, insert_block, options)
}
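/// Reads and decompresses only the lines of those chunks for which `filter`
/// returns `true`, seeking over all other chunks. The filter is called with the
/// layer and the tile description of each chunk.
/// Otherwise behaves like `read_all_lines_from_buffered`.
///
/// A minimal sketch, assuming the items of this module are in scope;
/// the `MyImage` type and its methods are hypothetical, and the filter shown here
/// keeps only the chunks of the first layer:
///
/// ```ignore
/// let image = read_filtered_lines_from_buffered(
///     buffered_file,  // some `impl Read + Seek + Send`
///     |headers: &[Header]| MyImage::allocate(headers),
///     |_image, (layer_index, _header), (_chunk_index, _tile)| layer_index == 0,
///     |image, headers, line| image.insert_line(headers, line),
///     options,
/// )?;
/// ```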
#[inline]
#[must_use]
pub fn read_filtered_lines_from_buffered<T>(
    read: impl Read + Seek + Send,
    new: impl Fn(&[Header]) -> Result<T>,
    filter: impl Fn(&T, (usize, &Header), (usize, &TileIndices)) -> bool,
    mut insert: impl FnMut(&mut T, &[Header], LineRef<'_>) -> UnitResult,
    options: ReadOptions<impl OnReadProgress>,
) -> Result<T>
{
    let insert_block = |value: &mut T, headers: &[Header], decompressed: UncompressedBlock| {
        let header = headers.get(decompressed.index.layer)
            .ok_or(Error::invalid("chunk index"))?;

        // split the decompressed block into its lines and insert them one by one
        for (bytes, line) in LineIndex::lines_in_block(decompressed.index, header) {
            insert(value, headers, LineSlice { location: line, value: &decompressed.data[bytes] })?;
        }

        Ok(())
    };

    crate::block::read_filtered_blocks_from_buffered(read, new, filter, insert_block, options)
}
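/// Compresses and writes all lines of an image to the writer,
/// assembling each block by calling `get_line` once per line that the block contains.
///
/// A minimal sketch, assuming the items of this module are in scope and the caller
/// already has `headers` and `options`; this writes the constant value `0.5` into
/// every sample, assuming (for this example only) that all channels store `f32` samples:
///
/// ```ignore
/// write_all_lines_to_buffered(
///     buffered_file,  // some `impl Write + Seek`
///     headers,
///     |_headers, line: LineRefMut<'_>| {
///         line.write_samples(|_sample_index| 0.5_f32)
///             .expect("failed to write line");
///     },
///     options,
/// )?;
/// ```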
#[inline]
#[must_use]
pub fn write_all_lines_to_buffered(
    write: impl Write + Seek,
    headers: Headers,
    get_line: impl Sync + Fn(&[Header], LineRefMut<'_>),
    options: WriteOptions<impl OnWriteProgress>,
) -> UnitResult
{
    let get_block = |headers: &[Header], block_index: BlockIndex| {
        let header: &Header = headers.get(block_index.layer).expect("invalid block index");

        // allocate a buffer for the uncompressed pixel bytes of this block
        let byte_count = block_index.pixel_size.area() * header.channels.bytes_per_pixel;
        let mut block_bytes = vec![0_u8; byte_count];

        // let the caller fill in each line of the block
        for (byte_range, line_index) in LineIndex::lines_in_block(block_index, header) {
            get_line(headers, LineRefMut {
                value: &mut block_bytes[byte_range],
                location: line_index,
            });
        }

        block_bytes
    };

    write_all_tiles_to_buffered(write, headers, get_block, options)
}
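/// Compresses and writes all blocks of an image to the writer,
/// obtaining the uncompressed bytes of each block from the `get_tile` closure.
/// Writes the meta data first, then the chunks,
/// and finally seeks back to fill in the offset tables.
///
/// A minimal sketch, assuming the items of this module are in scope;
/// this produces a zeroed tile of the correct byte size for every block:
///
/// ```ignore
/// write_all_tiles_to_buffered(
///     buffered_file,  // some `impl Write + Seek`
///     headers,
///     |headers: &[Header], block: BlockIndex| {
///         let header = &headers[block.layer];
///         vec![0_u8; block.pixel_size.area() * header.channels.bytes_per_pixel]
///     },
///     options,
/// )?;
/// ```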
#[inline]
#[must_use]
pub fn write_all_tiles_to_buffered(
    write: impl Write + Seek,
    mut headers: Headers,
    get_tile: impl Sync + Fn(&[Header], BlockIndex) -> Vec<u8>,
    mut options: WriteOptions<impl OnWriteProgress>,
) -> UnitResult
{
    let has_compression = headers.iter()
        .any(|header| header.compression != Compression::Uncompressed);

    // without parallel compression (or without any compression), the chunks are
    // written in the order they are produced, so an unspecified line order
    // can be upgraded to the stricter increasing order
    if !options.parallel_compression || !has_compression {
        for header in &mut headers {
            if header.line_order == LineOrder::Unspecified {
                header.line_order = LineOrder::Increasing;
            }
        }
    }

    let mut write = Tracking::new(write);
    MetaData::write_validating_to_buffered(&mut write, headers.as_slice(), options.pedantic)?;

    // reserve space for the offset tables, which are only known after all chunks are written
    let offset_table_start_byte = write.byte_position();
    let offset_table_size: usize = headers.iter()
        .map(|header| header.chunk_count).sum();

    write.seek_write_to(write.byte_position() + offset_table_size * std::mem::size_of::<u64>())?;

    let mut offset_tables: Vec<Vec<u64>> = headers.iter()
        .map(|header| vec![0; header.chunk_count]).collect();

    let total_chunk_count = offset_table_size as f32;
    let mut processed_chunk_count = 0;

    crate::block::for_compressed_blocks_in_image(headers.as_slice(), get_tile, options.parallel_compression, |chunk_index, chunk| {
        // remember where this chunk starts, for the offset table
        offset_tables[chunk.layer_index][chunk_index] = usize_to_u64(write.byte_position());
        chunk.write(&mut write, headers.as_slice())?;

        options.on_progress.on_write_progressed(
            processed_chunk_count as f32 / total_chunk_count, write.byte_position()
        )?;

        processed_chunk_count += 1;
        Ok(())
    })?;

    debug_assert_eq!(processed_chunk_count, offset_table_size, "not all chunks were written");

    // seek back and fill in the offset tables with the now-known chunk positions
    write.seek_write_to(offset_table_start_byte)?;
    for offset_table in offset_tables {
        u64::write_slice(&mut write, offset_table.as_slice())?;
    }

    write.flush()?;
    Ok(())
}
impl LineIndex {
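    /// Iterates over all lines that the given block consists of, yielding the
    /// byte range of each line within the block's uncompressed data, together
    /// with the index describing where that line belongs in the image.
    /// Lines are visited in storage order: for each row of pixels, one line per channel.
    ///
    /// A minimal sketch, assuming `block` is a `BlockIndex` and `header` the
    /// corresponding `Header`; this sums the byte size of all lines in the block:
    ///
    /// ```ignore
    /// let byte_count: usize = LineIndex::lines_in_block(block, header)
    ///     .map(|(byte_range, _line_index)| byte_range.len())
    ///     .sum();
    /// ```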
    #[inline]
    #[must_use]
    pub fn lines_in_block(block: BlockIndex, header: &Header) -> impl Iterator<Item=(Range<usize>, LineIndex)> {
        struct LineIter {
            layer: usize, level: Vec2<usize>, width: usize,
            end_y: usize, x: usize, channel_sizes: SmallVec<[usize; 8]>,
            byte: usize, channel: usize, y: usize,
        }

        // compute the byte range and line index for each line in the block, in storage order
        impl Iterator for LineIter {
            type Item = (Range<usize>, LineIndex);

            fn next(&mut self) -> Option<Self::Item> {
                if self.y < self.end_y {
                    // the byte length of a line depends on the sample type of its channel
                    let byte_len = self.channel_sizes[self.channel];

                    let return_value = (
                        (self.byte .. self.byte + byte_len),
                        LineIndex {
                            channel: self.channel,
                            layer: self.layer,
                            level: self.level,
                            position: Vec2(self.x, self.y),
                            sample_count: self.width,
                        }
                    );

                    // advance to the next channel, or to the next row of pixels
                    self.byte += byte_len;
                    self.channel += 1;

                    if self.channel == self.channel_sizes.len() {
                        self.channel = 0;
                        self.y += 1;
                    }

                    Some(return_value)
                }
                else {
                    None
                }
            }
        }

        let channel_line_sizes: SmallVec<[usize; 8]> = header.channels.list.iter()
            .map(move |channel| block.pixel_size.0 * channel.sample_type.bytes_per_sample())
            .collect();

        LineIter {
            layer: block.layer,
            level: block.level,
            width: block.pixel_size.0,
            x: block.pixel_position.0,
            end_y: block.pixel_position.y() + block.pixel_size.height(),
            channel_sizes: channel_line_sizes,
            byte: 0,
            channel: 0,
            y: block.pixel_position.y(),
        }
    }
}
impl<'s> LineRefMut<'s> {
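    /// Writes all samples of this line from the given slice of samples.
    /// The slice length must match the sample count of the line, and the sample
    /// type `T` must match the byte size of the line
    /// (both are checked by debug assertions only).
    ///
    /// A minimal sketch, assuming `line` is a `LineRefMut` over `f32` samples:
    ///
    /// ```ignore
    /// let samples = vec![0.5_f32; line.location.sample_count];
    /// line.write_samples_from_slice(&samples)?;
    /// ```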
    #[inline]
    #[must_use]
    pub fn write_samples_from_slice<T: crate::io::Data>(self, slice: &[T]) -> UnitResult {
        debug_assert_eq!(slice.len(), self.location.sample_count, "slice size does not match the line width");
        debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");

        T::write_slice(&mut Cursor::new(self.value), slice)
    }
    /// Writes all samples of this line by calling `get_sample` once per sample index,
    /// without requiring the samples to exist in memory as a slice.
    /// The sample type `T` must match the byte size of the line
    /// (checked by a debug assertion only).
    #[inline]
    #[must_use]
    pub fn write_samples<T: crate::io::Data>(self, mut get_sample: impl FnMut(usize) -> T) -> UnitResult {
        debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");

        let mut write = Cursor::new(self.value);
        for index in 0..self.location.sample_count {
            T::write(get_sample(index), &mut write)?;
        }

        Ok(())
    }
}
impl LineRef<'_> {
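    /// Reads all samples of this line into the given slice.
    /// The slice length must match the sample count of the line, and the sample
    /// type `T` must match the byte size of the line
    /// (both are checked by debug assertions only).
    ///
    /// A minimal sketch, assuming `line` is a `LineRef` over `f32` samples:
    ///
    /// ```ignore
    /// let mut samples = vec![0.0_f32; line.location.sample_count];
    /// line.read_samples_into_slice(&mut samples)?;
    /// ```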
    pub fn read_samples_into_slice<T: crate::io::Data>(self, slice: &mut [T]) -> UnitResult {
        debug_assert_eq!(slice.len(), self.location.sample_count, "slice size does not match the line width");
        debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");

        T::read_slice(&mut Cursor::new(self.value), slice)
    }
    /// Returns an iterator that decodes one sample of type `T` per call.
    /// The sample type `T` must match the byte size of the line
    /// (checked by a debug assertion only).
    pub fn read_samples<T: crate::io::Data>(&self) -> impl Iterator<Item = Result<T>> + '_ {
        debug_assert_eq!(self.value.len(), self.location.sample_count * T::BYTE_SIZE, "sample type size does not match line byte size");

        // copying the shared byte slice yields an independent reader
        // that advances through the bytes as samples are decoded
        let mut read = self.value;
        (0..self.location.sample_count).map(move |_| T::read(&mut read))
    }
}