use duplicate::duplicate_item;
use integer_encoding::{VarIntReader, VarIntWriter};
use std::io::{Read, Result, Write};
use std::ops::{Index, IndexMut, Range};
use std::slice::{Iter, SliceIndex};
#[cfg(feature = "async")]
use futures::{AsyncReadExt, AsyncWrite, AsyncWriteExt};
#[cfg(feature = "async")]
use integer_encoding::{VarIntAsyncReader, VarIntAsyncWriter};
use crate::util::{compress, decompress};
#[cfg(feature = "async")]
use crate::util::{compress_async, decompress_async};
use crate::Compression;
/// One record of a PMTiles directory: maps a run of consecutive tile IDs
/// to a byte range holding their shared data.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct Entry {
    // ID of the first tile covered by this entry; the run extends to
    // `tile_id + run_length` (see `Entry::tile_id_range`).
    pub tile_id: u64,
    // Byte offset of the referenced data. Serialized relative to the end of
    // the previous entry when contiguous (see the writer impl below).
    pub offset: u64,
    // Size of the referenced data in bytes; the (de)serializers reject 0.
    pub length: u32,
    // Number of consecutive tile IDs sharing this data; 0 marks a
    // leaf-directory entry (see `Entry::is_leaf_dir_entry`).
    pub run_length: u32,
}
impl Entry {
    /// Half-open range of tile IDs covered by this entry.
    pub const fn tile_id_range(&self) -> Range<u64> {
        let start = self.tile_id;
        start..start + self.run_length as u64
    }

    /// `true` if this entry points at a leaf directory rather than tile
    /// data; leaf entries are encoded with a run length of zero.
    pub const fn is_leaf_dir_entry(&self) -> bool {
        matches!(self.run_length, 0)
    }
}
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(transparent))]
pub struct Directory {
entries: Vec<Entry>,
}
impl Directory {
    /// Number of entries held by this directory.
    pub const fn len(&self) -> usize {
        self.entries.len()
    }

    /// `true` when the directory holds no entries at all.
    pub const fn is_empty(&self) -> bool {
        self.entries.is_empty()
    }

    /// Iterator over the entries by reference.
    #[deprecated(
        since = "0.3.0",
        note = "Directory implements IntoIterator trait, which should be used instead"
    )]
    pub fn iter(&self) -> Iter<'_, Entry> {
        self.entries.iter()
    }
}
impl<'a> IntoIterator for &'a Directory {
    type Item = &'a Entry;
    type IntoIter = Iter<'a, Entry>;

    /// Yields the directory's entries by shared reference, in order.
    fn into_iter(self) -> Self::IntoIter {
        self.entries.as_slice().iter()
    }
}
impl Directory {
    // Deserialize a directory: read exactly `length` compressed bytes from
    // `input`, decompress them with `compression`, and decode the
    // column-oriented layout: entry count, then delta-encoded tile IDs,
    // run lengths, lengths, and offsets — all as varints.
    //
    // `duplicate_item` stamps out a sync (`from_reader_impl`) and an async
    // (`from_async_reader_impl`) variant from the same body.
    #[duplicate_item(
        fn_name cfg_async_filter input_traits decompress(compression, binding) read_varint(type, reader) async;
        [from_reader_impl] [cfg(all())] [impl Read] [decompress(compression, &mut binding)] [reader.read_varint::<type>()] [];
        [from_async_reader_impl] [cfg(feature="async")] [(impl Unpin + Send + AsyncReadExt)] [decompress_async(compression, &mut binding)] [reader.read_varint_async::<type>().await] [async];
    )]
    #[allow(clippy::needless_range_loop)]
    #[cfg_async_filter]
    async fn fn_name(
        input: &mut input_traits,
        length: u64,
        compression: Compression,
    ) -> Result<Self> {
        // Bound the read so decompression cannot run past the directory.
        let mut binding = input.take(length);
        let mut reader = decompress([compression], [binding])?;
        let num_entries = read_varint([usize], [reader])?;
        let mut entries = Vec::<Entry>::with_capacity(num_entries);
        // Tile IDs are stored as deltas from the previous entry's ID.
        let mut last_id = 0u64;
        for _ in 0..num_entries {
            let tmp = read_varint([u64], [reader])?;
            last_id += tmp;
            entries.push(Entry {
                tile_id: last_id,
                length: 0,
                offset: 0,
                run_length: 0,
            });
        }
        for i in 0..num_entries {
            entries[i].run_length = read_varint([_], [reader])?;
        }
        for i in 0..num_entries {
            let len = read_varint([_], [reader])?;
            if len == 0 {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "Length of a directory entry must be greater than 0.",
                ));
            }
            entries[i].length = len;
        }
        for i in 0..num_entries {
            let val = read_varint([u64], [reader])?;
            // A stored 0 (only legal after the first entry) means "this
            // entry starts right where the previous one ended"; any other
            // value is the real offset plus one.
            entries[i].offset = if i > 0 && val == 0 {
                entries[i - 1].offset + u64::from(entries[i - 1].length)
            } else {
                // For the first entry a stored 0 is malformed input: reject
                // it instead of underflowing `0 - 1`.
                val.checked_sub(1).ok_or_else(|| {
                    std::io::Error::new(
                        std::io::ErrorKind::InvalidData,
                        "Offset of the first directory entry must be greater than 0.",
                    )
                })?
            };
        }
        Ok(Self { entries })
    }

    // Serialize this directory to `output`, compressing with `compression`,
    // using the inverse of the layout decoded above. Again duplicated into
    // sync (`to_writer_impl`) and async (`to_async_writer_impl`) variants.
    #[duplicate_item(
        fn_name cfg_async_filter input_traits compress flush write_varint(writer, value) add_await(code) async;
        [to_writer_impl] [cfg(all())] [impl Write] [compress] [flush] [writer.write_varint(value)] [code] [];
        [to_async_writer_impl] [cfg(feature="async")] [(impl AsyncWrite + Unpin + Send)] [compress_async] [close] [writer.write_varint_async(value).await] [code.await] [async];
    )]
    #[cfg_async_filter]
    async fn fn_name(&self, output: &mut input_traits, compression: Compression) -> Result<()> {
        let mut writer = compress(compression, output)?;
        write_varint([writer], [self.entries.len()])?;
        // Tile IDs are written as deltas from the previous entry's ID.
        let mut last_id = 0u64;
        for entry in &self.entries {
            write_varint([writer], [entry.tile_id - last_id])?;
            last_id = entry.tile_id;
        }
        for entry in &self.entries {
            write_varint([writer], [entry.run_length])?;
        }
        for entry in &self.entries {
            // Zero-length entries are unrepresentable on read (the decoder
            // rejects them), so refuse to encode them.
            if entry.length == 0 {
                return Err(std::io::Error::new(
                    std::io::ErrorKind::InvalidData,
                    "Length of a directory entry must be greater than 0.",
                ));
            }
            write_varint([writer], [entry.length])?;
        }
        // Offsets: 0 encodes "contiguous with the previous entry",
        // otherwise the actual offset shifted up by one.
        let mut next_byte = 0u64;
        for (index, entry) in self.into_iter().enumerate() {
            let val = if index > 0 && entry.offset == next_byte {
                0
            } else {
                entry.offset + 1
            };
            write_varint([writer], [val])?;
            next_byte = entry.offset + u64::from(entry.length);
        }
        add_await([writer.flush()])?;
        Ok(())
    }
}
impl Directory {
    /// Reads a directory from `input`, consuming exactly `length` bytes and
    /// decompressing them with `compression`.
    ///
    /// # Errors
    /// Fails on I/O errors or malformed directory data (e.g. an entry
    /// whose length is zero).
    pub fn from_reader(
        input: &mut impl Read,
        length: u64,
        compression: Compression,
    ) -> Result<Self> {
        Self::from_reader_impl(input, length, compression)
    }
    /// Parses a directory from an in-memory buffer; the whole slice is
    /// treated as the compressed directory payload.
    ///
    /// # Errors
    /// Same failure modes as [`Directory::from_reader`].
    pub fn from_bytes(bytes: impl AsRef<[u8]>, compression: Compression) -> std::io::Result<Self> {
        let length = bytes.as_ref().len() as u64;
        let mut reader = std::io::Cursor::new(bytes);
        Self::from_reader(&mut reader, length, compression)
    }
    /// Async counterpart of [`Directory::from_reader`].
    ///
    /// # Errors
    /// Same failure modes as [`Directory::from_reader`].
    #[cfg(feature = "async")]
    pub async fn from_async_reader(
        input: &mut (impl Unpin + Send + AsyncReadExt),
        length: u64,
        compression: Compression,
    ) -> Result<Self> {
        Self::from_async_reader_impl(input, length, compression).await
    }
    /// Serializes this directory to `output`, compressing with `compression`.
    ///
    /// # Errors
    /// Fails on I/O errors or if any entry has a length of zero.
    pub fn to_writer(&self, output: &mut impl Write, compression: Compression) -> Result<()> {
        self.to_writer_impl(output, compression)
    }
    /// Async counterpart of [`Directory::to_writer`].
    ///
    /// # Errors
    /// Same failure modes as [`Directory::to_writer`].
    #[cfg(feature = "async")]
    pub async fn to_async_writer(
        &self,
        output: &mut (impl AsyncWrite + Unpin + Send),
        compression: Compression,
    ) -> Result<()> {
        self.to_async_writer_impl(output, compression).await
    }
}
impl Directory {
    /// Looks up the entry whose tile-ID run contains `tile_id`.
    ///
    /// Leaf-directory entries are skipped; returns `None` when no
    /// non-leaf entry covers the requested ID.
    pub fn find_entry_for_tile_id(&self, tile_id: u64) -> Option<&Entry> {
        for entry in &self.entries {
            if !entry.is_leaf_dir_entry() && entry.tile_id_range().contains(&tile_id) {
                return Some(entry);
            }
        }
        None
    }
}
impl<I: SliceIndex<[Entry]>> Index<I> for Directory {
    type Output = I::Output;

    /// Delegates indexing straight to the underlying entry slice.
    fn index(&self, index: I) -> &Self::Output {
        &self.entries[index]
    }
}
impl<I: SliceIndex<[Entry]>> IndexMut<I> for Directory {
    /// Delegates mutable indexing straight to the underlying entry slice.
    fn index_mut(&mut self, index: I) -> &mut Self::Output {
        &mut self.entries[index]
    }
}
impl From<Vec<Entry>> for Directory {
fn from(entries: Vec<Entry>) -> Self {
Self { entries }
}
}
impl From<Directory> for Vec<Entry> {
fn from(val: Directory) -> Self {
val.entries
}
}
#[cfg(test)]
#[allow(clippy::cast_possible_truncation)]
mod test {
    use std::io::{Cursor, Seek, SeekFrom};
    use crate::util::decompress_all;
    use super::*;
    // Fixture: a small PMTiles archive checked into the repository.
    const PM_TILES_BYTES: &[u8] =
        include_bytes!("../test/stamen_toner(raster)CC-BY+ODbL_z3.pmtiles");
    // Location and encoding of the root directory inside the fixture
    // (values match this specific file).
    const ROOT_DIR_OFFSET: u64 = 127;
    const ROOT_DIR_LENGTH: u64 = 246;
    const ROOT_DIR_COMPRESSION: Compression = Compression::GZip;
    // Parses the fixture's root directory and spot-checks the first,
    // a middle, and the last entry, plus the reader's final position.
    #[test]
    fn test_from_reader() -> Result<()> {
        let mut reader = Cursor::new(PM_TILES_BYTES);
        reader.seek(SeekFrom::Start(ROOT_DIR_OFFSET))?;
        let dir = Directory::from_reader(&mut reader, ROOT_DIR_LENGTH, ROOT_DIR_COMPRESSION)?;
        // The reader must have consumed exactly the directory's bytes.
        assert_eq!(reader.position(), ROOT_DIR_OFFSET + ROOT_DIR_LENGTH);
        assert_eq!(dir.entries.len(), 84);
        assert_eq!(
            dir.entries[0],
            Entry {
                tile_id: 0,
                offset: 0,
                length: 18404,
                run_length: 1
            }
        );
        assert_eq!(
            dir.entries[58],
            Entry {
                tile_id: 58,
                offset: 422_070,
                length: 850,
                run_length: 2
            }
        );
        assert_eq!(
            dir.entries[83],
            Entry {
                tile_id: 84,
                offset: 243_790,
                length: 914,
                run_length: 1
            }
        );
        Ok(())
    }
    // Round-trip: re-serializing the parsed root directory must produce
    // the same (decompressed) bytes as the fixture. Comparison happens on
    // decompressed payloads so gzip encoder differences don't matter.
    #[test]
    fn test_to_writer() -> Result<()> {
        let mut reader = Cursor::new(PM_TILES_BYTES);
        reader.seek(SeekFrom::Start(ROOT_DIR_OFFSET))?;
        let dir = Directory::from_reader(&mut reader, ROOT_DIR_LENGTH, ROOT_DIR_COMPRESSION)?;
        let mut buf = Vec::<u8>::with_capacity(ROOT_DIR_LENGTH as usize);
        let mut writer = Cursor::new(&mut buf);
        dir.to_writer(&mut writer, ROOT_DIR_COMPRESSION)?;
        let output = decompress_all(ROOT_DIR_COMPRESSION, &buf)?;
        let expected = decompress_all(
            ROOT_DIR_COMPRESSION,
            &PM_TILES_BYTES[ROOT_DIR_OFFSET as usize..(ROOT_DIR_OFFSET + ROOT_DIR_LENGTH) as usize],
        )?;
        assert_eq!(output, expected);
        Ok(())
    }
    // Serialization must reject a directory containing a zero-length entry.
    #[test]
    fn test_to_writer_invalid_entry() {
        let mut dir = Directory {
            entries: Vec::new(),
        };
        dir.entries.push(Entry {
            length: 0,
            offset: 0,
            run_length: 0,
            tile_id: 0,
        });
        let mut buf = Vec::<u8>::with_capacity(10);
        let mut writer = Cursor::new(&mut buf);
        assert!(dir.to_writer(&mut writer, ROOT_DIR_COMPRESSION).is_err());
    }
}