mod compressor;
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::hash::BuildHasherDefault;
use std::io::{BufWriter, Seek, Write};
pub use compressor::Compressor;
pub(crate) use compressor::{GzipCompressor, NoCompression};
use countio::Counter;
use twox_hash::XxHash3_64;
use crate::header::{HEADER_SIZE, MAX_INITIAL_BYTES};
use crate::{
Compression, DirEntry, Directory, Header, PmtError, PmtResult, TileCoord, TileId, TileType,
};
/// Space available for the compressed root directory inside the reserved
/// initial region (everything before it is the fixed-size header).
const MAX_ROOT_DIR_BYTES: usize = MAX_INITIAL_BYTES - HEADER_SIZE;
/// Builder for PMTiles archives: configures header fields, metadata, and
/// compression, then starts streaming tiles via [`PmTilesWriter::create`].
pub struct PmTilesWriter {
    // Header values accumulated by the builder methods.
    header: Header,
    // Metadata document; defaults to an empty JSON object `{}`.
    metadata: String,
    // Compressor applied to tile payloads added via `add_tile`.
    tile_compressor: Box<dyn Compressor>,
    // Compressor applied to internal structures (metadata and directories).
    internal_compressor: Box<dyn Compressor>,
}
/// Location of one tile's written content: `offset` is relative to the start
/// of the tile-data section, `length` is the compressed byte count.
struct TileContentLocation {
    offset: u64,
    length: u32,
}
/// Streaming PMTiles writer produced by [`PmTilesWriter::create`].
/// Add tiles with `add_tile`/`add_raw_tile`, then call `finalize`.
pub struct PmTilesStreamWriter<W: Write + Seek> {
    state: WriterState<W>,
    // Compressor for tile payloads (bypassed by `add_raw_tile`).
    tile_compressor: Box<dyn Compressor>,
    // Compressor for metadata and directories.
    internal_compressor: Box<dyn Compressor>,
}
/// Mutable writing state: the output stream, directory entries collected so
/// far, and bookkeeping for run-length encoding and content deduplication.
struct WriterState<W: Write + Seek> {
    // Byte-counting wrapper so absolute file offsets are always known.
    out: Counter<BufWriter<W>>,
    header: Header,
    // One directory entry per distinct (tile_id, content) run.
    entries: Vec<DirEntry>,
    // Total tiles added, including those folded into a run length.
    n_addressed_tiles: u64,
    // Number of directory entries pushed (run extensions not counted).
    n_tile_entries: u64,
    // Content-hash -> written location; deduplicates identical tile bytes.
    tile_content_map: HashMap<u64, TileContentLocation, BuildHasherDefault<XxHash3_64>>,
    // Hash of the most recently added tile, used for run-length extension.
    prev_tile_hash: Option<u64>,
    // Offset within the data section where the next new tile will land.
    prev_written_tile_offset: u64,
}
/// Borrowed adapter exposing a `&mut dyn Write` as a concrete `Write` type.
struct DynWriter<'a>(&'a mut dyn Write);
impl Write for DynWriter<'_> {
    // Both methods simply forward to the wrapped writer.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.0.write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        self.0.flush()
    }
}
/// Serialization helper: anything that can write itself to a stream, with
/// default methods for compressed output, byte counting, and size probing.
pub(crate) trait WriteTo {
    /// Serializes `self` to `writer` without any compression.
    fn write_to<W: Write>(&self, writer: &mut W) -> std::io::Result<()>;

    /// Serializes `self` through `compressor` into `writer`.
    fn write_compressed_to<W: Write>(
        &self,
        writer: &mut W,
        compressor: &dyn Compressor,
    ) -> PmtResult<()> {
        compressor.compress(&mut |enc| self.write_to(&mut DynWriter(enc)), writer)
    }

    /// Serializes compressed and returns how many bytes were produced.
    fn write_compressed_to_counted<W: Write>(
        &self,
        writer: &mut Counter<W>,
        compressor: &dyn Compressor,
    ) -> PmtResult<usize> {
        let start = writer.writer_bytes();
        self.write_compressed_to(writer, compressor)?;
        Ok(writer.writer_bytes() - start)
    }

    /// Computes the compressed size by writing to a counting sink,
    /// discarding the output.
    fn compressed_size(&self, compressor: &dyn Compressor) -> PmtResult<usize> {
        let mut sink = Counter::new(std::io::sink());
        self.write_compressed_to(&mut sink, compressor)?;
        Ok(sink.writer_bytes())
    }
}
impl WriteTo for [u8] {
    /// Byte slices serialize as-is.
    fn write_to<W: Write>(&self, writer: &mut W) -> std::io::Result<()> {
        writer.write_all(self)
    }
}
impl PmTilesWriter {
    /// Creates a writer builder for `tile_type`.
    ///
    /// Defaults: gzip tile compression for MVT, no tile compression for any
    /// other tile type; gzip for internal structures (metadata/directories);
    /// metadata set to the empty JSON object `"{}"`.
    #[must_use]
    pub fn new(tile_type: TileType) -> Self {
        let tile_compressor: Box<dyn Compressor> = match tile_type {
            TileType::Mvt => Box::new(GzipCompressor::default()),
            _ => Box::new(NoCompression),
        };
        let internal_compressor: Box<dyn Compressor> = Box::new(GzipCompressor::default());
        let header = Header::new(tile_compressor.compression(), tile_type);
        Self {
            header,
            metadata: "{}".to_string(),
            tile_compressor,
            internal_compressor,
        }
    }
    /// Sets the compression used for metadata and directories.
    #[must_use]
    pub fn internal_compression(mut self, compression: Compression) -> Self {
        self.header.internal_compression = compression;
        self.internal_compressor = compression.into();
        self
    }
    /// Sets the compression used for tile payloads added via `add_tile`.
    #[must_use]
    pub fn tile_compression(mut self, compression: Compression) -> Self {
        self.header.tile_compression = compression;
        self.tile_compressor = compression.into();
        self
    }
    /// Uses a custom compressor for tile payloads; the header's tile
    /// compression is taken from the compressor itself.
    #[must_use]
    pub fn tile_codec(mut self, compressor: impl Compressor + 'static) -> Self {
        self.header.tile_compression = compressor.compression();
        self.tile_compressor = Box::new(compressor);
        self
    }
    /// Uses a custom compressor for metadata and directories.
    #[must_use]
    pub fn internal_codec(mut self, compressor: impl Compressor + 'static) -> Self {
        self.header.internal_compression = compressor.compression();
        self.internal_compressor = Box::new(compressor);
        self
    }
    /// Sets the minimum zoom level recorded in the header.
    #[must_use]
    pub fn min_zoom(mut self, level: u8) -> Self {
        self.header.min_zoom = level;
        self
    }
    /// Sets the maximum zoom level recorded in the header.
    #[must_use]
    pub fn max_zoom(mut self, level: u8) -> Self {
        self.header.max_zoom = level;
        self
    }
    /// Sets the bounding box (in degrees) recorded in the header.
    #[must_use]
    pub fn bounds(mut self, min_lon: f64, min_lat: f64, max_lon: f64, max_lat: f64) -> Self {
        self.header.min_latitude = min_lat;
        self.header.min_longitude = min_lon;
        self.header.max_latitude = max_lat;
        self.header.max_longitude = max_lon;
        self
    }
    /// Sets the center zoom level recorded in the header.
    #[must_use]
    pub fn center_zoom(mut self, level: u8) -> Self {
        self.header.center_zoom = level;
        self
    }
    /// Sets the center position (in degrees) recorded in the header.
    #[must_use]
    pub fn center(mut self, lon: f64, lat: f64) -> Self {
        self.header.center_latitude = lat;
        self.header.center_longitude = lon;
        self
    }
    /// Sets the metadata document (written compressed with the internal codec).
    #[must_use]
    pub fn metadata(mut self, metadata: &str) -> Self {
        self.metadata = metadata.to_string();
        self
    }
    /// Starts writing the archive to `writer` and returns a stream writer.
    ///
    /// Zero-fills the maximal header + root-directory region (overwritten
    /// later by `finalize`), then writes the compressed metadata immediately
    /// after it.
    pub fn create<W: Write + Seek>(self, writer: W) -> PmtResult<PmTilesStreamWriter<W>> {
        let mut out = Counter::new(BufWriter::new(writer));
        // Reserve space for the header and root directory up front.
        out.write_all(&[0u8; MAX_INITIAL_BYTES])?;
        let metadata_length = self
            .metadata
            .as_bytes()
            .write_compressed_to_counted(&mut out, &self.internal_compressor)?
            as u64;
        let mut state = WriterState {
            out,
            header: self.header,
            entries: Vec::new(),
            n_addressed_tiles: 0,
            n_tile_entries: 0,
            tile_content_map: HashMap::default(),
            prev_tile_hash: None,
            prev_written_tile_offset: 0,
        };
        state.header.metadata_length = metadata_length;
        // Tile data begins right after the reserved region plus the metadata.
        state.header.data_offset = MAX_INITIAL_BYTES as u64 + metadata_length;
        let writer = PmTilesStreamWriter {
            state,
            tile_compressor: self.tile_compressor,
            internal_compressor: self.internal_compressor,
        };
        Ok(writer)
    }
}
impl<W: Write + Seek> PmTilesStreamWriter<W> {
    /// Adds a tile, compressing the payload with the configured tile codec.
    /// Adding tiles out of ascending ID order marks the archive unclustered.
    pub fn add_tile(&mut self, coord: TileCoord, data: &[u8]) -> PmtResult<()> {
        self.state
            .add_tile_by_id(coord.into(), data, &self.tile_compressor)
    }
    /// Adds an already-compressed tile; the payload is written verbatim.
    pub fn add_raw_tile(&mut self, coord: TileCoord, data: &[u8]) -> PmtResult<()> {
        self.state
            .add_tile_by_id(coord.into(), data, &NoCompression)
    }
}
impl<W: Write + Seek> WriterState<W> {
    /// Appends one tile, deduplicating identical content.
    ///
    /// Empty payloads are skipped. Consecutive tiles with identical content
    /// extend the previous entry's run length instead of adding a new entry;
    /// non-consecutive duplicates reuse the previously written data location
    /// so the bytes are stored only once.
    fn add_tile_by_id(
        &mut self,
        tile_id: TileId,
        data: &[u8],
        compressor: &dyn Compressor,
    ) -> PmtResult<()> {
        if data.is_empty() {
            // Absent tiles are simply not addressed.
            return Ok(());
        }
        let tile_id = tile_id.value();
        let mut last_entry = self.entries.last_mut();
        // Content hash drives both run-length extension and deduplication.
        let tile_hash: u64 = XxHash3_64::oneshot(data);
        self.n_addressed_tiles += 1;
        if let Some(ref mut last_entry) = last_entry {
            if self.prev_tile_hash == Some(tile_hash)
                && tile_id == last_entry.tile_id + u64::from(last_entry.run_length)
            {
                // Same content as the previous tile with a contiguous ID:
                // extend the run instead of emitting a new entry.
                last_entry.run_length += 1;
                return Ok(());
            }
            if tile_id < last_entry.tile_id + u64::from(last_entry.run_length) {
                // Out-of-order insertion: the archive is no longer clustered
                // and entries will be sorted before building directories.
                self.header.clustered = false;
            }
        }
        // Write the (compressed) tile only the first time this content is
        // seen; afterwards reuse the recorded location.
        // NOTE(review): dedup trusts the 64-bit hash alone — a collision
        // would silently alias two different tiles.
        let loc = match self.tile_content_map.entry(tile_hash) {
            Entry::Occupied(e) => e.into_mut(),
            Entry::Vacant(e) => {
                let offset = self.prev_written_tile_offset;
                let len = data.write_compressed_to_counted(&mut self.out, compressor)?;
                self.prev_written_tile_offset += len as u64;
                let length = into_u32(len)?;
                e.insert(TileContentLocation { offset, length })
            }
        };
        self.prev_tile_hash = Some(tile_hash);
        self.n_tile_entries += 1;
        self.entries.push(DirEntry {
            tile_id,
            run_length: 1, offset: loc.offset,
            length: loc.length,
        });
        Ok(())
    }
    /// Writes leaf directories (if any) after the tile data and returns the
    /// root directory, updating `leaf_offset`/`leaf_length` in the header.
    fn build_directories(&mut self, compressor: &dyn Compressor) -> PmtResult<Directory> {
        if !self.header.clustered {
            // Directory entries must be ordered by tile ID.
            self.entries.sort_by_key(|entry| entry.tile_id);
        }
        let (root_dir, leaf_dirs) = self.optimize_directories(MAX_ROOT_DIR_BYTES, compressor)?;
        let mut leaves_bytes = 0usize;
        if !leaf_dirs.is_empty() {
            // Leaves start at the current absolute file position.
            self.header.leaf_offset = self.out.writer_bytes() as u64;
        }
        for leaf in &leaf_dirs {
            leaves_bytes += leaf.write_compressed_to_counted(&mut self.out, compressor)?;
        }
        self.header.leaf_length = leaves_bytes as u64;
        Ok(root_dir)
    }
    /// Splits entries into a root directory plus leaf directories such that
    /// the compressed root fits into `target_root_len` bytes.
    fn optimize_directories(
        &mut self,
        target_root_len: usize,
        compressor: &dyn Compressor,
    ) -> PmtResult<(Directory, Vec<Directory>)> {
        if self.entries.len() < 16_384 {
            // Small archives: first try putting everything into the root.
            let root_dir = Directory::from_entries(std::mem::take(&mut self.entries));
            let root_bytes = root_dir.compressed_size(compressor)?;
            if root_bytes <= target_root_len {
                return Ok((root_dir, vec![]));
            }
            // Too large after all — take the entries back and fall through.
            self.entries = root_dir.entries;
        }
        // Initial guess for entries per leaf; grown until the root fits.
        let mut leaf_size = (self.entries.len() / 3500).max(4096);
        loop {
            let (root_dir, leaf_dirs) = self.build_roots_leaves(leaf_size, compressor)?;
            let root_bytes = root_dir.compressed_size(compressor)?;
            if root_bytes <= target_root_len {
                return Ok((root_dir, leaf_dirs));
            }
            // Root still too big: enlarge leaves by ~20% and retry.
            leaf_size += leaf_size / 5; }
    }
    /// Chunks the entries into leaves of `leaf_size` entries and builds the
    /// matching root (one entry per leaf, marked with `run_length == 0`).
    fn build_roots_leaves(
        &self,
        leaf_size: usize,
        compressor: &dyn Compressor,
    ) -> PmtResult<(Directory, Vec<Directory>)> {
        let mut root_dir = Directory::with_capacity(self.entries.len() / leaf_size);
        let mut leaves = Vec::with_capacity(self.entries.len() / leaf_size);
        let mut offset = 0;
        for chunk in self.entries.chunks(leaf_size) {
            let leaf = Directory::from_entries(chunk.to_vec());
            let leaf_size = leaf.compressed_size(compressor)?;
            leaves.push(leaf);
            root_dir.push(DirEntry {
                // Each root entry points at the first tile ID of its leaf.
                tile_id: chunk[0].tile_id,
                offset,
                length: into_u32(leaf_size)?,
                run_length: 0,
            });
            offset += leaf_size as u64;
        }
        Ok((root_dir, leaves))
    }
}
impl<W: Write + Seek> PmTilesStreamWriter<W> {
    /// Finishes the archive: computes final header fields, appends leaf
    /// directories, then rewinds and overwrites the reserved leading bytes
    /// with the header and root directory.
    ///
    /// Must be called after all tiles were added; the header and directories
    /// are only written here.
    pub fn finalize(mut self) -> PmtResult<()> {
        let state = &mut self.state;
        // data_length must be computed BEFORE leaves are appended below.
        state.header.data_length = state.out.writer_bytes() as u64
            - MAX_INITIAL_BYTES as u64
            - state.header.metadata_length;
        let root_dir = state.build_directories(&self.internal_compressor)?;
        // Counts are stored as Option<NonZero> — zero becomes None.
        state.header.n_addressed_tiles = state.n_addressed_tiles.try_into().ok();
        state.header.n_tile_contents = (state.tile_content_map.len() as u64).try_into().ok();
        state.header.n_tile_entries = state.n_tile_entries.try_into().ok();
        let mut root_dir_buf = vec![];
        root_dir.write_compressed_to(&mut root_dir_buf, &self.internal_compressor)?;
        state.header.root_length = root_dir_buf.len() as u64;
        // Overwrite the zero padding written in `create` with the final
        // header followed by the root directory.
        state.out.rewind()?;
        state.header.write_to(&mut state.out)?;
        state.out.write_all(&root_dir_buf)?;
        state.out.flush()?;
        Ok(())
    }
}
/// Converts a byte count to `u32`, failing with `IndexEntryOverflow` when it
/// does not fit (directory entry lengths are 32-bit).
fn into_u32(v: usize) -> PmtResult<u32> {
    u32::try_from(v).map_err(|_| PmtError::IndexEntryOverflow)
}
// Writer tests: these exercise the full write path and read the result back
// with the async reader, hence the `mmap-async-tokio` feature requirement.
#[cfg(test)]
#[cfg(feature = "mmap-async-tokio")]
#[expect(clippy::float_cmp)]
mod tests {
    use std::fs::File;
    use std::num::NonZeroU64;
    use std::sync::Arc;
    use futures_util::TryStreamExt;
    use rstest::rstest;
    use tempfile::NamedTempFile;
    use crate::tests::RASTER_FILE;
    use crate::writer::GzipCompressor;
    use crate::{
        AsyncPmTilesReader, Compression, MmapBackend, PmTilesWriter, TileCoord, TileId, TileType,
    };
    // Returns a unique temp path. NOTE(review): the NamedTempFile is dropped
    // (and deleted) before returning, so only the path string survives and
    // callers re-create the file — slightly racy, and the file is not
    // cleaned up after the test.
    fn get_temp_file_path(suffix: &str) -> std::io::Result<String> {
        let temp_file = NamedTempFile::with_suffix(suffix)?;
        Ok(temp_file.path().to_string_lossy().into_owned())
    }
    // Copies every tile of the raster fixture into a new archive and checks
    // that header fields, metadata, and sample tiles survive the round trip.
    #[tokio::test]
    async fn roundtrip_raster() {
        let backend = MmapBackend::try_from(RASTER_FILE).await.unwrap();
        let tiles_in = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        let header_in = tiles_in.get_header();
        let metadata_in = tiles_in.get_metadata().await.unwrap();
        let num_tiles = header_in.n_addressed_tiles.unwrap();
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(path.clone()).unwrap();
        let mut writer = PmTilesWriter::new(header_in.tile_type)
            .max_zoom(header_in.max_zoom)
            .metadata(&metadata_in)
            .create(file)
            .unwrap();
        // Assumes the fixture addresses tile IDs 0..num_tiles contiguously.
        for id in 0..num_tiles.into() {
            let id = TileId::new(id).unwrap();
            let tile = tiles_in.get_tile(id).await.unwrap().unwrap();
            writer.add_raw_tile(id.into(), &tile).unwrap();
        }
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        let header_out = tiles_out.get_header();
        assert_eq!(header_in.tile_type, header_out.tile_type);
        assert_eq!(header_in.n_addressed_tiles, header_out.n_addressed_tiles);
        assert_eq!(header_in.n_tile_entries, header_out.n_tile_entries);
        assert_eq!(header_in.n_tile_contents, header_out.n_tile_contents);
        assert_eq!(header_in.min_zoom, header_out.min_zoom);
        assert_eq!(header_in.max_zoom, header_out.max_zoom);
        assert_eq!(header_in.center_zoom, header_out.center_zoom);
        assert_eq!(header_in.center_latitude, header_out.center_latitude);
        assert_eq!(header_in.center_longitude, header_out.center_longitude);
        // Latitudes are compared rounded — presumably the fixture's bounds
        // are not preserved bit-exactly by the writer defaults.
        assert_eq!(
            header_in.min_latitude.round(),
            header_out.min_latitude.round()
        );
        assert_eq!(
            header_in.max_latitude.round(),
            header_out.max_latitude.round()
        );
        assert_eq!(header_in.min_longitude, header_out.min_longitude);
        assert_eq!(header_in.max_longitude, header_out.max_longitude);
        assert_eq!(header_in.clustered, header_out.clustered);
        let metadata_out = tiles_out.get_metadata().await.unwrap();
        assert_eq!(metadata_in, metadata_out);
        for (z, x, y) in [(0, 0, 0), (2, 2, 2), (3, 4, 5)] {
            let coord = TileCoord::new(z, x, y).unwrap();
            let tile_in = tiles_in.get_tile(coord).await.unwrap().unwrap();
            let tile_out = tiles_out.get_tile(coord).await.unwrap().unwrap();
            assert_eq!(tile_in.len(), tile_out.len());
        }
    }
    // Writes `num_tiles` tiles whose payload is the tile ID in little-endian
    // bytes (all unique, so no dedup) and returns the archive path.
    fn gen_entries(num_tiles: u64) -> String {
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Png)
            .internal_compression(Compression::None)
            .create(file)
            .unwrap();
        for tile_id in 0..num_tiles {
            let data: Vec<u8> = tile_id.to_le_bytes().to_vec();
            writer
                .add_tile(TileId::new(tile_id).unwrap().into(), &data)
                .unwrap();
        }
        writer.finalize().unwrap();
        path
    }
    // Reads a `gen_entries` archive back and checks the header counts, the
    // entry/coordinate coverage, and the first/last payloads.
    async fn verify_entries(file_path: &str, num_tiles: u64) {
        let backend = MmapBackend::try_from(file_path).await.unwrap();
        let tiles_out = Arc::new(AsyncPmTilesReader::try_from_source(backend).await.unwrap());
        let header_out = tiles_out.get_header();
        assert_eq!(header_out.n_addressed_tiles, NonZeroU64::new(num_tiles));
        assert_eq!(header_out.n_tile_entries, NonZeroU64::new(num_tiles));
        assert_eq!(header_out.n_tile_contents, NonZeroU64::new(num_tiles));
        let entries = tiles_out
            .clone()
            .entries()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        let coords = entries
            .iter()
            .flat_map(|e| e.iter_coords())
            .collect::<Vec<_>>();
        assert_eq!(coords.len(), usize::try_from(num_tiles).unwrap());
        for tile_id in &[coords.first().unwrap(), coords.last().unwrap()] {
            let data: Vec<u8> = tile_id.value().to_le_bytes().to_vec();
            let tile_out = tiles_out.get_tile(**tile_id).await.unwrap().unwrap();
            assert_eq!(tile_out, data);
        }
    }
    // 100 entries fit entirely into the root directory.
    #[tokio::test]
    async fn no_leaves() {
        let path = gen_entries(100);
        verify_entries(&path, 100).await;
    }
    // 20000 entries force the writer to spill into leaf directories.
    #[tokio::test]
    async fn with_leaves() {
        let path = gen_entries(20000);
        verify_entries(&path, 20000).await;
    }
    // Adding a tile with a lower ID than the previous one must clear the
    // `clustered` flag (entries get sorted during finalize).
    #[test]
    fn unclustered() {
        let file = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(file).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Png).create(file).unwrap();
        assert_eq!(writer.state.header.tile_compression, Compression::None);
        let id = TileId::new(2).unwrap();
        writer.add_tile(id.into(), &[0, 1, 2, 3]).unwrap();
        assert!(writer.state.header.clustered);
        let id = TileId::new(0).unwrap();
        writer.add_tile(id.into(), &[0, 1, 2, 3]).unwrap();
        assert!(!writer.state.header.clustered);
        writer.finalize().unwrap();
    }
    // `add_raw_tile` must bypass the tile compressor while `add_tile` must
    // apply it, for every supported tile compression.
    #[rstest]
    #[case(Compression::Gzip)]
    #[cfg_attr(feature = "brotli", case(Compression::Brotli))]
    #[cfg_attr(feature = "zstd", case(Compression::Zstd))]
    #[tokio::test]
    async fn raw_tiles(#[case] compression: Compression) {
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Mvt)
            .tile_compression(compression)
            .create(file)
            .unwrap();
        let precompressed_id = TileId::new(0).unwrap();
        writer.add_raw_tile(precompressed_id.into(), &[0]).unwrap();
        let regular_id = TileId::new(1).unwrap();
        writer.add_tile(regular_id.into(), &[1]).unwrap();
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        let header = tiles_out.get_header();
        assert_eq!(header.tile_compression, compression);
        let precompressed_tile_raw = tiles_out.get_tile(precompressed_id).await.unwrap().unwrap();
        assert_eq!(*precompressed_tile_raw, [0]);
        let regular_tile = tiles_out
            .get_tile_decompressed(regular_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(*regular_tile, [1]);
    }
    // Metadata and directories must round-trip under each internal codec.
    #[rstest]
    #[case(Compression::Gzip)]
    #[cfg_attr(feature = "brotli", case(Compression::Brotli))]
    #[cfg_attr(feature = "zstd", case(Compression::Zstd))]
    #[tokio::test]
    async fn internal_compression_roundtrip(#[case] compression: Compression) {
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let test_metadata = r#"{"name":"test","description":"internal compression test"}"#;
        let mut writer = PmTilesWriter::new(TileType::Mvt)
            .internal_compression(compression)
            .metadata(test_metadata)
            .create(file)
            .unwrap();
        for tile_id in 0..100u64 {
            let data: Vec<u8> = tile_id.to_le_bytes().to_vec();
            writer
                .add_tile(TileId::new(tile_id).unwrap().into(), &data)
                .unwrap();
        }
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = Arc::new(AsyncPmTilesReader::try_from_source(backend).await.unwrap());
        let header = tiles_out.get_header();
        assert_eq!(header.internal_compression, compression);
        let metadata_out = tiles_out.get_metadata().await.unwrap();
        assert_eq!(metadata_out, test_metadata);
        let entries = tiles_out
            .clone()
            .entries()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(entries.len(), 100);
        let tile_0 = tiles_out
            .get_tile_decompressed(TileId::new(0).unwrap())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(*tile_0, 0u64.to_le_bytes());
        let tile_99 = tiles_out
            .get_tile_decompressed(TileId::new(99).unwrap())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(*tile_99, 99u64.to_le_bytes());
    }
    // Identical content at non-consecutive IDs must share one data location
    // (dedup) without being folded into a run (run_length stays 1).
    #[tokio::test]
    async fn dedup_nonconsecutive_tiles_no_rle() {
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Png)
            .internal_compression(Compression::None)
            .create(file)
            .unwrap();
        let a = b"ABC";
        let b = b"X";
        let c = b"ABC";
        writer.add_tile(TileId::new(0).unwrap().into(), a).unwrap();
        writer.add_tile(TileId::new(1).unwrap().into(), b).unwrap();
        writer.add_tile(TileId::new(2).unwrap().into(), c).unwrap();
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = Arc::new(AsyncPmTilesReader::try_from_source(backend).await.unwrap());
        let header = tiles_out.get_header();
        assert_eq!(header.n_addressed_tiles, NonZeroU64::new(3));
        assert_eq!(header.n_tile_entries, NonZeroU64::new(3));
        // Only two distinct contents were stored.
        assert_eq!(header.n_tile_contents, NonZeroU64::new(2));
        let entries = tiles_out
            .clone()
            .entries()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(entries.len(), 3);
        let e0 = entries.iter().find(|e| e.tile_id == 0).unwrap();
        let e1 = entries.iter().find(|e| e.tile_id == 1).unwrap();
        let e2 = entries.iter().find(|e| e.tile_id == 2).unwrap();
        assert_eq!(e0.run_length, 1);
        assert_eq!(e1.run_length, 1);
        assert_eq!(e2.run_length, 1);
        // Duplicated content points at the same offset/length.
        assert_eq!(e0.offset, e2.offset);
        assert_eq!(e0.length, e2.length);
        assert_ne!(e1.offset, e0.offset);
    }
    // Custom zstd level via `tile_codec` must round-trip.
    #[cfg(feature = "zstd")]
    #[tokio::test]
    async fn zstd_compressor_roundtrip() {
        use super::compressor::ZstdCompressor;
        let test_data = b"hello pmtiles zstd compressor with custom level";
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Mvt)
            .tile_codec(ZstdCompressor(19))
            .create(file)
            .unwrap();
        let tile_id = TileId::new(0).unwrap();
        writer.add_tile(tile_id.into(), test_data).unwrap();
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        assert_eq!(tiles_out.get_header().tile_compression, Compression::Zstd);
        let tile = tiles_out
            .get_tile_decompressed(tile_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(&*tile, test_data);
    }
    // Gzip at the maximum level via `tile_codec` must round-trip.
    #[tokio::test]
    async fn gzip_compressor_best_roundtrip() {
        let test_data = b"hello pmtiles gzip best compressor with enough data to see a difference";
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Mvt)
            .tile_codec(GzipCompressor(flate2::Compression::best()))
            .create(file)
            .unwrap();
        let tile_id = TileId::new(0).unwrap();
        writer.add_tile(tile_id.into(), test_data).unwrap();
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        assert_eq!(tiles_out.get_header().tile_compression, Compression::Gzip);
        let tile = tiles_out
            .get_tile_decompressed(tile_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(&*tile, test_data);
    }
    // A user-supplied `Compressor` impl must be usable as a tile codec.
    #[tokio::test]
    async fn custom_compressor_roundtrip() {
        use super::{Compressor, NoCompression};
        struct CustomTestCompressor;
        impl Compressor for CustomTestCompressor {
            fn compression(&self) -> Compression {
                Compression::None
            }
            fn compress(
                &self,
                f: &mut dyn FnMut(&mut dyn std::io::Write) -> std::io::Result<()>,
                writer: &mut dyn std::io::Write,
            ) -> crate::PmtResult<()> {
                NoCompression.compress(f, writer)
            }
        }
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Png)
            .tile_codec(CustomTestCompressor)
            .create(file)
            .unwrap();
        let test_data = b"custom compressor test data";
        let tile_id = TileId::new(0).unwrap();
        writer.add_tile(tile_id.into(), test_data).unwrap();
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        let tile = tiles_out.get_tile(tile_id).await.unwrap().unwrap();
        assert_eq!(&*tile, test_data);
    }
    // Custom brotli parameters via `tile_codec` must round-trip.
    #[cfg(feature = "brotli")]
    #[tokio::test]
    async fn brotli_compressor_roundtrip() {
        use super::compressor::BrotliCompressor;
        let test_data = b"hello pmtiles brotli compressor with enough data to see a difference";
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Mvt)
            .tile_codec(BrotliCompressor(brotli::enc::BrotliEncoderParams {
                quality: 5,
                ..Default::default()
            }))
            .create(file)
            .unwrap();
        let tile_id = TileId::new(0).unwrap();
        writer.add_tile(tile_id.into(), test_data).unwrap();
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = AsyncPmTilesReader::try_from_source(backend).await.unwrap();
        assert_eq!(tiles_out.get_header().tile_compression, Compression::Brotli);
        let tile = tiles_out
            .get_tile_decompressed(tile_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(&*tile, test_data);
    }
    // `internal_codec` with custom gzip settings must round-trip metadata,
    // directories, and tiles.
    #[tokio::test]
    async fn internal_codec_roundtrip() {
        let test_metadata = r#"{"name":"test","description":"internal codec test"}"#;
        let path = get_temp_file_path("pmtiles").unwrap();
        let file = File::create(&path).unwrap();
        let mut writer = PmTilesWriter::new(TileType::Mvt)
            .internal_codec(GzipCompressor(flate2::Compression::best()))
            .metadata(test_metadata)
            .create(file)
            .unwrap();
        for tile_id in 0..100u64 {
            let data: Vec<u8> = tile_id.to_le_bytes().to_vec();
            writer
                .add_tile(TileId::new(tile_id).unwrap().into(), &data)
                .unwrap();
        }
        writer.finalize().unwrap();
        let backend = MmapBackend::try_from(&path).await.unwrap();
        let tiles_out = Arc::new(AsyncPmTilesReader::try_from_source(backend).await.unwrap());
        let header = tiles_out.get_header();
        assert_eq!(header.internal_compression, Compression::Gzip);
        let metadata_out = tiles_out.get_metadata().await.unwrap();
        assert_eq!(metadata_out, test_metadata);
        let entries = tiles_out
            .clone()
            .entries()
            .try_collect::<Vec<_>>()
            .await
            .unwrap();
        assert_eq!(entries.len(), 100);
        let tile_0 = tiles_out
            .get_tile_decompressed(TileId::new(0).unwrap())
            .await
            .unwrap()
            .unwrap();
        assert_eq!(*tile_0, 0u64.to_le_bytes());
    }
}