use crate::field_encoder::build_encoders;
use crate::header::encode_header;
use crate::types::{CompressionOption, EncodingInfo, POINTS_PER_CHUNK};
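/// Streaming encoder for point cloud data.
///
/// The encoded output is the pre-built header followed by one or more chunks,
/// each framed as a little-endian `u32` byte length and the compressed chunk
/// payload.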
pub struct PointcloudEncoder {
    /// Field layout, point stride, and encoding/compression options.
    info: EncodingInfo,
    /// Header bytes produced once from `info` at construction time.
    header: Vec<u8>,
    /// Whether chunks are encoded on the rayon thread pool.
    #[cfg(feature = "parallel")]
    threads: bool,
}
impl PointcloudEncoder {
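    /// Creates an encoder for `info`, pre-computing the stream header once.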
pub fn new(info: EncodingInfo) -> Self {
let header = encode_header(&info);
Self {
info,
header,
#[cfg(feature = "parallel")]
threads: false,
}
}
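    /// Enables or disables multi-threaded chunk encoding.
    ///
    /// The flag only takes effect when the crate is built with the `parallel`
    /// feature; otherwise it is ignored.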
pub fn with_threads(self, threads: bool) -> Self {
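        // Consume the parameter so builds without the `parallel` feature do
        // not warn about an unused variable.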
let _ = threads;
#[cfg(feature = "parallel")]
return Self { threads, ..self };
#[cfg(not(feature = "parallel"))]
return self;
}
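    /// Returns the encoding parameters this encoder was constructed with.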
pub fn encoding_info(&self) -> &EncodingInfo {
&self.info
}
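    /// Returns the pre-computed header that prefixes every encoded stream.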
pub fn header_bytes(&self) -> &[u8] {
&self.header
}
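    /// Encodes `cloud_data` into a self-contained stream: the header followed
    /// by length-prefixed chunks.
    ///
    /// Returns an error if `point_step` is zero or if the input length is not
    /// a multiple of `point_step`.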
pub fn encode(&self, cloud_data: &[u8]) -> crate::Result<Vec<u8>> {
if self.info.point_step == 0 {
return Err(crate::Error::ZeroPointStep);
}
if cloud_data.len() % self.info.point_step as usize != 0 {
return Err(crate::Error::DataLengthMismatch);
}
let mut out = Vec::with_capacity(self.header.len() + cloud_data.len() + 8);
out.extend_from_slice(&self.header);
self.encode_chunks(cloud_data, &mut out)?;
Ok(out)
}
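    /// Encodes `cloud_data` as length-prefixed chunks appended to `out`,
    /// without writing the header.
    ///
    /// Callers are expected to validate `point_step` first (as
    /// [`encode`](Self::encode) does); a zero `point_step` would cause a
    /// division panic here.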
pub fn encode_chunks(&self, cloud_data: &[u8], out: &mut Vec<u8>) -> crate::Result<()> {
let point_step = self.info.point_step as usize;
let points_count = cloud_data.len() / point_step;
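        // Worst-case encoded size of a single point: the sum of each field's
        // bound, but never smaller than the raw point stride.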
        let max_encoded_per_point = self
            .info
            .fields
            .iter()
            .map(|f| max_field_encoded_size(f, self.info.encoding_opt))
            .sum::<usize>()
            .max(point_step);
#[cfg(feature = "parallel")]
if self.threads {
return encode_chunks_parallel(
&self.info,
cloud_data,
out,
point_step,
points_count,
max_encoded_per_point,
);
}
encode_chunks_sequential(
&self.info,
cloud_data,
out,
point_step,
points_count,
max_encoded_per_point,
)
}
}
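/// Single-threaded encoding path.
///
/// Walks the cloud in `POINTS_PER_CHUNK`-sized chunks, encodes each chunk into
/// a reusable scratch buffer, compresses it, and appends it to `out` behind a
/// little-endian `u32` length prefix.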
fn encode_chunks_sequential(
info: &EncodingInfo,
cloud_data: &[u8],
out: &mut Vec<u8>,
point_step: usize,
points_count: usize,
max_encoded_per_point: usize,
) -> crate::Result<()> {
let mut encoders = build_encoders(&info.fields, info.encoding_opt);
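    // One scratch buffer sized for a worst-case chunk (plus a little slack),
    // reused across all chunks.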
let max_scratch_cap = POINTS_PER_CHUNK * max_encoded_per_point + 64;
let mut scratch = vec![0u8; max_scratch_cap];
let mut point_offset = 0;
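    // Encoders are reset at the start of every chunk, matching the fresh
    // per-chunk encoder state used by the parallel path.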
while point_offset < points_count {
let chunk_end = (point_offset + POINTS_PER_CHUNK).min(points_count);
for enc in &mut encoders {
enc.reset();
}
let mut scratch_pos = 0;
for pt_idx in point_offset..chunk_end {
let point = &cloud_data[pt_idx * point_step..(pt_idx + 1) * point_step];
for enc in &mut encoders {
let written = enc.encode(point, &mut scratch[scratch_pos..]);
scratch_pos += written;
}
}
let encoded_data = &scratch[..scratch_pos];
let compressed = compress_chunk(encoded_data, info.compression_opt)?;
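        // Frame the chunk: little-endian u32 compressed length, then payload.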
let chunk_size = compressed.len() as u32;
out.extend_from_slice(&chunk_size.to_le_bytes());
out.extend_from_slice(&compressed);
point_offset = chunk_end;
}
Ok(())
}
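/// Multi-threaded encoding path (requires the `parallel` feature).
///
/// Each chunk is encoded and compressed independently on the rayon thread
/// pool with its own encoders and scratch buffer, then the compressed chunks
/// are appended to `out` in chunk order so the stream layout matches the
/// sequential path.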
#[cfg(feature = "parallel")]
fn encode_chunks_parallel(
info: &EncodingInfo,
cloud_data: &[u8],
out: &mut Vec<u8>,
point_step: usize,
points_count: usize,
max_encoded_per_point: usize,
) -> crate::Result<()> {
use rayon::prelude::*;
let n_chunks = points_count.div_ceil(POINTS_PER_CHUNK);
let chunk_results: Vec<crate::Result<Vec<u8>>> = (0..n_chunks)
.into_par_iter()
.map(|chunk_idx| {
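            // Fresh encoders and a private scratch buffer per chunk keep the
            // closure free of shared mutable state.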
let point_offset = chunk_idx * POINTS_PER_CHUNK;
let chunk_end = (point_offset + POINTS_PER_CHUNK).min(points_count);
let chunk_point_count = chunk_end - point_offset;
let mut encoders = build_encoders(&info.fields, info.encoding_opt);
let scratch_cap = chunk_point_count * max_encoded_per_point + 64;
let mut scratch = vec![0u8; scratch_cap];
let mut scratch_pos = 0;
for pt_idx in point_offset..chunk_end {
let point = &cloud_data[pt_idx * point_step..(pt_idx + 1) * point_step];
for enc in &mut encoders {
let written = enc.encode(point, &mut scratch[scratch_pos..]);
scratch_pos += written;
}
}
compress_chunk(&scratch[..scratch_pos], info.compression_opt)
})
.collect();
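    // Append compressed chunks in order, surfacing the first error if any
    // chunk failed to compress.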
for result in chunk_results {
let compressed = result?;
out.extend_from_slice(&(compressed.len() as u32).to_le_bytes());
out.extend_from_slice(&compressed);
}
Ok(())
}
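/// Upper bound on the number of bytes a single field can occupy once encoded,
/// used to size scratch buffers.
///
/// Integer fields reserve 10 bytes; floats keep their raw width unless lossy
/// encoding with a configured resolution is in effect, in which case they may
/// expand to the 10-byte integer bound as well.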
fn max_field_encoded_size(
field: &crate::types::PointField,
encoding_opt: crate::types::EncodingOptions,
) -> usize {
use crate::types::{EncodingOptions, FieldType};
match field.field_type {
FieldType::Int16
| FieldType::Uint16
| FieldType::Int32
| FieldType::Uint32
| FieldType::Int64
| FieldType::Uint64 => 10,
FieldType::Float32 => {
if encoding_opt == EncodingOptions::Lossy && field.resolution.is_some() {
10
} else {
4
}
}
FieldType::Float64 => {
if encoding_opt == EncodingOptions::Lossy && field.resolution.is_some() {
10
} else {
8
}
}
FieldType::Int8 | FieldType::Uint8 => 1,
_ => 10,
}
}
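/// Compresses one chunk's encoded bytes with the selected algorithm.
///
/// `None` passes the data through as a copy, `Lz4` uses lz4_flex block
/// compression, and `Zstd` uses zstd at compression level 1.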
fn compress_chunk(data: &[u8], opt: CompressionOption) -> crate::Result<Vec<u8>> {
match opt {
CompressionOption::None => Ok(data.to_vec()),
CompressionOption::Lz4 => {
let max_out = lz4_flex::block::get_maximum_output_size(data.len());
let mut compressed = vec![0u8; max_out];
let n = lz4_flex::block::compress_into(data, &mut compressed)
.map_err(|e| crate::Error::Lz4(e.to_string()))?;
compressed.truncate(n);
Ok(compressed)
}
CompressionOption::Zstd => {
zstd::bulk::compress(data, 1).map_err(|e| crate::Error::Zstd(e.to_string()))
}
}
}