// copc_streaming/chunk.rs

1//! Point chunk fetching and LAZ decompression.
2
3use std::io::Cursor;
4
5use laz::LazVlr;
6use laz::record::{LayeredPointRecordDecompressor, RecordDecompressor};
7
8use crate::byte_source::ByteSource;
9use crate::error::CopcError;
10use crate::hierarchy::HierarchyEntry;
11use crate::types::VoxelKey;
12
/// A decompressed point data chunk.
///
/// Holds the raw LAS point record bytes for a single octree node, as produced
/// by [`fetch_and_decompress`]. Parse the records into `las::Point` values
/// with [`read_points`] or [`read_points_range`].
#[non_exhaustive]
pub struct DecompressedChunk {
    /// The octree node this chunk belongs to.
    pub key: VoxelKey,
    /// Raw decompressed point record bytes
    /// (`point_count * point_record_length` bytes long).
    pub data: Vec<u8>,
    /// Number of points in this chunk.
    pub point_count: u32,
    /// Size of a single point record in bytes.
    pub point_record_length: u16,
}
25
26/// Fetch and decompress a single chunk.
27pub async fn fetch_and_decompress(
28    source: &impl ByteSource,
29    entry: &HierarchyEntry,
30    laz_vlr: &LazVlr,
31    point_record_length: u16,
32) -> Result<DecompressedChunk, CopcError> {
33    let compressed = source
34        .read_range(entry.offset, entry.byte_size as u64)
35        .await?;
36
37    let decompressed_size = entry.point_count as usize * point_record_length as usize;
38    let mut decompressed = vec![0u8; decompressed_size];
39
40    decompress_copc_chunk(&compressed, &mut decompressed, laz_vlr)?;
41
42    Ok(DecompressedChunk {
43        key: entry.key,
44        data: decompressed,
45        point_count: entry.point_count,
46        point_record_length,
47    })
48}
49
50/// Decompress a single COPC chunk.
51///
52/// COPC chunks are independently compressed and do NOT start with the 8-byte
53/// chunk table offset that standard LAZ files have. We use
54/// `LayeredPointRecordDecompressor` directly (the same approach as copc-rs)
55/// to bypass `LasZipDecompressor`'s chunk table handling.
56fn decompress_copc_chunk(
57    compressed: &[u8],
58    decompressed: &mut [u8],
59    laz_vlr: &LazVlr,
60) -> Result<(), CopcError> {
61    let src = Cursor::new(compressed);
62    let mut decompressor = LayeredPointRecordDecompressor::new(src);
63    decompressor.set_fields_from(laz_vlr.items())?;
64    decompressor.decompress_many(decompressed)?;
65    Ok(())
66}
67
68/// Parse all points from a decompressed chunk into `las::Point` values.
69pub fn read_points(
70    chunk: &DecompressedChunk,
71    header: &las::Header,
72) -> Result<Vec<las::Point>, CopcError> {
73    read_points_range(chunk, header, 0..chunk.point_count)
74}
75
76/// Parse a sub-range of points from a decompressed chunk.
77///
78/// Only the points in `range` are parsed — bytes outside the range are skipped.
79/// Returns an error if the range extends beyond the chunk's point count.
80pub fn read_points_range(
81    chunk: &DecompressedChunk,
82    header: &las::Header,
83    range: std::ops::Range<u32>,
84) -> Result<Vec<las::Point>, CopcError> {
85    if range.end > chunk.point_count {
86        return Err(CopcError::Io(std::io::Error::new(
87            std::io::ErrorKind::InvalidInput,
88            format!(
89                "point range {}..{} exceeds chunk point count {}",
90                range.start, range.end, chunk.point_count
91            ),
92        )));
93    }
94
95    let format = header.point_format();
96    let transforms = header.transforms();
97    let record_len = chunk.point_record_length as u64;
98
99    let start = (range.start as u64 * record_len) as usize;
100    let count = range.end.saturating_sub(range.start) as usize;
101
102    let mut cursor = Cursor::new(&chunk.data[start..]);
103    let mut points = Vec::with_capacity(count);
104
105    for _ in 0..count {
106        let raw = las::raw::Point::read_from(&mut cursor, format)?;
107        points.push(las::Point::new(raw, transforms));
108    }
109
110    Ok(points)
111}