1use std::collections::HashMap;
6use std::io::Cursor;
7
8use crate::types::{Aabb, VoxelKey};
9use byteorder::{LittleEndian, ReadBytesExt};
10
11use crate::byte_source::ByteSource;
12use crate::error::CopcError;
13use crate::header::CopcInfo;
14
/// One parsed leaf record from a COPC hierarchy page: a voxel node and the
/// location of its point data within the source.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct HierarchyEntry {
    /// Voxel key (level, x, y, z) identifying this node in the octree.
    pub key: VoxelKey,
    /// Byte offset of the node's point data, as stored in the hierarchy record.
    pub offset: u64,
    /// Size in bytes of the node's point data.
    pub byte_size: u32,
    /// Number of points stored in this node.
    pub point_count: u32,
}
31
/// A child hierarchy page that has been referenced by a parent record
/// (signalled by `point_count == -1` during parsing) but whose bytes have
/// not been fetched yet.
#[derive(Debug, Clone)]
struct PendingPage {
    /// Voxel key of the record that referenced this page.
    key: VoxelKey,
    /// Byte offset of the page within the source.
    offset: u64,
    /// Length of the page in bytes.
    size: u64,
}
40
/// Incrementally loaded view of a COPC hierarchy.
///
/// Pages are parsed into `entries`; references to not-yet-fetched child
/// pages are queued in `pending_pages` so callers can load the hierarchy
/// fully, lazily, or restricted to a spatial region.
pub struct HierarchyCache {
    /// Parsed leaf entries, keyed by voxel.
    entries: HashMap<VoxelKey, HierarchyEntry>,
    /// Child pages discovered during parsing but not yet fetched.
    pending_pages: Vec<PendingPage>,
    /// Whether the root page has been parsed; guards against repeat loads.
    root_loaded: bool,
}
50
impl Default for HierarchyCache {
    /// Equivalent to [`HierarchyCache::new`]: an empty cache with nothing loaded.
    fn default() -> Self {
        Self::new()
    }
}
56
57impl HierarchyCache {
58 pub fn new() -> Self {
60 Self {
61 entries: HashMap::new(),
62 pending_pages: Vec::new(),
63 root_loaded: false,
64 }
65 }
66
67 pub async fn load_root(
69 &mut self,
70 source: &impl ByteSource,
71 info: &CopcInfo,
72 ) -> Result<(), CopcError> {
73 if self.root_loaded {
74 return Ok(());
75 }
76 let data = source
77 .read_range(info.root_hier_offset, info.root_hier_size)
78 .await?;
79 self.parse_page(&data, info.root_hier_offset)?;
80 self.root_loaded = true;
81 Ok(())
82 }
83
84 pub async fn load_pending_pages(&mut self, source: &impl ByteSource) -> Result<(), CopcError> {
86 if self.pending_pages.is_empty() {
87 return Ok(());
88 }
89 let pages: Vec<_> = self.pending_pages.drain(..).collect();
90 let ranges: Vec<_> = pages.iter().map(|p| (p.offset, p.size)).collect();
91 let results = source.read_ranges(&ranges).await?;
92
93 for (page, data) in pages.iter().zip(results) {
94 self.parse_page(&data, page.offset)?;
95 }
96
97 Ok(())
98 }
99
100 pub async fn load_all(
102 &mut self,
103 source: &impl ByteSource,
104 info: &CopcInfo,
105 ) -> Result<(), CopcError> {
106 self.load_root(source, info).await?;
107
108 while !self.pending_pages.is_empty() {
109 self.load_pending_pages(source).await?;
110 }
111
112 Ok(())
113 }
114
115 pub async fn load_pages_for_bounds(
122 &mut self,
123 source: &impl ByteSource,
124 bounds: &Aabb,
125 root_bounds: &Aabb,
126 ) -> Result<(), CopcError> {
127 loop {
128 let matching: Vec<PendingPage> = self
129 .pending_pages
130 .iter()
131 .filter(|p| p.key.bounds(root_bounds).intersects(bounds))
132 .cloned()
133 .collect();
134
135 if matching.is_empty() {
136 break;
137 }
138
139 self.pending_pages
140 .retain(|p| !p.key.bounds(root_bounds).intersects(bounds));
141
142 let ranges: Vec<_> = matching.iter().map(|p| (p.offset, p.size)).collect();
143 let results = source.read_ranges(&ranges).await?;
144
145 for (page, data) in matching.iter().zip(results) {
146 self.parse_page(&data, page.offset)?;
147 }
148 }
149
150 Ok(())
151 }
152
153 pub fn has_pending_pages(&self) -> bool {
155 !self.pending_pages.is_empty()
156 }
157
158 pub fn get(&self, key: &VoxelKey) -> Option<&HierarchyEntry> {
160 self.entries.get(key)
161 }
162
163 pub fn iter(&self) -> impl Iterator<Item = (&VoxelKey, &HierarchyEntry)> {
165 self.entries.iter()
166 }
167
168 pub fn len(&self) -> usize {
170 self.entries.len()
171 }
172
173 pub fn is_empty(&self) -> bool {
175 self.entries.is_empty()
176 }
177
178 fn parse_page(&mut self, data: &[u8], _base_offset: u64) -> Result<(), CopcError> {
180 let entry_size = 32; let mut r = Cursor::new(data);
182
183 while (r.position() as usize + entry_size) <= data.len() {
184 let level = r.read_i32::<LittleEndian>()?;
185 let x = r.read_i32::<LittleEndian>()?;
186 let y = r.read_i32::<LittleEndian>()?;
187 let z = r.read_i32::<LittleEndian>()?;
188 let offset = r.read_u64::<LittleEndian>()?;
189 let byte_size = r.read_i32::<LittleEndian>()?;
190 let point_count = r.read_i32::<LittleEndian>()?;
191
192 let key = VoxelKey { level, x, y, z };
193
194 if point_count == -1 {
195 self.pending_pages.push(PendingPage {
197 key,
198 offset,
199 size: byte_size as u64,
200 });
201 } else if point_count >= 0 && byte_size >= 0 {
202 self.entries.insert(
203 key,
204 HierarchyEntry {
205 key,
206 offset,
207 byte_size: byte_size as u32,
208 point_count: point_count as u32,
209 },
210 );
211 }
212 }
214
215 Ok(())
216 }
217}
218
#[cfg(test)]
mod tests {
    use super::*;
    use byteorder::WriteBytesExt;

    /// Serializes one 32-byte COPC hierarchy record into `buf`,
    /// little-endian: level, x, y, z, offset, byte_size, point_count.
    fn write_hierarchy_entry(
        buf: &mut Vec<u8>,
        level: i32,
        x: i32,
        y: i32,
        z: i32,
        offset: u64,
        byte_size: i32,
        point_count: i32,
    ) {
        buf.write_i32::<LittleEndian>(level).unwrap();
        buf.write_i32::<LittleEndian>(x).unwrap();
        buf.write_i32::<LittleEndian>(y).unwrap();
        buf.write_i32::<LittleEndian>(z).unwrap();
        buf.write_u64::<LittleEndian>(offset).unwrap();
        buf.write_i32::<LittleEndian>(byte_size).unwrap();
        buf.write_i32::<LittleEndian>(point_count).unwrap();
    }

    /// Builds an in-memory hierarchy fixture: a root page containing one
    /// leaf entry plus two child-page references (level 1), followed by
    /// the two child pages, each holding a single level-2 leaf entry.
    /// Returns the raw bytes and the root bounding box.
    fn build_two_child_source() -> (Vec<u8>, Aabb) {
        let root_bounds = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };

        // Child page with one leaf in the "left" region (level-2 voxel x = 0).
        let mut left_page = Vec::new();
        write_hierarchy_entry(&mut left_page, 2, 0, 0, 0, 9000, 64, 10);

        // Child page with one leaf in the "right" region (level-2 voxel x = 2).
        let mut right_page = Vec::new();
        write_hierarchy_entry(&mut right_page, 2, 2, 0, 0, 9500, 64, 20);

        // Root page, first record: the root leaf entry itself.
        let mut root_page = Vec::new();
        write_hierarchy_entry(&mut root_page, 0, 0, 0, 0, 1000, 256, 100);

        // The child pages are laid out immediately after the 3-record
        // (3 * 32 byte) root page.
        let root_page_size = 3 * 32;
        let left_page_offset = root_page_size as u64;
        let right_page_offset = left_page_offset + left_page.len() as u64;

        // point_count == -1 marks these two records as child-page references.
        write_hierarchy_entry(
            &mut root_page,
            1,
            0,
            0,
            0,
            left_page_offset,
            left_page.len() as i32,
            -1,
        );
        write_hierarchy_entry(
            &mut root_page,
            1,
            1,
            0,
            0,
            right_page_offset,
            right_page.len() as i32,
            -1,
        );

        // Concatenate: root page, then left page, then right page.
        let mut source = root_page;
        source.extend_from_slice(&left_page);
        source.extend_from_slice(&right_page);

        (source, root_bounds)
    }

    #[tokio::test]
    async fn test_load_pages_for_bounds_filters_spatially() {
        let (source, root_bounds) = build_two_child_source();

        let mut cache = HierarchyCache::new();
        // Parse only the 96-byte (3 x 32) root page.
        cache.parse_page(&source[..96], 0).unwrap();

        // One root leaf entry; both child pages still pending.
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);
        // Query intersecting only the left portion of the root bounds.
        let left_query = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [30.0, 100.0, 100.0],
        };
        cache
            .load_pages_for_bounds(&source, &left_query, &root_bounds)
            .await
            .unwrap();

        // The left child page was fetched and its leaf entry parsed...
        assert_eq!(cache.len(), 2);
        assert!(
            cache
                .get(&VoxelKey {
                    level: 2,
                    x: 0,
                    y: 0,
                    z: 0,
                })
                .is_some()
        );

        // ...while the right child page (level-1 voxel x = 1) is untouched.
        assert_eq!(cache.pending_pages.len(), 1);
        assert_eq!(
            cache.pending_pages[0].key,
            VoxelKey {
                level: 1,
                x: 1,
                y: 0,
                z: 0
            }
        );

        // A query over the right portion drains the remaining page.
        let right_query = Aabb {
            min: [60.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };
        cache
            .load_pages_for_bounds(&source, &right_query, &root_bounds)
            .await
            .unwrap();

        assert_eq!(cache.len(), 3);
        assert!(
            cache
                .get(&VoxelKey {
                    level: 2,
                    x: 2,
                    y: 0,
                    z: 0,
                })
                .is_some()
        );
        assert!(cache.pending_pages.is_empty());
    }

    #[tokio::test]
    async fn test_parse_hierarchy_page() {
        let mut page_data = Vec::new();
        write_hierarchy_entry(&mut page_data, 0, 0, 0, 0, 1000, 256, 100);
        write_hierarchy_entry(&mut page_data, 1, 0, 0, 0, 2000, 512, 50);
        // point_count == -1: a child-page reference, not a leaf entry.
        write_hierarchy_entry(&mut page_data, 1, 1, 0, 0, 5000, 128, -1);

        let mut cache = HierarchyCache::new();
        cache.parse_page(&page_data, 0).unwrap();

        // Two leaf entries; the -1 record is queued as a pending page.
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.pending_pages.len(), 1);
        let root = cache
            .get(&VoxelKey {
                level: 0,
                x: 0,
                y: 0,
                z: 0,
            })
            .unwrap();
        assert_eq!(root.point_count, 100);
        assert_eq!(root.offset, 1000);
    }
}