1use std::collections::HashMap;
6use std::io::Cursor;
7
8use crate::types::{Aabb, VoxelKey};
9use byteorder::{LittleEndian, ReadBytesExt};
10
11use crate::byte_source::ByteSource;
12use crate::error::CopcError;
13use crate::header::CopcInfo;
14
/// One parsed leaf record from a COPC hierarchy page: where the point data
/// for a single octree voxel lives and how many points it holds.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct HierarchyEntry {
    /// Octree voxel this entry describes (level plus x/y/z cell coordinates).
    pub key: VoxelKey,
    /// Byte offset of the voxel's point data (presumably absolute within the
    /// COPC file, matching how page offsets are read — TODO confirm).
    pub offset: u64,
    /// Size in bytes of the point-data chunk at `offset`.
    pub byte_size: u32,
    /// Number of points in the chunk.
    pub point_count: u32,
}
31
/// A child hierarchy page that has been referenced by an already-parsed page
/// (an entry with `point_count == -1`) but whose bytes have not been fetched
/// and parsed yet.
#[derive(Debug, Clone)]
struct PendingPage {
    /// Voxel key of the referencing entry.
    key: VoxelKey,
    /// Byte offset of the page, as stored in the referencing entry.
    offset: u64,
    /// Size of the page in bytes.
    size: u64,
}
40
/// Incrementally loaded view of a COPC file's hierarchy.
///
/// Parsed leaf entries accumulate in `entries`; references to child pages
/// discovered during parsing are queued in `pending_pages` until one of the
/// `load_*` methods fetches them.
pub struct HierarchyCache {
    /// Fully parsed hierarchy entries, keyed by voxel.
    entries: HashMap<VoxelKey, HierarchyEntry>,
    /// Child pages discovered but not yet fetched/parsed.
    pending_pages: Vec<PendingPage>,
    /// Whether the root page has been parsed; makes `load_root` idempotent.
    root_loaded: bool,
}
50
51impl Default for HierarchyCache {
52 fn default() -> Self {
53 Self::new()
54 }
55}
56
57impl HierarchyCache {
58 pub fn new() -> Self {
60 Self {
61 entries: HashMap::new(),
62 pending_pages: Vec::new(),
63 root_loaded: false,
64 }
65 }
66
67 pub async fn load_root(
69 &mut self,
70 source: &impl ByteSource,
71 info: &CopcInfo,
72 ) -> Result<(), CopcError> {
73 if self.root_loaded {
74 return Ok(());
75 }
76 let data = source
77 .read_range(info.root_hier_offset, info.root_hier_size)
78 .await?;
79 self.parse_page(&data, info.root_hier_offset)?;
80 self.root_loaded = true;
81 Ok(())
82 }
83
84 pub async fn load_pending_pages(&mut self, source: &impl ByteSource) -> Result<(), CopcError> {
86 if self.pending_pages.is_empty() {
87 return Ok(());
88 }
89 let pages: Vec<_> = self.pending_pages.drain(..).collect();
90 let ranges: Vec<_> = pages.iter().map(|p| (p.offset, p.size)).collect();
91 let results = source.read_ranges(&ranges).await?;
92
93 for (page, data) in pages.iter().zip(results) {
94 self.parse_page(&data, page.offset)?;
95 }
96
97 Ok(())
98 }
99
100 pub async fn load_all(
106 &mut self,
107 source: &impl ByteSource,
108 info: &CopcInfo,
109 ) -> Result<(), CopcError> {
110 self.load_root(source, info).await?;
111
112 while !self.pending_pages.is_empty() {
113 self.load_pending_pages(source).await?;
114 }
115
116 Ok(())
117 }
118
119 pub async fn load_pages_for_bounds(
126 &mut self,
127 source: &impl ByteSource,
128 bounds: &Aabb,
129 root_bounds: &Aabb,
130 ) -> Result<(), CopcError> {
131 loop {
132 let matching: Vec<PendingPage> = self
133 .pending_pages
134 .iter()
135 .filter(|p| p.key.bounds(root_bounds).intersects(bounds))
136 .cloned()
137 .collect();
138
139 if matching.is_empty() {
140 break;
141 }
142
143 self.pending_pages
144 .retain(|p| !p.key.bounds(root_bounds).intersects(bounds));
145
146 let ranges: Vec<_> = matching.iter().map(|p| (p.offset, p.size)).collect();
147 let results = source.read_ranges(&ranges).await?;
148
149 for (page, data) in matching.iter().zip(results) {
150 self.parse_page(&data, page.offset)?;
151 }
152 }
153
154 Ok(())
155 }
156
157 pub async fn load_pages_for_bounds_to_level(
161 &mut self,
162 source: &impl ByteSource,
163 bounds: &Aabb,
164 root_bounds: &Aabb,
165 max_level: i32,
166 ) -> Result<(), CopcError> {
167 loop {
168 let matching: Vec<PendingPage> = self
169 .pending_pages
170 .iter()
171 .filter(|p| {
172 p.key.level <= max_level && p.key.bounds(root_bounds).intersects(bounds)
173 })
174 .cloned()
175 .collect();
176
177 if matching.is_empty() {
178 break;
179 }
180
181 self.pending_pages.retain(|p| {
182 !(p.key.level <= max_level && p.key.bounds(root_bounds).intersects(bounds))
183 });
184
185 let ranges: Vec<_> = matching.iter().map(|p| (p.offset, p.size)).collect();
186 let results = source.read_ranges(&ranges).await?;
187
188 for (page, data) in matching.iter().zip(results) {
189 self.parse_page(&data, page.offset)?;
190 }
191 }
192
193 Ok(())
194 }
195
196 pub fn has_pending_pages(&self) -> bool {
198 !self.pending_pages.is_empty()
199 }
200
201 pub fn get(&self, key: &VoxelKey) -> Option<&HierarchyEntry> {
203 self.entries.get(key)
204 }
205
206 pub fn iter(&self) -> impl Iterator<Item = (&VoxelKey, &HierarchyEntry)> {
208 self.entries.iter()
209 }
210
211 pub fn len(&self) -> usize {
213 self.entries.len()
214 }
215
216 pub fn is_empty(&self) -> bool {
218 self.entries.is_empty()
219 }
220
221 fn parse_page(&mut self, data: &[u8], _base_offset: u64) -> Result<(), CopcError> {
223 let entry_size = 32; let mut r = Cursor::new(data);
225
226 while (r.position() as usize + entry_size) <= data.len() {
227 let level = r.read_i32::<LittleEndian>()?;
228 let x = r.read_i32::<LittleEndian>()?;
229 let y = r.read_i32::<LittleEndian>()?;
230 let z = r.read_i32::<LittleEndian>()?;
231 let offset = r.read_u64::<LittleEndian>()?;
232 let byte_size = r.read_i32::<LittleEndian>()?;
233 let point_count = r.read_i32::<LittleEndian>()?;
234
235 let key = VoxelKey { level, x, y, z };
236
237 if point_count == -1 {
238 self.pending_pages.push(PendingPage {
240 key,
241 offset,
242 size: byte_size as u64,
243 });
244 } else if point_count >= 0 && byte_size >= 0 {
245 self.entries.insert(
246 key,
247 HierarchyEntry {
248 key,
249 offset,
250 byte_size: byte_size as u32,
251 point_count: point_count as u32,
252 },
253 );
254 }
255 }
257
258 Ok(())
259 }
260}
261
#[cfg(test)]
mod tests {
    use super::*;
    use byteorder::WriteBytesExt;

    /// Serializes one 32-byte hierarchy record into `out` in on-disk layout:
    /// key (4 x i32), offset (u64), byteSize (i32), pointCount (i32).
    fn write_hierarchy_entry(
        out: &mut Vec<u8>,
        level: i32,
        x: i32,
        y: i32,
        z: i32,
        offset: u64,
        byte_size: i32,
        point_count: i32,
    ) {
        out.write_i32::<LittleEndian>(level).unwrap();
        out.write_i32::<LittleEndian>(x).unwrap();
        out.write_i32::<LittleEndian>(y).unwrap();
        out.write_i32::<LittleEndian>(z).unwrap();
        out.write_u64::<LittleEndian>(offset).unwrap();
        out.write_i32::<LittleEndian>(byte_size).unwrap();
        out.write_i32::<LittleEndian>(point_count).unwrap();
    }

    /// Builds an in-memory hierarchy: a 96-byte root page (one level-0 leaf
    /// plus references to two child pages) followed by the two child pages —
    /// one whose voxel sits in the left half of the root bounds, one in the
    /// right half. Returns the raw bytes and the root bounds.
    fn build_two_child_source() -> (Vec<u8>, Aabb) {
        let root_bounds = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };

        // Left child page: a single level-2 leaf in the left half.
        let mut left_page = Vec::new();
        write_hierarchy_entry(&mut left_page, 2, 0, 0, 0, 9000, 64, 10);

        // Right child page: a single level-2 leaf in the right half.
        let mut right_page = Vec::new();
        write_hierarchy_entry(&mut right_page, 2, 2, 0, 0, 9500, 64, 20);

        // Root page: one leaf entry, then two child-page references
        // (pointCount == -1) pointing just past the 3-entry root page.
        let mut root_page = Vec::new();
        write_hierarchy_entry(&mut root_page, 0, 0, 0, 0, 1000, 256, 100);

        let root_page_size = 3 * 32;
        let left_page_offset = root_page_size as u64;
        let right_page_offset = left_page_offset + left_page.len() as u64;

        write_hierarchy_entry(
            &mut root_page,
            1,
            0,
            0,
            0,
            left_page_offset,
            left_page.len() as i32,
            -1,
        );
        write_hierarchy_entry(
            &mut root_page,
            1,
            1,
            0,
            0,
            right_page_offset,
            right_page.len() as i32,
            -1,
        );

        let bytes = [root_page, left_page, right_page].concat();
        (bytes, root_bounds)
    }

    #[tokio::test]
    async fn test_load_pages_for_bounds_filters_spatially() {
        let (bytes, root_bounds) = build_two_child_source();

        let mut cache = HierarchyCache::new();
        // Parse only the 96-byte root page: one leaf, two pending pages.
        cache.parse_page(&bytes[..96], 0).unwrap();
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);

        // A query covering only the left half must load only the left page.
        let left_query = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [30.0, 100.0, 100.0],
        };
        cache
            .load_pages_for_bounds(&bytes, &left_query, &root_bounds)
            .await
            .unwrap();

        assert_eq!(cache.len(), 2);
        let left_leaf = VoxelKey {
            level: 2,
            x: 0,
            y: 0,
            z: 0,
        };
        assert!(cache.get(&left_leaf).is_some());

        // The right-half page must still be pending.
        assert_eq!(cache.pending_pages.len(), 1);
        assert_eq!(
            cache.pending_pages[0].key,
            VoxelKey {
                level: 1,
                x: 1,
                y: 0,
                z: 0
            }
        );

        // Querying the right half drains the remaining page.
        let right_query = Aabb {
            min: [60.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };
        cache
            .load_pages_for_bounds(&bytes, &right_query, &root_bounds)
            .await
            .unwrap();

        assert_eq!(cache.len(), 3);
        let right_leaf = VoxelKey {
            level: 2,
            x: 2,
            y: 0,
            z: 0,
        };
        assert!(cache.get(&right_leaf).is_some());
        assert!(cache.pending_pages.is_empty());
    }

    #[tokio::test]
    async fn test_load_pages_for_bounds_to_level_stops_at_max_level() {
        let (bytes, root_bounds) = build_two_child_source();

        let mut cache = HierarchyCache::new();
        cache.parse_page(&bytes[..96], 0).unwrap();
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);

        let full_query = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };

        // Both child pages sit at level 1, so max_level 0 loads nothing.
        cache
            .load_pages_for_bounds_to_level(&bytes, &full_query, &root_bounds, 0)
            .await
            .unwrap();
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);

        // max_level 1 qualifies both pages and drains the queue completely.
        cache
            .load_pages_for_bounds_to_level(&bytes, &full_query, &root_bounds, 1)
            .await
            .unwrap();
        assert_eq!(cache.len(), 3);
        assert!(cache.pending_pages.is_empty());
    }

    #[tokio::test]
    async fn test_parse_hierarchy_page() {
        let mut page = Vec::new();
        write_hierarchy_entry(&mut page, 0, 0, 0, 0, 1000, 256, 100);
        write_hierarchy_entry(&mut page, 1, 0, 0, 0, 2000, 512, 50);
        // pointCount == -1 marks a child-page reference, not a leaf.
        write_hierarchy_entry(&mut page, 1, 1, 0, 0, 5000, 128, -1);

        let mut cache = HierarchyCache::new();
        cache.parse_page(&page, 0).unwrap();

        assert_eq!(cache.len(), 2);
        assert_eq!(cache.pending_pages.len(), 1);

        let root = cache
            .get(&VoxelKey {
                level: 0,
                x: 0,
                y: 0,
                z: 0,
            })
            .unwrap();
        assert_eq!(root.point_count, 100);
        assert_eq!(root.offset, 1000);
    }
}