1use std::collections::HashMap;
6use std::io::Cursor;
7
8use crate::types::{Aabb, VoxelKey};
9use byteorder::{LittleEndian, ReadBytesExt};
10
11use crate::byte_source::ByteSource;
12use crate::error::CopcError;
13use crate::header::CopcInfo;
14
/// One parsed hierarchy entry: where a voxel's point data lives in the
/// file and how many points it holds.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub struct HierarchyEntry {
    /// Octree voxel this entry describes.
    pub key: VoxelKey,
    /// Byte offset of the voxel's point data, as stored in the hierarchy page.
    pub offset: u64,
    /// Size in bytes of the voxel's point data chunk.
    pub byte_size: u32,
    /// Number of points stored for this voxel.
    pub point_count: u32,
}
31
/// A child hierarchy page that was referenced by a parsed page
/// (`point_count == -1` entry) but has not been fetched yet.
#[derive(Debug, Clone)]
struct PendingPage {
    /// Voxel key of the entry that referenced this page.
    key: VoxelKey,
    /// Byte offset of the page; passed to `ByteSource::read_ranges`.
    offset: u64,
    /// Size of the page in bytes.
    size: u64,
}
40
/// Incrementally loaded COPC hierarchy: all entries parsed so far plus the
/// set of child pages that are known but not yet fetched.
pub struct HierarchyCache {
    /// Hierarchy entries parsed so far, keyed by voxel.
    entries: HashMap<VoxelKey, HierarchyEntry>,
    /// Child pages referenced by parsed pages but not yet loaded.
    pending_pages: Vec<PendingPage>,
    /// Whether the root page has been parsed; makes `load_root` idempotent.
    root_loaded: bool,
}
50
51impl Default for HierarchyCache {
52 fn default() -> Self {
53 Self::new()
54 }
55}
56
57impl HierarchyCache {
58 pub fn new() -> Self {
60 Self {
61 entries: HashMap::new(),
62 pending_pages: Vec::new(),
63 root_loaded: false,
64 }
65 }
66
67 pub async fn load_root(
69 &mut self,
70 source: &impl ByteSource,
71 info: &CopcInfo,
72 ) -> Result<(), CopcError> {
73 if self.root_loaded {
74 return Ok(());
75 }
76 let data = source
77 .read_range(info.root_hier_offset, info.root_hier_size)
78 .await?;
79 self.parse_page(&data, info.root_hier_offset)?;
80 self.root_loaded = true;
81 Ok(())
82 }
83
84 pub async fn load_pending_pages(&mut self, source: &impl ByteSource) -> Result<(), CopcError> {
86 if self.pending_pages.is_empty() {
87 return Ok(());
88 }
89 let pages: Vec<_> = self.pending_pages.drain(..).collect();
90 let ranges: Vec<_> = pages.iter().map(|p| (p.offset, p.size)).collect();
91 let results = source.read_ranges(&ranges).await?;
92
93 for (page, data) in pages.iter().zip(results) {
94 self.parse_page(&data, page.offset)?;
95 }
96
97 Ok(())
98 }
99
100 pub async fn load_all(
102 &mut self,
103 source: &impl ByteSource,
104 info: &CopcInfo,
105 ) -> Result<(), CopcError> {
106 self.load_root(source, info).await?;
107
108 while !self.pending_pages.is_empty() {
109 self.load_pending_pages(source).await?;
110 }
111
112 Ok(())
113 }
114
115 pub async fn load_pages_for_bounds(
122 &mut self,
123 source: &impl ByteSource,
124 bounds: &Aabb,
125 root_bounds: &Aabb,
126 ) -> Result<(), CopcError> {
127 loop {
128 let matching: Vec<PendingPage> = self
129 .pending_pages
130 .iter()
131 .filter(|p| p.key.bounds(root_bounds).intersects(bounds))
132 .cloned()
133 .collect();
134
135 if matching.is_empty() {
136 break;
137 }
138
139 self.pending_pages
140 .retain(|p| !p.key.bounds(root_bounds).intersects(bounds));
141
142 let ranges: Vec<_> = matching.iter().map(|p| (p.offset, p.size)).collect();
143 let results = source.read_ranges(&ranges).await?;
144
145 for (page, data) in matching.iter().zip(results) {
146 self.parse_page(&data, page.offset)?;
147 }
148 }
149
150 Ok(())
151 }
152
153 pub async fn load_pages_for_bounds_to_level(
157 &mut self,
158 source: &impl ByteSource,
159 bounds: &Aabb,
160 root_bounds: &Aabb,
161 max_level: i32,
162 ) -> Result<(), CopcError> {
163 loop {
164 let matching: Vec<PendingPage> = self
165 .pending_pages
166 .iter()
167 .filter(|p| {
168 p.key.level <= max_level && p.key.bounds(root_bounds).intersects(bounds)
169 })
170 .cloned()
171 .collect();
172
173 if matching.is_empty() {
174 break;
175 }
176
177 self.pending_pages.retain(|p| {
178 !(p.key.level <= max_level && p.key.bounds(root_bounds).intersects(bounds))
179 });
180
181 let ranges: Vec<_> = matching.iter().map(|p| (p.offset, p.size)).collect();
182 let results = source.read_ranges(&ranges).await?;
183
184 for (page, data) in matching.iter().zip(results) {
185 self.parse_page(&data, page.offset)?;
186 }
187 }
188
189 Ok(())
190 }
191
192 pub fn has_pending_pages(&self) -> bool {
194 !self.pending_pages.is_empty()
195 }
196
197 pub fn get(&self, key: &VoxelKey) -> Option<&HierarchyEntry> {
199 self.entries.get(key)
200 }
201
202 pub fn iter(&self) -> impl Iterator<Item = (&VoxelKey, &HierarchyEntry)> {
204 self.entries.iter()
205 }
206
207 pub fn len(&self) -> usize {
209 self.entries.len()
210 }
211
212 pub fn is_empty(&self) -> bool {
214 self.entries.is_empty()
215 }
216
217 fn parse_page(&mut self, data: &[u8], _base_offset: u64) -> Result<(), CopcError> {
219 let entry_size = 32; let mut r = Cursor::new(data);
221
222 while (r.position() as usize + entry_size) <= data.len() {
223 let level = r.read_i32::<LittleEndian>()?;
224 let x = r.read_i32::<LittleEndian>()?;
225 let y = r.read_i32::<LittleEndian>()?;
226 let z = r.read_i32::<LittleEndian>()?;
227 let offset = r.read_u64::<LittleEndian>()?;
228 let byte_size = r.read_i32::<LittleEndian>()?;
229 let point_count = r.read_i32::<LittleEndian>()?;
230
231 let key = VoxelKey { level, x, y, z };
232
233 if point_count == -1 {
234 self.pending_pages.push(PendingPage {
236 key,
237 offset,
238 size: byte_size as u64,
239 });
240 } else if point_count >= 0 && byte_size >= 0 {
241 self.entries.insert(
242 key,
243 HierarchyEntry {
244 key,
245 offset,
246 byte_size: byte_size as u32,
247 point_count: point_count as u32,
248 },
249 );
250 }
251 }
253
254 Ok(())
255 }
256}
257
#[cfg(test)]
mod tests {
    use super::*;
    use byteorder::WriteBytesExt;

    /// Appends one 32-byte hierarchy entry to `buf` in the little-endian
    /// on-disk layout that `HierarchyCache::parse_page` reads back.
    fn write_hierarchy_entry(
        buf: &mut Vec<u8>,
        level: i32,
        x: i32,
        y: i32,
        z: i32,
        offset: u64,
        byte_size: i32,
        point_count: i32,
    ) {
        buf.write_i32::<LittleEndian>(level).unwrap();
        buf.write_i32::<LittleEndian>(x).unwrap();
        buf.write_i32::<LittleEndian>(y).unwrap();
        buf.write_i32::<LittleEndian>(z).unwrap();
        buf.write_u64::<LittleEndian>(offset).unwrap();
        buf.write_i32::<LittleEndian>(byte_size).unwrap();
        buf.write_i32::<LittleEndian>(point_count).unwrap();
    }

    /// Builds an in-memory hierarchy blob used as a `ByteSource` by the
    /// tests below: a 96-byte root page (one real entry for voxel 0-0-0-0
    /// plus two child-page references with point_count == -1), followed by
    /// a "left" child page (entry for voxel 2-0-0-0) and a "right" child
    /// page (entry for voxel 2-2-0-0). Returns the blob and the root cube
    /// bounds (0..100 on every axis).
    fn build_two_child_source() -> (Vec<u8>, Aabb) {
        let root_bounds = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };

        // Left child page: one leaf entry in the low-x half of the cube.
        let mut left_page = Vec::new();
        write_hierarchy_entry(&mut left_page, 2, 0, 0, 0, 9000, 64, 10);

        // Right child page: one leaf entry in the high-x half of the cube.
        let mut right_page = Vec::new();
        write_hierarchy_entry(&mut right_page, 2, 2, 0, 0, 9500, 64, 20);

        // Root page starts with a real entry for the root voxel.
        let mut root_page = Vec::new();
        write_hierarchy_entry(&mut root_page, 0, 0, 0, 0, 1000, 256, 100);

        // Child pages are laid out immediately after the 3-entry root page,
        // so their offsets are computed from its fixed 96-byte size.
        let root_page_size = 3 * 32;
        let left_page_offset = root_page_size as u64;
        let right_page_offset = left_page_offset + left_page.len() as u64;

        // point_count == -1 marks these entries as child-page references.
        write_hierarchy_entry(
            &mut root_page,
            1,
            0,
            0,
            0,
            left_page_offset,
            left_page.len() as i32,
            -1,
        );
        write_hierarchy_entry(
            &mut root_page,
            1,
            1,
            0,
            0,
            right_page_offset,
            right_page.len() as i32,
            -1,
        );

        // Concatenate root + left + right into one addressable blob.
        let mut source = root_page;
        source.extend_from_slice(&left_page);
        source.extend_from_slice(&right_page);

        (source, root_bounds)
    }

    #[tokio::test]
    async fn test_load_pages_for_bounds_filters_spatially() {
        let (source, root_bounds) = build_two_child_source();

        // Parse only the 96-byte root page directly.
        let mut cache = HierarchyCache::new();
        cache.parse_page(&source[..96], 0).unwrap();

        // Root entry loaded; both child pages queued.
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);

        // Query the low-x half: only the left child page intersects.
        let left_query = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [30.0, 100.0, 100.0],
        };
        cache
            .load_pages_for_bounds(&source, &left_query, &root_bounds)
            .await
            .unwrap();

        // Left page parsed: its leaf entry is now loaded.
        assert_eq!(cache.len(), 2);
        assert!(
            cache
                .get(&VoxelKey {
                    level: 2,
                    x: 0,
                    y: 0,
                    z: 0,
                })
                .is_some()
        );

        // The right child page did not intersect and stays pending.
        assert_eq!(cache.pending_pages.len(), 1);
        assert_eq!(
            cache.pending_pages[0].key,
            VoxelKey {
                level: 1,
                x: 1,
                y: 0,
                z: 0
            }
        );

        // Query the high-x half: now the right page gets loaded too.
        let right_query = Aabb {
            min: [60.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };
        cache
            .load_pages_for_bounds(&source, &right_query, &root_bounds)
            .await
            .unwrap();

        assert_eq!(cache.len(), 3);
        assert!(
            cache
                .get(&VoxelKey {
                    level: 2,
                    x: 2,
                    y: 0,
                    z: 0,
                })
                .is_some()
        );
        assert!(cache.pending_pages.is_empty());
    }

    #[tokio::test]
    async fn test_load_pages_for_bounds_to_level_stops_at_max_level() {
        let (source, root_bounds) = build_two_child_source();

        let mut cache = HierarchyCache::new();
        cache.parse_page(&source[..96], 0).unwrap();

        // Root entry loaded; both child pages (at level 1) queued.
        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);

        let full_query = Aabb {
            min: [0.0, 0.0, 0.0],
            max: [100.0, 100.0, 100.0],
        };
        // max_level = 0: both pending pages are level 1, so nothing loads.
        cache
            .load_pages_for_bounds_to_level(&source, &full_query, &root_bounds, 0)
            .await
            .unwrap();

        assert_eq!(cache.len(), 1);
        assert_eq!(cache.pending_pages.len(), 2);

        // max_level = 1: both pages load and contribute their leaf entries.
        cache
            .load_pages_for_bounds_to_level(&source, &full_query, &root_bounds, 1)
            .await
            .unwrap();

        assert_eq!(cache.len(), 3);
        assert!(cache.pending_pages.is_empty());
    }

    #[tokio::test]
    async fn test_parse_hierarchy_page() {
        // Two real entries plus one child-page reference (point_count -1).
        let mut page_data = Vec::new();
        write_hierarchy_entry(&mut page_data, 0, 0, 0, 0, 1000, 256, 100);
        write_hierarchy_entry(&mut page_data, 1, 0, 0, 0, 2000, 512, 50);
        write_hierarchy_entry(&mut page_data, 1, 1, 0, 0, 5000, 128, -1);

        let mut cache = HierarchyCache::new();
        cache.parse_page(&page_data, 0).unwrap();

        // Real entries become cache entries; the reference becomes pending.
        assert_eq!(cache.len(), 2);
        assert_eq!(cache.pending_pages.len(), 1);

        let root = cache
            .get(&VoxelKey {
                level: 0,
                x: 0,
                y: 0,
                z: 0,
            })
            .unwrap();
        assert_eq!(root.point_count, 100);
        assert_eq!(root.offset, 1000);
    }
}