threecrate-algorithms 0.7.1

Algorithms for 3D point cloud and mesh processing
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
//! Streaming point cloud processing pipeline.
//!
//! Enables out-of-core processing of arbitrarily large point clouds by reading
//! and processing data in bounded-size chunks.  Only one chunk resides in RAM at
//! a time; the pipeline accumulates lightweight per-chunk state (e.g. a voxel map)
//! that is orders of magnitude smaller than the full dataset.
//!
//! # Architecture
//!
//! ```text
//! Source iterator                Pipeline stage           Output
//! (file / network / …)  ──►  process_chunk(&[T])  ──►  finalize()
//!      chunk 0                  accumulate state
//!      chunk 1                  accumulate state
//!      …                        …
//!      chunk N                  accumulate state
//! ```
//!
//! # Provided pipelines
//!
//! | Type | Description |
//! |---|---|
//! | [`StreamingVoxelFilter`] | Downsamples via a voxel grid; O(voxels) memory |
//! | [`StreamingStatistics`] | Accumulates bounding-box and point count |
//! | [`StreamingCollector`] | Collects all points (useful for testing) |
//!
//! # Example
//!
//! ```rust
//! use threecrate_algorithms::streaming::{
//!     StreamingPipeline, StreamingVoxelFilter, StreamingVoxelFilterConfig, run_pipeline,
//! };
//! use threecrate_core::Point3f;
//!
//! let points: Vec<Result<Point3f, _>> = vec![
//!     Ok(Point3f::new(0.0, 0.0, 0.0)),
//!     Ok(Point3f::new(0.05, 0.0, 0.0)),
//!     Ok(Point3f::new(1.0, 1.0, 1.0)),
//! ];
//! let mut filter = StreamingVoxelFilter::new(StreamingVoxelFilterConfig { voxel_size: 0.1 });
//! let stats = run_pipeline(&mut filter, points.into_iter(), 2).unwrap();
//! let cloud = filter.finalize().unwrap();
//! println!("Downsampled to {} points", cloud.len());
//! ```

use std::collections::HashMap;
use threecrate_core::{Error, Point3f, PointCloud, Result};

// ---------------------------------------------------------------------------
// Core trait
// ---------------------------------------------------------------------------

/// Trait for chunk-based streaming processors.
///
/// Implementations accumulate state across calls to [`process_chunk`] and
/// produce a final result via [`finalize`].  The chunk size controls peak RAM
/// usage: smaller chunks use less memory at the cost of more function-call
/// overhead.
///
/// [`process_chunk`]: StreamingPipeline::process_chunk
/// [`finalize`]: StreamingPipeline::finalize
/// Trait for chunk-based streaming processors.
///
/// An implementation receives the source data piecewise through
/// [`process_chunk`] and, once the source is exhausted, yields its accumulated
/// result from [`finalize`].  Peak RAM usage is governed by the chunk size
/// chosen by the driver: smaller chunks keep less data resident at once, at
/// the price of extra per-call overhead.
///
/// [`process_chunk`]: StreamingPipeline::process_chunk
/// [`finalize`]: StreamingPipeline::finalize
pub trait StreamingPipeline<T> {
    /// Result type yielded once every chunk has been ingested.
    type Output;

    /// Accumulate one chunk of items into the pipeline's internal state.
    /// Invoked repeatedly until the source runs dry; `chunk` is never empty.
    fn process_chunk(&mut self, chunk: &[T]) -> Result<()>;

    /// Consume the pipeline, yielding the accumulated output.
    fn finalize(self) -> Result<Self::Output>;

    /// Approximate number of bytes this stage currently retains.
    /// The default implementation reports `0`; override for real estimates.
    fn memory_bytes(&self) -> usize {
        0
    }
}

// ---------------------------------------------------------------------------
// Pipeline runner
// ---------------------------------------------------------------------------

/// Statistics reported by [`run_pipeline`].
#[derive(Debug, Clone, Default)]
pub struct RunStats {
    /// Count of items that made it into a processed chunk.
    pub items_processed: usize,
    /// How many chunks were handed to the pipeline stage.
    pub chunks_processed: usize,
    /// Items dropped because of source errors (only when `skip_errors` is on).
    pub errors_skipped: usize,
}

/// Options for [`run_pipeline`].
#[derive(Debug, Clone)]
pub struct RunOptions {
    /// When `true`, item-level errors from the source iterator are tallied in
    /// [`RunStats::errors_skipped`] and skipped instead of aborting
    /// [`run_pipeline`].  Default: `false`.
    pub skip_errors: bool,
}

impl Default for RunOptions {
    /// By default, errors propagate rather than being skipped.
    fn default() -> Self {
        Self { skip_errors: false }
    }
}

/// Drive `pipeline` by reading from `source` in chunks of `chunk_size` items.
///
/// Convenience wrapper around [`run_pipeline_with_options`] using default
/// options, i.e. any item-level error from `source` aborts the run.
///
/// # Arguments
/// * `pipeline`   – The [`StreamingPipeline`] stage to feed.
/// * `source`     – Any iterator yielding `Result<T>` items.
/// * `chunk_size` – Items buffered per call to
///                  [`StreamingPipeline::process_chunk`].  Must be ≥ 1.
pub fn run_pipeline<T, P>(
    pipeline: &mut P,
    source: impl Iterator<Item = Result<T>>,
    chunk_size: usize,
) -> Result<RunStats>
where
    P: StreamingPipeline<T>,
{
    let opts = RunOptions::default();
    run_pipeline_with_options(pipeline, source, chunk_size, &opts)
}

/// Like [`run_pipeline`] but accepts explicit options.
pub fn run_pipeline_with_options<T, P>(
    pipeline: &mut P,
    source: impl Iterator<Item = Result<T>>,
    chunk_size: usize,
    opts: &RunOptions,
) -> Result<RunStats>
where
    P: StreamingPipeline<T>,
{
    if chunk_size == 0 {
        return Err(Error::InvalidData("chunk_size must be ≥ 1".into()));
    }

    let mut stats = RunStats::default();
    // One reusable buffer; `clear()` keeps its capacity across chunks.
    let mut buffer: Vec<T> = Vec::with_capacity(chunk_size);

    for item in source {
        let point = match item {
            Ok(p) => p,
            Err(_) if opts.skip_errors => {
                stats.errors_skipped += 1;
                continue;
            }
            Err(e) => return Err(e),
        };
        buffer.push(point);
        if buffer.len() == chunk_size {
            pipeline.process_chunk(&buffer)?;
            stats.items_processed += buffer.len();
            stats.chunks_processed += 1;
            buffer.clear();
        }
    }

    // Deliver the final partial chunk, if any.
    if !buffer.is_empty() {
        pipeline.process_chunk(&buffer)?;
        stats.items_processed += buffer.len();
        stats.chunks_processed += 1;
    }

    Ok(stats)
}

// ---------------------------------------------------------------------------
// StreamingVoxelFilter
// ---------------------------------------------------------------------------

/// Configuration for [`StreamingVoxelFilter`].
#[derive(Debug, Clone)]
pub struct StreamingVoxelFilterConfig {
    /// Edge length of each cubic voxel, in the same units as the point
    /// coordinates.  Must be positive.
    pub voxel_size: f32,
}

/// Streaming voxel-grid downsampler.
///
/// Keeps a [`HashMap`] from integer voxel coordinates to a running coordinate
/// sum and point count.  Peak memory is `O(V)` for `V` occupied voxels across
/// the whole dataset — usually far below the total point count.
///
/// Unlike the in-memory [`voxel_grid_filter`](crate::filtering::voxel_grid_filter),
/// no bounding-box pre-scan is needed: each voxel key is obtained by dividing
/// the coordinate by `voxel_size` and flooring, which yields the same key for
/// a given point regardless of which chunk delivered it.
///
/// Each voxel's representative point is the **centroid** of all points that
/// fell into it, which is smoother than picking the first point seen.
pub struct StreamingVoxelFilter {
    config: StreamingVoxelFilterConfig,
    /// Per-voxel running coordinate sum and point count for the centroid.
    voxels: HashMap<(i32, i32, i32), ([f64; 3], u32)>,
}

impl StreamingVoxelFilter {
    /// Create a new streaming voxel filter.
    pub fn new(config: StreamingVoxelFilterConfig) -> Self {
        Self {
            config,
            voxels: HashMap::new(),
        }
    }

    /// Map a point to the integer coordinates of its containing voxel.
    #[inline]
    fn voxel_key(&self, p: &Point3f) -> (i32, i32, i32) {
        let scale = 1.0 / self.config.voxel_size;
        let cell = |c: f32| (c * scale).floor() as i32;
        (cell(p.x), cell(p.y), cell(p.z))
    }

    /// Number of occupied voxels accumulated so far.
    pub fn voxel_count(&self) -> usize {
        self.voxels.len()
    }
}

impl StreamingPipeline<Point3f> for StreamingVoxelFilter {
    type Output = PointCloud<Point3f>;

    fn process_chunk(&mut self, chunk: &[Point3f]) -> Result<()> {
        if self.config.voxel_size <= 0.0 {
            return Err(Error::InvalidData("voxel_size must be positive".into()));
        }
        for p in chunk {
            let key = self.voxel_key(p);
            let entry = self.voxels.entry(key).or_insert(([0.0; 3], 0));
            entry.0[0] += p.x as f64;
            entry.0[1] += p.y as f64;
            entry.0[2] += p.z as f64;
            entry.1 += 1;
        }
        Ok(())
    }

    fn finalize(self) -> Result<PointCloud<Point3f>> {
        let points: Vec<Point3f> = self
            .voxels
            .values()
            .map(|(sum, count)| {
                let n = *count as f64;
                Point3f::new((sum[0] / n) as f32, (sum[1] / n) as f32, (sum[2] / n) as f32)
            })
            .collect();
        Ok(PointCloud::from_points(points))
    }

    fn memory_bytes(&self) -> usize {
        // Each entry: key (12 bytes) + value (28 bytes) + HashMap overhead (~50 bytes).
        self.voxels.len() * 90
    }
}

// ---------------------------------------------------------------------------
// StreamingStatistics
// ---------------------------------------------------------------------------

/// Accumulated statistics produced by [`StreamingStatistics`].
#[derive(Debug, Clone)]
pub struct PointCloudStats {
    /// Total number of points processed.
    pub point_count: u64,
    /// Smallest coordinate on each axis (AABB corner).
    pub min: Point3f,
    /// Largest coordinate on each axis (AABB corner).
    pub max: Point3f,
    /// Mean position, averaged per axis.
    pub mean: Point3f,
}

/// Streaming statistics collector.
///
/// Computes bounding box, point count, and mean position in one pass while
/// retaining no individual points, so peak memory is `O(1)`.
pub struct StreamingStatistics {
    count: u64,
    // Bounds start at ±infinity so the first point always tightens them.
    min: [f32; 3],
    max: [f32; 3],
    // Coordinate sums kept in f64 for precision over long streams.
    sum: [f64; 3],
}

impl StreamingStatistics {
    /// Create a new statistics collector with empty (inverted) bounds.
    pub fn new() -> Self {
        Self {
            count: 0,
            min: [f32::INFINITY; 3],
            max: [f32::NEG_INFINITY; 3],
            sum: [0.0; 3],
        }
    }
}

impl Default for StreamingStatistics {
    fn default() -> Self {
        Self::new()
    }
}

impl StreamingPipeline<Point3f> for StreamingStatistics {
    type Output = PointCloudStats;

    /// Fold each point into the running bounds, count, and coordinate sums.
    fn process_chunk(&mut self, chunk: &[Point3f]) -> Result<()> {
        for p in chunk {
            let coords = [p.x, p.y, p.z];
            for axis in 0..3 {
                self.min[axis] = self.min[axis].min(coords[axis]);
                self.max[axis] = self.max[axis].max(coords[axis]);
                self.sum[axis] += coords[axis] as f64;
            }
            self.count += 1;
        }
        Ok(())
    }

    /// Produce the final statistics.
    ///
    /// # Errors
    /// Fails with [`Error::InvalidData`] when no points were processed, since
    /// bounds and mean are undefined for an empty stream.
    fn finalize(self) -> Result<PointCloudStats> {
        if self.count == 0 {
            return Err(Error::InvalidData("no points were processed".into()));
        }
        let n = self.count as f64;
        Ok(PointCloudStats {
            point_count: self.count,
            min: Point3f::new(self.min[0], self.min[1], self.min[2]),
            max: Point3f::new(self.max[0], self.max[1], self.max[2]),
            mean: Point3f::new(
                (self.sum[0] / n) as f32,
                (self.sum[1] / n) as f32,
                (self.sum[2] / n) as f32,
            ),
        })
    }

    fn memory_bytes(&self) -> usize {
        std::mem::size_of::<Self>()
    }
}

// ---------------------------------------------------------------------------
// StreamingCollector
// ---------------------------------------------------------------------------

/// Streaming pipeline stage that collects all points into a `PointCloud`.
///
/// Handy in tests, or as a terminal stage when the full cloud must eventually
/// be materialized (e.g. after upstream stages have thinned it out).
pub struct StreamingCollector {
    points: Vec<Point3f>,
}

impl StreamingCollector {
    /// Create an empty collector.
    pub fn new() -> Self {
        Self { points: Vec::new() }
    }

    /// Create a collector whose buffer is pre-allocated for `cap` points.
    pub fn with_capacity(cap: usize) -> Self {
        Self {
            points: Vec::with_capacity(cap),
        }
    }
}

impl Default for StreamingCollector {
    fn default() -> Self {
        Self::new()
    }
}

impl StreamingPipeline<Point3f> for StreamingCollector {
    type Output = PointCloud<Point3f>;

    /// Append the chunk verbatim to the in-memory buffer.
    fn process_chunk(&mut self, chunk: &[Point3f]) -> Result<()> {
        self.points.extend_from_slice(chunk);
        Ok(())
    }

    /// Hand back everything collected as a `PointCloud`.
    fn finalize(self) -> Result<PointCloud<Point3f>> {
        Ok(PointCloud::from_points(self.points))
    }

    fn memory_bytes(&self) -> usize {
        // Report allocated capacity rather than element count: a Vec holds
        // memory for `capacity()` elements regardless of how many are in use,
        // so `len()` under-reports after `with_capacity` or amortized growth.
        self.points.capacity() * std::mem::size_of::<Point3f>()
    }
}

// ---------------------------------------------------------------------------
// Streaming source helpers
// ---------------------------------------------------------------------------

/// Wrap a `PointCloud` as a streaming source of `Result<Point3f>`.
///
/// Handy for exercising pipelines without touching the filesystem.
pub fn cloud_as_stream(
    cloud: &PointCloud<Point3f>,
) -> impl Iterator<Item = Result<Point3f>> + '_ {
    cloud.points.iter().map(|p| Ok(*p))
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    /// Build a cloud of `n` points spaced 0.1 apart along +x.
    fn grid_cloud(n: usize) -> PointCloud<Point3f> {
        let pts = (0..n)
            .map(|i| Point3f::new(i as f32 * 0.1, 0.0, 0.0))
            .collect();
        PointCloud::from_points(pts)
    }

    // ---- StreamingCollector -----------------------------------------------

    #[test]
    fn test_collector_round_trip() {
        let input = grid_cloud(25);
        let mut sink = StreamingCollector::new();
        let run = run_pipeline(&mut sink, cloud_as_stream(&input), 8).unwrap();
        let out = sink.finalize().unwrap();

        assert_eq!(run.items_processed, 25);
        assert_eq!(run.chunks_processed, 4); // three full chunks of 8, then 1
        assert_eq!(out.len(), 25);
    }

    // ---- StreamingStatistics ----------------------------------------------

    #[test]
    fn test_statistics_correctness() {
        // Collinear points at x = 0, 1, 2.
        let cloud = PointCloud::from_points(vec![
            Point3f::new(0.0, 0.0, 0.0),
            Point3f::new(1.0, 0.0, 0.0),
            Point3f::new(2.0, 0.0, 0.0),
        ]);
        let mut pipe = StreamingStatistics::new();
        run_pipeline(&mut pipe, cloud_as_stream(&cloud), 2).unwrap();
        let result = pipe.finalize().unwrap();

        assert_eq!(result.point_count, 3);
        assert!((result.min.x - 0.0).abs() < 1e-6);
        assert!((result.max.x - 2.0).abs() < 1e-6);
        assert!((result.mean.x - 1.0).abs() < 1e-6);
    }

    #[test]
    fn test_statistics_empty_fails() {
        // Finalizing without any input must be rejected.
        assert!(StreamingStatistics::new().finalize().is_err());
    }

    // ---- StreamingVoxelFilter ---------------------------------------------

    #[test]
    fn test_voxel_filter_reduces_density() {
        // x ∈ [0, 9.9] in 0.1 steps with voxel_size 1.0 → at most 10 voxels.
        let cloud = grid_cloud(100);
        let mut filter =
            StreamingVoxelFilter::new(StreamingVoxelFilterConfig { voxel_size: 1.0 });
        run_pipeline(&mut filter, cloud_as_stream(&cloud), 32).unwrap();
        let out = filter.finalize().unwrap();

        assert!(out.len() <= 10, "expected ≤10 voxels, got {}", out.len());
        assert!(!out.is_empty());
    }

    #[test]
    fn test_voxel_filter_centroid() {
        // Two co-voxel points at x = 0.1 and 0.3 → centroid at x = 0.2.
        let cloud = PointCloud::from_points(vec![
            Point3f::new(0.1, 0.0, 0.0),
            Point3f::new(0.3, 0.0, 0.0),
        ]);
        let mut filter =
            StreamingVoxelFilter::new(StreamingVoxelFilterConfig { voxel_size: 1.0 });
        run_pipeline(&mut filter, cloud_as_stream(&cloud), 10).unwrap();
        let out = filter.finalize().unwrap();

        assert_eq!(out.len(), 1);
        assert!((out.points[0].x - 0.2).abs() < 1e-5);
    }

    #[test]
    fn test_voxel_filter_across_chunk_boundary() {
        // Same-voxel points arriving in different chunks must still merge.
        let cloud = PointCloud::from_points(vec![
            Point3f::new(0.1, 0.0, 0.0),
            Point3f::new(0.9, 0.0, 0.0),
        ]);
        let mut filter =
            StreamingVoxelFilter::new(StreamingVoxelFilterConfig { voxel_size: 1.0 });
        // chunk_size = 1 puts every point in its own chunk.
        run_pipeline(&mut filter, cloud_as_stream(&cloud), 1).unwrap();
        let out = filter.finalize().unwrap();

        assert_eq!(out.len(), 1, "points in the same voxel across chunks should merge");
        assert!((out.points[0].x - 0.5).abs() < 1e-5);
    }

    #[test]
    fn test_invalid_voxel_size() {
        let mut filter =
            StreamingVoxelFilter::new(StreamingVoxelFilterConfig { voxel_size: -1.0 });
        let cloud = PointCloud::from_points(vec![Point3f::new(0.0, 0.0, 0.0)]);
        assert!(run_pipeline(&mut filter, cloud_as_stream(&cloud), 1).is_err());
    }

    // ---- run_pipeline options --------------------------------------------

    #[test]
    fn test_skip_errors() {
        let source: Vec<Result<Point3f>> = vec![
            Ok(Point3f::new(0.0, 0.0, 0.0)),
            Err(Error::InvalidData("bad point".into())),
            Ok(Point3f::new(1.0, 0.0, 0.0)),
        ];
        let mut sink = StreamingCollector::new();
        let opts = RunOptions { skip_errors: true };
        let run =
            run_pipeline_with_options(&mut sink, source.into_iter(), 10, &opts).unwrap();
        let out = sink.finalize().unwrap();
        assert_eq!(out.len(), 2);
        assert_eq!(run.errors_skipped, 1);
    }

    #[test]
    fn test_error_propagation() {
        let source: Vec<Result<Point3f>> = vec![
            Ok(Point3f::new(0.0, 0.0, 0.0)),
            Err(Error::InvalidData("bad point".into())),
        ];
        let mut sink = StreamingCollector::new();
        // With default options the item error must surface.
        assert!(run_pipeline(&mut sink, source.into_iter(), 10).is_err());
    }

    #[test]
    fn test_chunk_size_zero_fails() {
        let mut sink = StreamingCollector::new();
        let outcome = run_pipeline(&mut sink, std::iter::empty::<Result<Point3f>>(), 0);
        assert!(outcome.is_err());
    }

    #[test]
    fn test_memory_bytes() {
        let mut filter =
            StreamingVoxelFilter::new(StreamingVoxelFilterConfig { voxel_size: 0.5 });
        let cloud = grid_cloud(20);
        run_pipeline(&mut filter, cloud_as_stream(&cloud), 5).unwrap();
        // Occupied voxels imply a non-zero memory estimate.
        assert!(filter.memory_bytes() > 0);
    }
}
}