all_is_cubes_mesh/depth_sorting.rs
//! Types and algorithms for depth sorting.

use alloc::vec::Vec;
use core::cmp::Ordering;
use core::fmt;
use core::ops::{self, Range};

use exhaust::Exhaust as _;
use ordered_float::OrderedFloat;
use smallvec::SmallVec;

use all_is_cubes::euclid::{self, Vector3D, vec3};
use all_is_cubes::math::lines::Wireframe as _;
use all_is_cubes::math::{Axis, Face6, FaceMap, GridRotation, lines};

use crate::{
    Aabb, IndexInt, IndexSliceMut, IndexVec, MeshRel, MeshTypes, PosCoord, Position,
    TransparentMeta, Vertex,
};
#[cfg(doc)]
use crate::{MeshMeta, SpaceMesh};

// -------------------------------------------------------------------------------------------------

/// Sorting and culling of transparent triangles of a [`SpaceMesh`].
///
/// Each value of this type identifies an ordering for depth sorting, and a filter for culling,
/// based on the direction from which the mesh is being viewed.
///
/// Create this using [`DepthOrdering::from_view_of_aabb()`],
/// then use it in [`MeshMeta::transparent_range()`].
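///
/// An illustrative sketch of that flow (not compiled as a doctest; `camera_position`,
/// `mesh_bounds`, and `mesh` stand in for whatever the calling renderer has on hand):
///
/// ```ignore
/// let ordering = DepthOrdering::from_view_of_aabb(camera_position, mesh_bounds);
/// // Range of the mesh’s index buffer holding the transparent triangles for this view,
/// // culled for the view direction and meant to be drawn after the opaque geometry.
/// let index_range = mesh.transparent_range(ordering);
/// ```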
#[derive(Copy, Clone, Eq, Hash, PartialEq)]
pub struct DepthOrdering(
    /// Specifies the relationship which the viewpoint has to the viewed mesh’s bounding box,
    /// per axis.
    Vector3D<Rel, ()>,
);

/// Relationship of the viewpoint to the mesh on one axis.
#[derive(Clone, Copy, Eq, Hash, PartialEq, exhaust::Exhaust)]
#[doc(hidden)] // public-in-private just for convenience in the `Exhaust` implementation.
#[allow(unnameable_types)]
pub enum Rel {
    /// The viewpoint is lower on this axis than the bounding box.
    Lower = 0,
    /// The viewpoint is within the bounding box on this axis.
    Within = 1,
    /// The viewpoint is higher on this axis than the bounding box.
    Higher = 2,
}

impl DepthOrdering {
    /// An arbitrary choice of ordering, with no culling.
    ///
    /// Use this to read the mesh’s indices without regard for view direction,
    /// such as to prepare an unsorted, unculled export.
    //---
    // Note: The use of `WITHIN` is because we omit known back-faces from the other options,
    // whereas `WITHIN` always has to be complete — well, except that it could omit faces that are
    // _on_ the bounding box which cannot be seen from within, but that is not currently
    // implemented. If we did implement that culling, we would need to define a separate value for
    // this purpose.
    //
    // The superfluous `const` block nudges rustdoc into not showing the value.
    pub const ALL_UNSORTED: Self = const { Self::WITHIN };

    /// The viewpoint is within the volume; therefore dynamic rather than precomputed
    /// sorting must be used.
    pub(crate) const WITHIN: Self = Self(vec3(Rel::Within, Rel::Within, Rel::Within));

    /// Number of distinct [`Self`] values; one plus the maximum of [`Self::to_index()`].
    pub(crate) const COUNT: usize = 3_usize.pow(3);

    /// Maps `self` to a unique integer.
    pub(crate) fn to_index(self) -> usize {
        let [x, y, z] = self.0.into();

        // This is the same ordering as `exhaust()` gives,
        // which matters for debug formatting elsewhere but not for functionality.
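        // For example, (Lower, Within, Higher) = (0, 1, 2) maps to (0 * 3 + 1) * 3 + 2 = 5.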
        (x as usize * 3 + y as usize) * 3 + z as usize
    }

    /// Calculates the [`DepthOrdering`] value suitable for `camera_position` viewing a mesh with
    /// bounds `geometry_bounds`.
    ///
    /// The coordinate system used for the provided point and bounding box does not matter as
    /// long as they use the same one.
    pub fn from_view_of_aabb<U>(
        camera_position: euclid::Point3D<f64, U>,
        geometry_bounds: impl Into<euclid::Box3D<f64, U>>,
    ) -> DepthOrdering {
        fn inner(
            camera_position: euclid::Point3D<f64, euclid::UnknownUnit>,
            geometry_bounds: euclid::Box3D<f64, euclid::UnknownUnit>,
        ) -> DepthOrdering {
            let mut ord = DepthOrdering::WITHIN;
            for axis in Axis::ALL {
                if camera_position[axis] < geometry_bounds.min[axis] {
                    ord.0[axis] = Rel::Lower
                } else if camera_position[axis] > geometry_bounds.max[axis] {
                    ord.0[axis] = Rel::Higher
                }
            }

            ord
        }

        inner(
            camera_position.to_untyped(),
            geometry_bounds.into().to_untyped(),
        )
    }

    /// Counts how many axes have the viewpoint within the bounding box when projected on that axis.
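    ///
    /// For example, [`DepthOrdering::WITHIN`] yields 3, while an ordering whose viewpoint lies
    /// outside the bounding box on every axis yields 0.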
    pub(crate) fn within_on_axes(self) -> u8 {
        let Self(Vector3D { x, y, z, .. }) = self;
        u8::from(x == Rel::Within) + u8::from(y == Rel::Within) + u8::from(z == Rel::Within)
    }

    /// Returns a rotation which rotates vertex positions into positions whose lexicographic
    /// ordering is this ordering.
    fn sort_key_rotation(self) -> GridRotation {
        // Find the axis permutation that puts the `Within` axes last,
        // to support the partly-static sorting of partly-`Within` orderings.
        //
        // (This is defined as the inverse permutation because the way `GridRotation` names work
        // makes it easier to read that way.)
        let inverse_permutation = if self.0.x == Rel::Within {
            if self.0.y == Rel::Within {
                GridRotation::RZYX
            } else {
                // either X and Z are Within, or only X is
                GridRotation::RYZX
            }
        } else if self.0.y == Rel::Within {
            // Y is Within and X is not
            GridRotation::RXZY
        } else {
            // either only Z is Within or nothing is
            GridRotation::RXYZ
        };

        // Find which axes need to be negated to get a nonnegative result.
        let flips = if self.0.x == Rel::Lower {
            GridRotation::RxYZ
        } else {
            GridRotation::IDENTITY
        } * if self.0.y == Rel::Lower {
            GridRotation::RXyZ
        } else {
            GridRotation::IDENTITY
        } * if self.0.z == Rel::Lower {
            GridRotation::RXYz
        } else {
            GridRotation::IDENTITY
        };

        // Compose the transformations.
        inverse_permutation.inverse() * flips
    }

    /// Returns whether a triangle with the given orientation may be visible from this ordering.
    ///
    /// For example, if this ordering is out of bounds in the negative X direction, then any
    /// [`Face6::PX`] cannot possibly be visible.
    fn face_visible_from_here(self, face: Face6) -> bool {
        self.0[face.axis()]
            != if face.is_negative() {
                Rel::Higher
            } else {
                Rel::Lower
            }
    }

    /// Draw a ray pointing towards the applicable corner, edge, or face of `bb`.
    #[cfg_attr(not(feature = "dynamic"), expect(dead_code))]
    pub(crate) fn debug_lines(self, bb: Aabb, output: &mut impl Extend<[lines::Vertex; 2]>) {
        if self == Self::WITHIN {
            // TODO: draw a marker for this case
            return;
        }

        let bb = euclid::Box3D::from(bb);
        let direction = self
            .0
            .map(|rel| match rel {
                Rel::Lower => 1.,
                Rel::Within => 0.,
                Rel::Higher => -1.,
            })
            .cast_unit::<all_is_cubes::math::Cube>()
            .normalize()
            * 5.0;
        let mut corner = euclid::Point3D::zero();
        for axis in Axis::ALL {
            corner[axis] = match self.0[axis] {
                Rel::Lower => bb.min[axis],
                Rel::Within => bb.center()[axis],
                Rel::Higher => bb.max[axis],
            }
        }
        all_is_cubes::raycast::Ray {
            origin: (corner - direction).to_f64(),
            direction: direction.to_f64().cast_unit(),
        }
        .wireframe_points(output);
    }
}

// This explicit impl is needed because Vector3D doesn't implement Exhaust
impl exhaust::Exhaust for DepthOrdering {
    type Iter = <[Rel; 3] as exhaust::Exhaust>::Iter;
    type Factory = <[Rel; 3] as exhaust::Exhaust>::Factory;

    fn exhaust_factories() -> Self::Iter {
        <[Rel; 3]>::exhaust_factories()
    }

    fn from_factory(factory: Self::Factory) -> Self {
        Self(factory.map(Rel::from_factory).into())
    }
}

impl fmt::Debug for DepthOrdering {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for axis in Axis::ALL {
            self.0[axis].fmt(f)?;
        }
        Ok(())
    }
}

impl fmt::Debug for Rel {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.pad(match self {
            Rel::Lower => "−", // U+2212 MINUS SIGN for best symmetry
            Rel::Within => "W",
            Rel::Higher => "+",
        })
    }
}

// -------------------------------------------------------------------------------------------------

/// Outcome of [`SpaceMesh::depth_sort_for_view()`], specifying what changed.
#[derive(Clone, Debug, Eq, PartialEq)]
#[allow(clippy::exhaustive_structs)]
#[must_use]
pub struct DepthSortResult {
    /// If the order actually changed as a result of sorting, contains
    /// the range of the mesh’s indices which were modified by the depth sort operation.
    ///
    /// This may be used to determine, for example, whether the newly sorted indices need to be
    /// copied to a GPU buffer.
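    ///
    /// A sketch of how a renderer might react (illustrative only; `upload_index_subrange` is a
    /// hypothetical stand-in for whatever buffer-update mechanism the renderer uses):
    ///
    /// ```ignore
    /// let result = space_mesh.depth_sort_for_view(ordering, view_position);
    /// if let Some(range) = result.changed {
    ///     // Re-upload only the index range that the sort rearranged.
    ///     upload_index_subrange(&space_mesh, range);
    /// }
    /// ```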
    pub changed: Option<Range<usize>>,

    /// Performance information about the sorting operation.
    pub info: DepthSortInfo,
}

/// Performance information returned by [`SpaceMesh::depth_sort_for_view()`].
///
/// Format this with [`fmt::Debug`] to see its information.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
#[non_exhaustive]
pub struct DepthSortInfo {
    /// How many individual items (triangles or rectangles) were in the data to be sorted.
    #[doc(hidden)] // public for benchmark checking whether depth sorting happened as expected
    pub elements_sorted: usize,

    /// How many independent sorting operations were performed.
    ///
    /// All else being equal, it is better if this number is larger for a given `elements_sorted`,
    /// since the cost of sorting grows faster than linear.
    pub(crate) groups_sorted: usize,

    /// Number of lazy “static” sorting operations performed.
    /// These use a different comparison function and are only performed once per mesh per
    /// [`DepthOrdering`].
    pub(crate) static_groups_sorted: usize,
}

impl DepthSortResult {
    const NOTHING_CHANGED: Self = Self {
        changed: None,
        info: DepthSortInfo::DEFAULT,
    };
}

impl DepthSortInfo {
    const DEFAULT: Self = Self {
        elements_sorted: 0,
        groups_sorted: 0,
        static_groups_sorted: 0,
    };
}

impl Default for DepthSortInfo {
    fn default() -> Self {
        Self::DEFAULT
    }
}

impl ops::AddAssign for DepthSortInfo {
    fn add_assign(&mut self, rhs: Self) {
        let Self {
            elements_sorted,
            groups_sorted,
            static_groups_sorted,
        } = self;
        *elements_sorted += rhs.elements_sorted;
        *groups_sorted += rhs.groups_sorted;
        *static_groups_sorted += rhs.static_groups_sorted;
    }
}

// -------------------------------------------------------------------------------------------------

/// An individual depth-sortable item.
/// Implemented for array chunks of indices.
///
/// (This is not literally a GPU drawing primitive, but for our purposes it might as well be.)
trait Primitive: Copy + Sized {
    type Item: IndexInt;
    const LEN: usize;
    /// Divide the slice reference into chunks. Panics if the slice is not divisible.
    fn as_chunks(slice: &[Self::Item]) -> &[Self];
    /// Divide the exclusive slice reference into chunks. Panics if the slice is not divisible.
    fn as_chunks_mut(slice: &mut [Self::Item]) -> &mut [Self];
    /// Return 3 indices which together form a bounding box for the primitive.
    fn indices_for_sorting(&self) -> [Self::Item; 3];
}
/// Rectangles (made of 2 triangles).
/// Note that this does not work for general quadrilaterals because
/// the full bounding box would not be considered.
impl<Ix: IndexInt> Primitive for [Ix; 6] {
    type Item = Ix;
    const LEN: usize = 6;
    fn as_chunks(slice: &[Self::Item]) -> &[Self] {
        let (rects, rest) = slice.as_chunks();
        assert_eq!(
            rest.len(),
            0,
            "expected an index list divisible into rectangles"
        );
        rects
    }
    fn as_chunks_mut(slice: &mut [Self::Item]) -> &mut [Self] {
        let (rects, rest) = slice.as_chunks_mut();
        assert_eq!(
            rest.len(),
            0,
            "expected an index list divisible into rectangles"
        );
        rects
    }

    fn indices_for_sorting(&self) -> [Ix; 3] {
        [self[0], self[1], self[2]]
    }
}
/// Triangles.
impl<Ix: IndexInt> Primitive for [Ix; 3] {
    type Item = Ix;
    const LEN: usize = 3;
    fn as_chunks(slice: &[Self::Item]) -> &[Self] {
        let (tris, rest) = slice.as_chunks();
        assert_eq!(
            rest.len(),
            0,
            "expected an index list divisible into triangles"
        );
        tris
    }
    fn as_chunks_mut(slice: &mut [Self::Item]) -> &mut [Self] {
        let (tris, rest) = slice.as_chunks_mut();
        assert_eq!(
            rest.len(),
            0,
            "expected an index list divisible into triangles"
        );
        tris
    }

    fn indices_for_sorting(&self) -> [Ix; 3] {
        *self
    }
}

// -------------------------------------------------------------------------------------------------

// TODO: We have two different implementations of depth sorting, for different purposes,
// that have gratuitous differences. Bring them closer together for clarity.

/// Called by `SpaceMesh::store_indices_and_finish_compute()` to cull and store the indices of
/// transparent triangles in preparation for depth sorting them later.
///
/// Reads the contents of `transparent_indices`, appends culled copies to `indices`, and updates
/// `transparent_meta` to specify where the copies are located.
pub(crate) fn store_transparent_indices<M: MeshTypes, I: IndexInt>(
    indices: &mut IndexVec,
    transparent_meta: &mut [TransparentMeta; DepthOrdering::COUNT],
    transparent_indices: FaceMap<Vec<I>>,
) where
    IndexVec: Extend<I>,
{
    #![allow(clippy::single_range_in_vec_init)]

    if !M::Vertex::WANTS_DEPTH_SORTING || transparent_indices.values().all(|v| v.is_empty()) {
        // Either there is nothing to sort (and all ranges will be length 0),
        // or the destination doesn't want sorting anyway. In either case, write the
        // indices once and fill out transparent_ranges with copies of that range.
        //
        // TODO: There is inadequate testing for this case (only the glTF export test catches if
        // these indices are missing or extra).
        let index_range =
            extend_giving_range(indices, transparent_indices.into_values_iter().flatten());
        transparent_meta.fill(TransparentMeta {
            index_range,
            depth_sort_validity: Aabb::EVERYWHERE,
            dynamic_sub_ranges: SmallVec::new(),
        });
        return;
    }

    // Prepare un-sorted, but culled, indices for each direction.
    for ordering in DepthOrdering::exhaust() {
        let meta = &mut transparent_meta[ordering.to_index()];
        if ordering == DepthOrdering::WITHIN {
            // `WITHIN` is slightly different: nothing is culled, and we already know the
            // dynamic_sub_ranges and don’t have to defer to the lazy `static_sort()`.
            // TODO: We can improve slightly by culling faces that are on the outside surface
            // of the bounding box, which can never be visible from within.

            let index_range =
                extend_giving_range(indices, transparent_indices.values().flatten().copied());
            debug_assert!(!index_range.is_empty());
            *meta = TransparentMeta {
                // Always dynamically sort everything.
                dynamic_sub_ranges: SmallVec::from([0..index_range.len()]),
                // Haven't performed any sorting yet, so there is no region of validity.
                depth_sort_validity: Aabb::EMPTY,
                index_range,
            };
        } else {
            // Cull and copy indices into the main array.
            let index_range = extend_giving_range(
                indices,
                transparent_indices
                    .iter()
                    .filter(|&(face, _)| ordering.face_visible_from_here(face))
                    .flat_map(|(_, index_vec)| index_vec.iter().copied()),
            );
            *meta = TransparentMeta {
                depth_sort_validity: if index_range.is_empty() {
                    // Everything was culled, so no sorting needed.
                    Aabb::EVERYWHERE
                } else {
                    // Haven't performed any sorting yet, so there is no region of validity.
                    Aabb::EMPTY
                },
                // dynamic_sub_ranges will be computed when the sort is performed.
                dynamic_sub_ranges: SmallVec::new(),
                index_range,
            };
        }
    }
}

/// Sort `indices` according to `ordering`, and determine which portions, if any,
/// need to be “dynamically” sorted later according to the exact view position; then write that
/// information into `meta`.
fn static_sort<V: Vertex, P: Primitive>(
    ordering: DepthOrdering,
    vertices: &[V],
    indices: &mut [P::Item],
    meta: &mut TransparentMeta,
) {
    debug_assert!(meta.dynamic_sub_ranges.is_empty());

    // This inverse() is because ... TODO: The old explanation was wrong but I'm not sure
    // which one is right.
    let basis = ordering.sort_key_rotation().inverse().to_basis();

    let mut sortable_primitives: Vec<OrderedPrimitive<P>> =
        Vec::with_capacity(indices.len() / P::LEN);
    sortable_primitives.extend(
        P::as_chunks(indices)
            .iter()
            .map(|&indices_of_prim| OrderedPrimitive::new(indices_of_prim, vertices, basis)),
    );

    // Sort the primitives.
    // Note: Benchmarks show that `sort_by` is faster than `sort_unstable_by` for this.
    sortable_primitives.sort_by(OrderedPrimitive::cmp);

    // Copy the result of the sort back into the index slice.
    for (src, dst) in itertools::zip_eq(&sortable_primitives, P::as_chunks_mut(indices)) {
        *dst = src.indices;
    }

    // Figure out what ranges of this sorting result need to be sorted dynamically,
    // store them in `dynamic_sub_ranges`,
    // and update `depth_sort_validity` to reflect the new state.
    let within_on_axes = ordering.within_on_axes();
    match within_on_axes {
        _ if indices.is_empty() => {
            // There are no items to sort, so don't generate any range (that would be empty).
            // It is an invariant of `TransparentMeta` that `dynamic_sub_ranges` contains no
            // empty ranges.
            meta.depth_sort_validity = Aabb::EVERYWHERE;
        }
        0 => {
            // The static sort we have already done suffices.
            meta.depth_sort_validity = Aabb::EVERYWHERE;
        }
        3 => {
            // Everything must be sorted dynamically.
            // `store_transparent_indices()` should have marked this case as already eligible for
            // dynamic sorting, bypassing `static_sort()` entirely.
            unreachable!("should have skipped the static sort for WITHIN");
        }
        1 | 2 => {
            // Find non-overlapping ranges along the non-within axis, because we only need to
            // do dynamic sorting of things that overlap in that way.
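            //
            // For example, if the already-sorted primitives project onto that axis as the
            // intervals [0, 1], [0.5, 1.5], and [2, 3], the first two overlap and become one
            // dynamically-sorted group, while the third overlaps neither and needs no dynamic
            // sorting.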
            //
            // TODO: if within_on_axes = 1, then we have *two* non-within axes to work with.
            // We could take advantage of that by grouping by rectangles instead of on a line.

            let projection = basis.x;
            debug_assert_ne!(ordering.0[projection.axis()], Rel::Within);

            let mut prim_group = 0..1; // indices into sortable_primitives, not single indices!
            let mut group_upper_bound = sortable_primitives[0].min_max_on_axis(vertices, basis.x).1;
            for (i, prim) in sortable_primitives.iter().enumerate().skip(1) {
                let (qmin, qmax) = prim.min_max_on_axis(vertices, basis.x);
                if qmin < group_upper_bound {
                    // Add this primitive to the group because it overlaps on the axis
                    prim_group.end = i + 1;
                    // Keep the largest upper bound seen so far, so that a shorter primitive
                    // cannot shrink the group's extent.
                    group_upper_bound = group_upper_bound.max(qmax);
                } else {
                    // Primitive does not overlap; finish the current group and start a new one.
                    if prim_group.len() > 1 {
                        meta.dynamic_sub_ranges
                            .push((prim_group.start * P::LEN)..(prim_group.end * P::LEN));
                    }
                    prim_group = i..(i + 1);
                    group_upper_bound = qmax;
                }
            }
            // Write the last group
            if prim_group.len() > 1 {
                meta.dynamic_sub_ranges
                    .push((prim_group.start * P::LEN)..(prim_group.end * P::LEN));
            }

            if meta.dynamic_sub_ranges.is_empty() {
                // No sub-ranges requiring dynamic sorting exist.
                // Therefore, we have determined that our static sort is sufficient, and can mark
                // it as valid everywhere.
                meta.depth_sort_validity = Aabb::EVERYWHERE;
            } else {
                // We have determined what ranges require dynamic sorting, but
                // those ranges are not yet sorted, so there is no volume of validity.
                meta.depth_sort_validity = Aabb::EMPTY;
            }
        }
        4.. => unreachable!(),
    }

    // Ensure we have signaled initialization and won't sort again.
    debug_assert!(
        !meta.needs_static_sort(),
        "failed to mark mesh as having completed its static depth sort; \
         ordering = {ordering:?}, within_on_axes = {within_on_axes}, meta = {meta:#?}"
    );
}

/// Sort the existing indices of `indices[range]` for exactly the given view position.
///
/// This routine implements the “dynamic” depth sorting case, where the view position is within
/// the transparent mesh and therefore cannot be described using a [`DepthOrdering`] simplification.
///
/// Returns information including whether there was any change in ordering.
pub(crate) fn dynamic_depth_sort_for_view<M: MeshTypes>(
    vertices: &[M::Vertex],
    mut indices: IndexSliceMut<'_>,
    ordering: DepthOrdering,
    view_position: Position,
    meta: &mut TransparentMeta,
    has_non_rect_transparency: bool,
) -> DepthSortResult {
    if !M::Vertex::WANTS_DEPTH_SORTING {
        return DepthSortResult::NOTHING_CHANGED;
    }
    if meta.depth_sort_validity.contains(view_position) {
        // Previous dynamic sort is still valid.
        // TODO: report this vs. other exit cases in info
        return DepthSortResult::NOTHING_CHANGED;
    }

    // Check if we need to do our initial “static” sort.
    // If this has not been done, then `depth_sort_validity` is not initialized and we don't
    // know what to sort.
    let needs_static_sort = meta.needs_static_sort();
    if needs_static_sort {
        // let start_time = Instant::now();
        match (&mut indices, has_non_rect_transparency) {
            (IndexSliceMut::U16(indices), false) => {
                static_sort::<_, [u16; 6]>(ordering, vertices, indices, meta)
            }
            (IndexSliceMut::U32(indices), false) => {
                static_sort::<_, [u32; 6]>(ordering, vertices, indices, meta)
            }
            (IndexSliceMut::U16(indices), true) => {
                static_sort::<_, [u16; 3]>(ordering, vertices, indices, meta)
            }
            (IndexSliceMut::U32(indices), true) => {
                static_sort::<_, [u32; 3]>(ordering, vertices, indices, meta)
            }
        }
        // TODO: integrate this into DepthSortInfo instead
        // log::trace!(
        //     "static {ordering:?} sort took {time} and produced {ranges} ranges",
        //     time = start_time.elapsed().refmt(&ConciseDebug),
        //     ranges = meta.dynamic_sub_ranges.len(),
        // );
    }

    #[inline(never)] // save our inlining budget for the *contents* of this function
    fn generic_sort<M: MeshTypes, P: Primitive>(
        data: &mut [P::Item],
        positions: &[M::Vertex],
        view_position: Position,
        meta: &mut TransparentMeta,
    ) -> DepthSortResult {
        let mut prims_sorted = 0;
        let mut groups_sorted = 0;

        // Accumulator of the new region of validity of this sort.
        // This will be shrunk to exclude any position that crosses the plane of any surface of the
        // mesh. As long as the viewpoint doesn’t exit this box, the sorting is still valid.
        // (TODO: Prove this claim.)
        let mut new_validity = Aabb::EVERYWHERE;

        for sub_range in meta.dynamic_sub_ranges.iter().cloned() {
            let data_slice: &mut [P::Item] = &mut data[sub_range];
            // We want to sort the primitives (rectangles or triangles),
            // so we reinterpret the slice as groups of indices.
            let primitives_slice: &mut [P] = P::as_chunks_mut(data_slice);

            primitives_slice.sort_unstable_by_key(
                #[inline]
                |indices| {
                    -OrderedFloat(manhattan_length(
                        view_position - midpoint(positions, *indices),
                    ))
                },
            );
            prims_sorted += primitives_slice.len();
            groups_sorted += 1;

            // Update the range of validity to not go past any of the sorted vertices.
            for &mut ix in data_slice {
                let vertex_position = M::Vertex::position(&positions[ix.to_slice_index()]);
                new_validity.exclude_beyond(vertex_position, view_position);
            }
        }

        meta.depth_sort_validity = new_validity;

        DepthSortResult {
            // Find start and end of the union of all dynamic_sub_ranges.
            changed: match meta.dynamic_sub_ranges.as_slice() {
                [Range { start, end }] | [Range { start, .. }, .., Range { end, .. }] => {
                    let base = meta.index_range.start;
                    Some((base + start)..(base + end))
                }
                [] => None,
            },
            info: DepthSortInfo {
                elements_sorted: prims_sorted,
                groups_sorted,
                static_groups_sorted: 0,
            },
        }
    }

    let mut result = match (indices, has_non_rect_transparency) {
        (IndexSliceMut::U16(slice), false) => {
            generic_sort::<M, [u16; 6]>(slice, vertices, view_position, meta)
        }
        (IndexSliceMut::U32(slice), false) => {
            generic_sort::<M, [u32; 6]>(slice, vertices, view_position, meta)
        }
        (IndexSliceMut::U16(slice), true) => {
            generic_sort::<M, [u16; 3]>(slice, vertices, view_position, meta)
        }
        (IndexSliceMut::U32(slice), true) => {
            generic_sort::<M, [u32; 3]>(slice, vertices, view_position, meta)
        }
    };
    if needs_static_sort {
        // If we did a static sort, then all indices in the range changed,
        // not just the ones the dynamic sort touched.
        result.changed = Some(meta.index_range.clone());

        result.info.elements_sorted += meta.index_range.len();
        result.info.static_groups_sorted += 1;
    }
    result
}

// -------------------------------------------------------------------------------------------------

/// Temporary depth-sortable version of a single primitive (rectangle or triangle) extracted from an
/// [`IndexVec`].
///
/// This is used for “static” sorts which look like “sort on +X then +Z then -Y”, not for
/// “dynamic” sorts which use distance from a specific view point.
struct OrderedPrimitive<P> {
    /// Sort key. Derived from the vertices but not actually a point. Never contains NaN.
    order: [f32; 3],
    /// Original index data; an array of 3 or 6 indices. Not used in ordering.
    indices: P,
}
impl<P: Primitive> OrderedPrimitive<P> {
    #[inline(always)]
    fn new<V: Vertex>(primitive: P, vertices: &[V], basis: Vector3D<Face6, ()>) -> Self {
        let midpoint = midpoint(vertices, primitive).to_vector();
        OrderedPrimitive {
            indices: primitive,
            order: [
                basis.x.dot(midpoint),
                basis.y.dot(midpoint),
                basis.z.dot(midpoint),
            ],
        }
    }

    /// Compare two primitives according to the desired sort order.
    ///
    /// This is not a [`PartialOrd`] implementation so that we don't have to follow the `Eq`
    /// consistency rules, which would be either weird (`Eq` ignoring `indices` which are the actual
    /// data we carry) or inefficient (comparing `indices` even though we don’t need to).
    #[inline(always)]
    fn cmp(&self, other: &Self) -> Ordering {
        assume_no_nan_cmp(self.order[0], other.order[0]).then_with(|| {
            assume_no_nan_cmp(self.order[1], other.order[1])
                .then_with(|| assume_no_nan_cmp(self.order[2], other.order[2]))
        })
    }

    #[inline(always)]
    fn min_max_on_axis<V: Vertex>(&self, vertices: &[V], direction: Face6) -> (PosCoord, PosCoord) {
        // We only need to look at one of the two triangles,
        // because they have the same bounding rectangle.
        let [i0, i1, i2] = self.indices.indices_for_sorting();
        // This is unrolled because map()ing it might end up not inlining it, which would be very bad.
        let p0 = vertices[i0.to_slice_index()].position();
        let p1 = vertices[i1.to_slice_index()].position();
        let p2 = vertices[i2.to_slice_index()].position();
        let c0 = direction.dot(p0.to_vector());
        let c1 = direction.dot(p1.to_vector());
        let c2 = direction.dot(p2.to_vector());
        (c0.min(c1).min(c2), c0.max(c1).max(c2))
    }
}

/// Compute primitive midpoint from vertices, for depth sorting.
///
/// (The midpoint isn’t actually very meaningful to depth sorting, but it’s cheap to compute and,
/// AFAIK, correct in all the cases we currently care about.)
#[inline(always)] // the very hottest of inner loop code
fn midpoint<V: Vertex, P: Primitive>(vertices: &[V], indices: P) -> Position {
    // We only need to look at one of the two triangles,
    // because they have the same bounding rectangle.
    let [i0, i1, i2] = indices.indices_for_sorting();
    // This is unrolled because map()ing it might end up not inlining it, which would be very bad.
    let p0 = vertices[i0.to_slice_index()].position();
    let p1 = vertices[i1.to_slice_index()].position();
    let p2 = vertices[i2.to_slice_index()].position();
    // TODO: consider deleting the * 0.5 and scaling the view position by * 2.0 instead
    (p0.max(p1).max(p2) + p0.min(p1).min(p2).to_vector()) * 0.5
}

/// `storage.extend(items)` plus reporting the added range of items
fn extend_giving_range<T>(
    storage: &mut IndexVec,
    items: impl IntoIterator<Item = T>,
) -> Range<usize>
where
    IndexVec: Extend<T>,
{
    let start = storage.len();
    storage.extend(items);
    let end = storage.len();
    start..end
}

#[inline]
fn assume_no_nan_cmp(a: f32, b: f32) -> Ordering {
    // `unwrap_or()` because we expect a complete lack of NaNs, and if there are any, more things
    // are going to be broken than just this sort (so we don't need to detect it here by panicking).
    // Not having any panic branch improves the performance of the sort.
    PartialOrd::partial_cmp(&a, &b).unwrap_or(Ordering::Equal)
}

/// This is used for dynamic depth sorting as the “depth” to sort by, because it is more efficient
/// than [`Vector3D::square_length()`] for our purposes. It requires no multiplication and,
/// I suspect, creates fewer unnecessary ordering changes.
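///
/// For example, the vector (1.0, -2.0, 0.5) has Manhattan length 3.5.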
#[inline]
fn manhattan_length(v: Vector3D<PosCoord, MeshRel>) -> f32 {
    v.x.abs() + v.y.abs() + v.z.abs()
}

// -------------------------------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;
    use crate::SpaceMesh;
    use all_is_cubes::block;
    use all_is_cubes::euclid::point3;
    use all_is_cubes::math::{Aab, Cube, GridAab};
    use all_is_cubes::space::Space;

    #[test]
    fn ordering_debug() {
        assert_eq!(
            format!(
                "{:?}",
                DepthOrdering(vec3(Rel::Lower, Rel::Within, Rel::Higher))
            ),
            "−W+"
        );
    }

    /// Generic tests for all cases can be confusing and themselves incorrect.
    /// Let's exercise some boring cases “end to end” with explanation.
    #[test]
    fn concrete_test_1() {
        // Suppose that the camera is located in the +X+Y+Z octant relative to the geometry.
        let ordering = DepthOrdering::from_view_of_aabb(point3(1., 1., 1.), Aab::ZERO);

        // `Rel` names refer to the position of the camera relative to the geometry.
        // Here, the camera is higher.
        assert_eq!(ordering, DepthOrdering(Vector3D::splat(Rel::Higher)));

        // In this case, the sorting rotation can be the identity rotation, because the
        // drawing order is the ordering where coordinates increase.
        assert_eq!(ordering.sort_key_rotation(), GridRotation::IDENTITY);

        // TODO: run an actual depth sorting function and confirm it agrees with this.
    }

    /// As [`concrete_test_1`] but with a non-identity transform.
    #[test]
    fn concrete_test_2() {
        // Suppose that the camera is located in the -X+Y+Z octant relative to the geometry.
        let ordering = DepthOrdering::from_view_of_aabb(point3(-1., 1., 1.), Aab::ZERO);

        assert_eq!(
            ordering,
            DepthOrdering(vec3(Rel::Lower, Rel::Higher, Rel::Higher))
        );

        // The sorting rotation flips the X axis so that drawing order is the order where
        // the X axis decreases.
        assert_eq!(ordering.sort_key_rotation(), GridRotation::RxYZ);
    }

    #[test]
    fn list_of_orderings_is_complete() {
        assert_eq!(DepthOrdering::exhaust().count(), 3usize.pow(3));
    }

    // TODO: This test was originally from an older design of `DepthOrdering`.
    // It exercises more cases than needed and I don’t know if it has complete coverage any more.
    #[test]
    fn depth_ordering_from_view_of_aabb() {
        let mut problems = Vec::new();
        // A coordinate range of ±3 will (more than) exercise every combination of axis orderings.
        let range = -3..3;
        // TODO: exercise the bounds not being near 0
        let bounds = Aab::from_lower_upper([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5]);
        for x in range.clone() {
            for y in range.clone() {
                for z in range.clone() {
                    let camera_position = point3(x, y, z);

                    let ordering =
                        DepthOrdering::from_view_of_aabb(camera_position.to_f64(), bounds);

                    // TODO: this added assertion doesn't fit well in this test
                    for axis in Axis::ALL {
                        if camera_position[axis] == 0 {
                            assert_eq!(ordering.0[axis], Rel::Within);
                        }
                    }

                    let rotated_position = ordering
                        .sort_key_rotation()
                        .transform_vector(camera_position.to_vector());
                    // The sort rotation is supposed to rotate vertex positions so that they
                    // are back-to-front as coordinates increase.
                    // Therefore, if we rotate the vector which is the direction
                    // pointing towards the camera, it is now a vector pointing towards
                    // more positive coordinates, i.e. its components are non-negative.
                    let good = rotated_position.x >= 0
                        && rotated_position.y >= 0
                        && rotated_position.z >= 0;
                    println!(
                        "{:?} → {:?} → {:?}{}",
                        camera_position,
                        ordering,
                        rotated_position,
                        if good { "" } else { " (wrong)" }
                    );
                    if !good {
                        // Defer assertions to end so we can report all cases before panicking.
                        problems.push(rotated_position);
                    }
                }
            }
        }
        assert_eq!(problems, vec![]);
    }

    /// Tests that the expected [`DepthSortResult`] is produced under various conditions.
    /// Also serves as a smoke test for the `has_non_rect_transparency` case
    /// (checks that it doesn’t panic, but not that the actual sort is correct).
    #[rstest::rstest]
    fn depth_sort_result_from_space_mesh(
        #[values(false, true)] transparent: bool,
        #[values(false, true)] force_non_rect: bool,
    ) {
        let options =
            &crate::MeshOptions::new(&all_is_cubes_render::camera::GraphicsOptions::default());
        let tex = crate::testing::Allocator::new();
        let opaque_block = &block::from_color!(1.0, 0.0, 0.0, 1.0);
        let maybe_transparent_block = if transparent {
            &block::from_color!(1.0, 0.0, 0.0, 0.5)
        } else {
            opaque_block
        };
        let space = Space::builder(GridAab::from_lower_size([0, 0, 0], [5, 1, 1]))
            .build_and_mutate(|m| {
                // Two blocks that need to be sorted vs. each other, if transparent
                m.set([0, 0, 0], maybe_transparent_block)?;
                m.set([2, 0, 0], maybe_transparent_block)?;
                // A third block that is opaque just to exercise having opaque indices present
                m.set([4, 0, 0], opaque_block)?;
                Ok(())
            })
            .unwrap();
        let mut block_meshes = crate::block_meshes_for_space(&space.read(), &tex, options);
        if force_non_rect {
            for mesh in &mut block_meshes {
                mesh.force_non_rect_depth_sorting();
            }
        }
        let mut space_mesh: SpaceMesh<crate::testing::TextureMt> =
            SpaceMesh::new(&space.read(), space.bounds(), options, &*block_meshes);

        let result = space_mesh.depth_sort_for_view(DepthOrdering::WITHIN, point3(0., 0., 0.));

        assert_ne!(
            space_mesh.opaque_range(),
            0..0,
            "if the opaque range is empty then this test is not sufficient"
        );
        assert_eq!(
            result,
            if transparent {
                DepthSortResult {
                    // other cases might have shorter rather than equal ranges
                    changed: Some(space_mesh.transparent_range(DepthOrdering::WITHIN)),
                    info: DepthSortInfo {
                        elements_sorted: if force_non_rect {
                            // 2 transparent blocks × 6 faces × 2 triangles
                            24
                        } else {
                            // 2 transparent blocks × 6 faces × 1 square
                            12
                        },
                        groups_sorted: 1,
                        static_groups_sorted: 0,
                    },
                }
            } else {
                DepthSortResult {
                    changed: None,
                    info: DepthSortInfo {
                        elements_sorted: 0,
                        groups_sorted: 0,
                        static_groups_sorted: 0,
                    },
                }
            }
        );
    }

    /// Regression test for a bug when an index list turns out to be empty after culling.
    #[test]
    fn empty_after_culling() {
        let opaque_block = &block::from_color!(1.0, 0.0, 0.0, 1.0);
        let transparent_block = &block::from_color!(1.0, 0.0, 0.0, 0.5);
        let space = Space::builder(GridAab::from_lower_size([-1, -1, -1], [3, 3, 3]))
            .build_and_mutate(|m| {
                let center = Cube::ORIGIN;
                m.set(center, transparent_block)?;
                // Cover all but one face of the transparent block,
                // so the mesh contains only that face.
                for face in Face6::ALL {
                    if face != Face6::PZ {
                        m.set(center + face, opaque_block)?;
                    }
                }
                Ok(())
            })
            .unwrap();
        let (_, _, mut space_mesh) = crate::testing::mesh_blocks_and_space(&space);

        let ordering_with_face = DepthOrdering(vec3(Rel::Within, Rel::Within, Rel::Higher));
        let ordering_with_nothing = DepthOrdering(vec3(Rel::Within, Rel::Within, Rel::Lower));
        let position_with_nothing = Position::new(0.5, 0.5, -10.0);
        assert_eq!(
            (
                space_mesh.transparent_range(ordering_with_face).len(),
                space_mesh.transparent_range(ordering_with_nothing).len()
            ),
            (6, 0),
            "expected culling did not occur; test is invalid"
        );

        assert!(
            !space_mesh.needs_depth_sorting(ordering_with_nothing, position_with_nothing),
            "sorting should be unnecessary since the range is empty; test failed"
        );

        // this should succeed without tripping any assertions
        let result = space_mesh.depth_sort_for_view(ordering_with_nothing, position_with_nothing);
        assert_eq!(result.changed, None);
    }

    /// Regression test for handling the case where, after the static sort completes, there is
    /// nothing for the dynamic sort to do even though the view direction would, in the general
    /// case, require dynamic sorting.
    #[test]
    fn no_dynamic_remains_after_static() {
        let opaque_block = block::from_color!(1.0, 0.0, 0.0, 1.0);
        let transparent_block = block::from_color!(1.0, 0.0, 0.0, 0.5);
        let space = Space::builder(GridAab::from_lower_size([-1, -1, -1], [3, 3, 3]))
            .filled_with(opaque_block)
            .build_and_mutate(|m| {
                // A line of transparent blocks punching through the opaque volume.
                // Thus, only their +X and -X faces are visible.
                m.fill_uniform(
                    GridAab::from_lower_size([-1, 0, 0], [3, 1, 1]),
                    &transparent_block,
                )
            })
            .unwrap();
        let (_, _, mut space_mesh) = crate::testing::mesh_blocks_and_space(&space);

        let ordering = DepthOrdering(vec3(Rel::Lower, Rel::Within, Rel::Within));
        let position = Position::new(-10.5, 0.5, 0.5);
        assert_eq!(
            space_mesh.transparent_range(ordering).len(),
            6 * 3,
            "expected 3 rects",
        );
        assert_eq!(space_mesh.needs_depth_sorting(ordering, position), true);

        assert!(space_mesh.depth_sort_for_view(ordering, position).changed.is_some());
        assert_eq!(space_mesh.needs_depth_sorting(ordering, position), false);

        // Second sort should do nothing
        assert_eq!(
            space_mesh.depth_sort_for_view(ordering, position).changed,
            None
        );
    }
}