1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
// SPDX-License-Identifier: MIT OR Apache-2.0
// Copyright (c) 2025 lacklustr@protonmail.com https://github.com/eadf
use crate::common::VertexIndex;
use crate::common::macros::{integrity_assert, integrity_assert_eq};
use crate::common::remesh_error::RemeshError;
use crate::isotropic_remesh::IsotropicRemeshAlgo;
use std::fmt::Debug;
use vector_traits::num_traits::AsPrimitive;
impl<S, V, const ENABLE_UNSAFE: bool> IsotropicRemeshAlgo<S, V, ENABLE_UNSAFE>
where
    S: crate::common::sealed::ScalarType,
    f64: AsPrimitive<S>,
    V: Debug + Copy + From<[S; 3]> + Into<[S; 3]> + Sync + 'static,
{
    /// Compresses a mesh by removing unreferenced vertices.
    ///
    /// Consumes `self` and returns `(new_vertices, new_indices)`: the vertices
    /// that are still in use (converted to the caller-facing type `V`) together
    /// with triangle indices remapped into the compacted vertex array.
    ///
    /// Returns `Ok((Vec::new(), Vec::new()))` when either the vertex or the
    /// index list is empty.
    ///
    /// # Errors
    /// May return a [`RemeshError`] when an integrity check fails
    /// (NOTE(review): the `integrity_assert*` macros are defined elsewhere —
    /// presumably they produce `RemeshError` under the `integrity_check`
    /// feature; confirm against `crate::common::macros`).
    pub(crate) fn compress_mesh(self) -> Result<(Vec<V>, Vec<u32>), RemeshError> {
        // Snapshot the active count before the pool is consumed below; used
        // only for the consistency assertions at the end of this function.
        let actual_used_vertices = self.vertex_pool.active_count();
        let vertices = self.vertices;
        // Take ownership of the flat triangle index buffer (3 u32s per triangle).
        let indices = self.corner_table.take_triangles();
        let mut vertex_pool = self.vertex_pool;
        /*#[cfg(feature = "integrity_check")]
        if false && vertices.len() < 20 {
            println!("pre-compress vertices:");
            for (i, &v) in vertices.iter().map(|v| v.into()).enumerate() {
                if indices.contains(&i) {
                    println!("V{i}:{:?}", v.into());
                } else {
                    println!("V{i}:{:?} not used anywhere!", v.into());
                }
            }
            println!("pre-compress indices:{indices:?}:{}", indices.len());
        } else {
            println!("running compress mesh {} {}", vertices.len(), indices.len());
        }*/
        // Degenerate mesh: nothing to compress.
        if vertices.is_empty() || indices.is_empty() {
            return Ok((Vec::new(), Vec::new()));
        }
        let vertex_count = vertices.len() as u32;
        // NOTE(review): `used_count` re-queries the same pool as
        // `actual_used_vertices` above with no intervening pool mutation, so
        // the two values are presumably always equal — confirm whether
        // `take_triangles()` can affect the pool.
        let used_count = vertex_pool.active_count();
        // Bit set of vertex slots that are still referenced; consumes the pool.
        let used_vertices = vertex_pool.take_used();
        if used_count == vertex_count {
            // No compression needed - all vertices are used
            debug_assert_eq!(
                vertices.len(),
                actual_used_vertices as usize,
                "vertices.len() mismatch"
            );
            // Optimization: skip conversion if V is exactly the same type as S::Vec3
            // This helps when users work directly with glam types
            if ENABLE_UNSAFE && std::any::TypeId::of::<V>() == std::any::TypeId::of::<S::Vec3>() {
                unsafe {
                    // SAFETY: `TypeId` equality (both `V` and `S::Vec3` are
                    // `'static`) guarantees the two types are identical, so a
                    // `Vec<S::Vec3>` and a `Vec<V>` have the same layout and
                    // the transmute is a no-op reinterpretation.
                    return Ok((
                        std::mem::transmute::<Vec<S::Vec3>, Vec<V>>(vertices),
                        indices,
                    ));
                }
            }
            // Fallback: element-wise conversion into the caller's vertex type.
            return Ok((
                vertices.into_iter().map(|v| Self::from_glam(v)).collect(),
                indices,
            ));
        }
        // Build mapping from old to new indices while collecting used vertices
        let mut new_vertices = Vec::with_capacity(used_count as usize);
        // `VertexIndex::INVALID.0` is the sentinel marking unused slots in the
        // old->new mapping (the table stores `u32` indices, not `usize`).
        let mut old_to_new_index = vec![VertexIndex::INVALID.0; vertex_count as usize]; // usize::MAX indicates unused
        // `iter_set_bits` yields old indices of used vertices in ascending
        // order, so `new_vertices` preserves the original relative ordering.
        for (new_index, old_index) in used_vertices.iter_set_bits(..).enumerate() {
            new_vertices.push(Self::from_glam(vertices[old_index]));
            old_to_new_index[old_index] = new_index as u32;
        }
        // Remap indices - single pass
        // A triangle is kept iff its first index is a valid vertex slot;
        // otherwise all three indices must be the INVALID sentinel (a deleted
        // triangle). NOTE(review): this assumes `VertexIndex::INVALID.0`
        // always compares >= `vertex_count` (e.g. it is `u32::MAX`) — confirm.
        let new_indices: Vec<u32> = indices
            .chunks_exact(3)
            .filter_map(|triangle| {
                let first_idx = triangle[0];
                if first_idx < vertex_count {
                    // Valid triangle: the remaining two indices must also be
                    // in range, or the corner table is corrupt.
                    integrity_assert!(
                        triangle[1] < vertex_count,
                        "Inconsistent triangle: second index invalid"
                    );
                    integrity_assert!(
                        triangle[2] < vertex_count,
                        "Inconsistent triangle: third index invalid"
                    );
                    Some([
                        old_to_new_index[triangle[0] as usize],
                        old_to_new_index[triangle[1] as usize],
                        old_to_new_index[triangle[2] as usize],
                    ])
                } else {
                    // Deleted triangle: every index must carry the sentinel;
                    // a mix of valid and invalid indices indicates corruption.
                    integrity_assert_eq!(
                        triangle[1],
                        VertexIndex::INVALID.0,
                        "Inconsistent triangle: mixed valid/invalid indices"
                    );
                    integrity_assert_eq!(
                        triangle[2],
                        VertexIndex::INVALID.0,
                        "Inconsistent triangle: mixed valid/invalid indices"
                    );
                    None
                }
            })
            .flatten()
            .collect();
        // The compacted vertex list must match the pool's pre-compression
        // active count exactly.
        assert_eq!(
            new_vertices.len(),
            actual_used_vertices as usize,
            "new_vertices.len() mismatch"
        );
        Ok((new_vertices, new_indices))
    }
}