use bytemuck::{Pod, Zeroable};
use cgmath::num_traits::pow;
use cgmath::{vec3, ElementWise, Vector3};
use dot_vox::load;
use noise::{NoiseFn, Perlin};
use rand::Rng;
use slotmap::{new_key_type, SlotMap};
use std::collections::{HashMap, VecDeque};
use wgpu::util::DeviceExt;
use wide::f32x4;

new_key_type! {
    /// Stable handle for nodes stored in the tree's SlotMap.
    pub struct NodeKey;
}

/// One node of the sparse 64-tree: a 4x4x4 branching factor, so each node
/// owns up to 64 children addressed through a bitmask.
#[derive(Debug, Default)]
pub struct Node64 {
    /// One bit per child slot; bit i is set when child i exists.
    child_mask: u64,
    /// Existing children only, stored compactly in ascending slot order.
    children: Vec<NodeKey>,
    /// RGB color payload; only meaningful when `is_leaf` is true.
    voxel_data: [u8; 3],
    is_leaf: bool,
}

/// Sparse voxel tree ("contree") with 64-way branching.
pub struct Sparse64Tree {
    pub nodes: SlotMap<NodeKey, Node64>,
    pub root: NodeKey,
}

impl Sparse64Tree {
    pub fn new() -> Self {
        let mut nodes = SlotMap::with_key();
        let root = nodes.insert(Node64::default());
        Self { nodes, root }
    }

    /// Insert a voxel at (x, y, z) on a grid of side 4^depth, creating any
    /// missing interior nodes and coloring the final node as a leaf.
    pub fn insert(&mut self, x: u32, y: u32, z: u32, depth: usize, color: [u8; 3]) {
        let mut current_key = self.root;
        for current_depth in 0..depth {
            let child_index = self.compute_child_index(x, y, z, current_depth, depth);

            let need_new_child;
            let rank;
            {
                let node = self.nodes.get_mut(current_key).unwrap();
                need_new_child = (node.child_mask & (1 << child_index)) == 0;
                // Rank = how many children occupy slots below child_index;
                // that is the child's position in the compact `children` vec.
                rank = (node.child_mask & ((1 << child_index) - 1)).count_ones() as usize;
                if need_new_child {
                    node.child_mask |= 1 << child_index;
                }
            }

            if need_new_child {
                let new_key = self.nodes.insert(Node64::default());
                let node = self.nodes.get_mut(current_key).unwrap();
                node.children.insert(rank, new_key);
                current_key = new_key;
            } else {
                current_key = self.nodes.get(current_key).unwrap().children[rank];
            }
        }

        if let Some(leaf_node) = self.nodes.get_mut(current_key) {
            leaf_node.voxel_data = color;
            leaf_node.is_leaf = true;
        }
    }

    /// Map (x, y, z) to a child slot (0..64) at the given level: two bits per
    /// axis, packed as zzyyxx.
    fn compute_child_index(&self, x: u32, y: u32, z: u32, depth: usize, max_depth: usize) -> usize {
        let shift = (max_depth - depth - 1) * 2;
        let x_part = ((x >> shift) & 0b11) as usize;
        let y_part = ((y >> shift) & 0b11) as usize;
        let z_part = ((z >> shift) & 0b11) as usize;
        (z_part << 4) | (y_part << 2) | x_part
    }

    /// Flatten the tree into a GPU-ready byte blob. BFS order guarantees that
    /// each node's children land contiguously in the output, so one child
    /// pointer (the index of the first child) plus the mask addresses all of
    /// them.
    pub fn flatten(&self) -> Vec<u8> {
        let mut flattened_nodes = Vec::new();
        let mut key_to_index = HashMap::new();
        let mut queue = VecDeque::new();
        queue.push_back(self.root);
        while let Some(key) = queue.pop_front() {
            let index = flattened_nodes.len();
            key_to_index.insert(key, index);
            let node = self.nodes.get(key).unwrap();
            flattened_nodes.push((key, node));
            for &child_key in &node.children {
                queue.push_back(child_key);
            }
        }

        let gpu_nodes: Vec<GpuNode64> = flattened_nodes
            .iter()
            .map(|(_key, node)| {
                let child_ptr = if !node.children.is_empty() {
                    *key_to_index.get(&node.children[0]).unwrap() as u32
                } else {
                    0
                };
                // High bit flags a leaf; the low 31 bits index the first child.
                let leaf_flag = if node.is_leaf { 0x8000_0000 } else { 0 };
                let child_ptr_and_leaf = child_ptr | leaf_flag;
                let child_mask_low = (node.child_mask & 0xFFFF_FFFF) as u32;
                let child_mask_high = (node.child_mask >> 32) as u32;
                let color = [
                    node.voxel_data[0] as f32 / 255.0,
                    node.voxel_data[1] as f32 / 255.0,
                    node.voxel_data[2] as f32 / 255.0,
                ];
                GpuNode64 {
                    child_mask_low,
                    child_mask_high,
                    child_ptr_and_leaf,
                    _padding: 0,
                    color,
                    _padding2: 0,
                }
            })
            .collect();

        bytemuck::cast_slice(&gpu_nodes).to_vec()
    }

    /// Build terrain by recursively carving against a Perlin heightmap SDF,
    /// sharing one noise cache across the whole build.
    pub fn generate_terrain_sdf_noise_simd(&mut self, aabb: AABB, max_depth: usize, noise: Perlin) {
        let mut noise_cache = HashMap::new();
        self.subdivide_node_noise_simd(self.root, aabb, 0, max_depth, &noise, &mut noise_cache);
    }

    fn subdivide_node_noise_simd(
        &mut self,
        node_key: NodeKey,
        aabb: AABB,
        depth: usize,
        max_depth: usize,
        noise: &Perlin,
        cache: &mut HashMap<(i32, i32), f32>,
    ) {
        let corners = aabb.corners();
        let sdf_values = eval_corners_simd(noise, &corners, cache);
        let center_sdf = sdf_values.iter().sum::<f32>() / 8.0;

        // All corners outside the surface: this cell is empty air.
        if sdf_values.iter().all(|&v| v >= 0.0) {
            return;
        }
        // All corners inside: fill the cell as a solid leaf.
        if sdf_values.iter().all(|&v| v < 0.0) {
            if let Some(node) = self.nodes.get_mut(node_key) {
                node.is_leaf = true;
                node.voxel_data = [34 + (generate_0_to_255() / 5), 139, 34 + (generate_0_to_255() / 5)];
            }
            return;
        }

        // Mixed signs: the surface crosses this cell, so subdivide 4x4x4.
        if depth < max_depth {
            let cell_size = (aabb.max - aabb.min) / 4.0;
            for cz in 0..4 {
                for cy in 0..4 {
                    for cx in 0..4 {
                        let offset = vec3(cx as f32, cy as f32, cz as f32);
                        let child_min = aabb.min + offset.mul_element_wise(cell_size);
                        let child_max = child_min + cell_size;
                        let child_aabb = AABB { min: child_min, max: child_max };

                        // Early-out before allocating: fully-outside children
                        // are never inserted.
                        let child_corners = child_aabb.corners();
                        let child_sdf_values = eval_corners_simd(noise, &child_corners, cache);
                        if child_sdf_values.iter().all(|&v| v >= 0.0) {
                            continue;
                        }

                        let child_index = (cz << 4) | (cy << 2) | cx;
                        let new_child_key = self.nodes.insert(Node64::default());
                        {
                            // Each (cx, cy, cz) maps to a distinct slot, so
                            // the mask bit is never already set here.
                            let parent = self.nodes.get_mut(node_key).unwrap();
                            let rank = (parent.child_mask & ((1 << child_index) - 1)).count_ones() as usize;
                            if parent.child_mask & (1 << child_index) == 0 {
                                parent.child_mask |= 1 << child_index;
                                parent.children.insert(rank, new_child_key);
                            }
                        }
                        self.subdivide_node_noise_simd(new_child_key, child_aabb, depth + 1, max_depth, noise, cache);
                    }
                }
            }
        } else if center_sdf < 0.0 {
            // At max depth, resolve the boundary cell by the average SDF sign.
            if let Some(node) = self.nodes.get_mut(node_key) {
                node.is_leaf = true;
                node.voxel_data = [34 + (generate_0_to_255() / 5), 139, 34 + (generate_0_to_255() / 5)];
            }
        }
    }
}
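
// Illustrative sanity check for the insert/flatten path above. The expected
// counts follow from the structure: one voxel at depth 2 creates one interior
// node plus one leaf under the root, and flatten() emits one GpuNode64 per node.
#[cfg(test)]
mod sparse64_tree_tests {
    use super::*;

    #[test]
    fn insert_then_flatten() {
        let mut tree = Sparse64Tree::new();
        // Depth 2 addresses a 16x16x16 grid (4^2 per axis).
        tree.insert(5, 3, 9, 2, [200, 100, 50]);
        assert_eq!(tree.nodes.len(), 3);

        let bytes = tree.flatten();
        assert_eq!(bytes.len(), 3 * std::mem::size_of::<GpuNode64>());
    }
}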

/// Axis-aligned bounding box in world space.
#[derive(Clone, Copy)]
pub struct AABB {
    pub min: Vector3<f32>,
    pub max: Vector3<f32>,
}

impl AABB {
    /// The eight corners, starting at `min` and ending at `max`.
    pub fn corners(&self) -> [Vector3<f32>; 8] {
        [
            self.min,
            Vector3::new(self.max.x, self.min.y, self.min.z),
            Vector3::new(self.min.x, self.max.y, self.min.z),
            Vector3::new(self.min.x, self.min.y, self.max.z),
            Vector3::new(self.max.x, self.max.y, self.min.z),
            Vector3::new(self.max.x, self.min.y, self.max.z),
            Vector3::new(self.min.x, self.max.y, self.max.z),
            self.max,
        ]
    }
}

/// Heightmap noise lookup with a coarse 2D cache. The key quantizes (x, z) to
/// a 1/20 grid, so nearby corners reuse one sample; this trades a little
/// accuracy for far fewer Perlin evaluations.
fn get_noise_value(
    noise: &Perlin,
    p: &Vector3<f32>,
    cache: &mut HashMap<(i32, i32), f32>,
) -> f32 {
    let key = (((p.x * 20.0).floor()) as i32, ((p.z * 20.0).floor()) as i32);
    if let Some(&value) = cache.get(&key) {
        value
    } else {
        let value = noise.get([p.x as f64 * 0.05, p.z as f64 * 0.05]) as f32;
        cache.insert(key, value);
        value
    }
}

/// Evaluate the terrain SDF at all eight AABB corners. The Perlin lookups go
/// through the scalar cache; the smoothing and height math then run four
/// corners at a time via `wide::f32x4`.
fn eval_corners_simd(
    noise: &Perlin,
    corners: &[Vector3<f32>; 8],
    cache: &mut HashMap<(i32, i32), f32>,
) -> [f32; 8] {
    let mut result = [0.0f32; 8];
    for i in 0..2 {
        let start = i * 4;
        let ys = f32x4::from([
            corners[start].y,
            corners[start + 1].y,
            corners[start + 2].y,
            corners[start + 3].y,
        ]);
        let raw_noise = f32x4::from([
            get_noise_value(noise, &corners[start], cache),
            get_noise_value(noise, &corners[start + 1], cache),
            get_noise_value(noise, &corners[start + 2], cache),
            get_noise_value(noise, &corners[start + 3], cache),
        ]);

        // Smoothstep the raw noise: n^2 * (3 - 2n).
        let smooth_noise = raw_noise * raw_noise * (f32x4::splat(3.0) - f32x4::splat(2.0) * raw_noise);
        let base_height = f32x4::splat(10.0);
        let amplitude = f32x4::splat(5.0);
        let interpolated_height = base_height + smooth_noise * amplitude;
        // SDF: negative below the heightfield, positive above.
        let sdf = (ys - interpolated_height).to_array();
        result[start..start + 4].copy_from_slice(&sdf);
    }
    result
}

/// GPU mirror of Node64: eight 32-bit words, std430-friendly.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
pub struct GpuNode64 {
    pub child_mask_low: u32,
    pub child_mask_high: u32,
    /// Bit 31 = leaf flag; the low bits index the first child node.
    pub child_ptr_and_leaf: u32,
    pub _padding: u32,
    pub color: [f32; 3],
    pub _padding2: u32,
}
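
// Layout guard: the shader side is assumed to read each node as eight 32-bit
// words, so this compile-time check catches any accidental padding drift.
const _: () = assert!(std::mem::size_of::<GpuNode64>() == 32);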

/// Owns the flattened node buffer and its bind group for a single tree.
pub struct Tree64GpuManager {
    node_buffer: wgpu::Buffer,
    num_nodes: u32,
    pub contree_bind_group: wgpu::BindGroup,
    pub contree_bind_group_layout: wgpu::BindGroupLayout,
}

impl Tree64GpuManager {
    pub fn new(device: &wgpu::Device, contree: &Sparse64Tree) -> Self {
        let node_buffer = Self::collect_nodes(contree, device);
        let contree_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Storage { read_only: true },
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                }],
                label: Some("Contree Bind Group Layout"),
            });

        let contree_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &contree_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: node_buffer.as_entire_binding(),
            }],
            label: Some("Contree Bind Group"),
        });

        Self {
            node_buffer,
            num_nodes: contree.nodes.len() as u32,
            contree_bind_group,
            contree_bind_group_layout,
        }
    }

    /// Flatten the tree and create a read-only storage buffer for it.
    pub fn collect_nodes(tree: &Sparse64Tree, device: &wgpu::Device) -> wgpu::Buffer {
        let node_data = tree.flatten();
        device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Node Buffer"),
            contents: &node_data,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
        })
    }

    /// Re-flatten and overwrite the existing buffer in place. The new data
    /// must not be larger than the buffer created at construction time.
    pub fn upload_tree(&mut self, queue: &wgpu::Queue, tree: &Sparse64Tree) {
        let node_data = tree.flatten();
        self.num_nodes = tree.nodes.len() as u32;
        queue.write_buffer(&self.node_buffer, 0, &node_data);
    }

    pub fn get_buffer(&self) -> &wgpu::Buffer {
        &self.node_buffer
    }
}
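
// Small wiring sketch (the function name is illustrative, not part of the
// renderer's API): build the procedural test tree defined below and wrap it
// in a GPU manager ready for binding.
pub fn build_test_tree_gpu(device: &wgpu::Device) -> Tree64GpuManager {
    let tree = create_test_tree();
    Tree64GpuManager::new(device, &tree)
}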

pub fn create_test_tree() -> Sparse64Tree {
    let perlin = Perlin::new(1);
    let mut tree = Sparse64Tree::new();

    // `depth` levels of 4-way splits per axis => a (4^depth)^3 grid.
    let depth: usize = 4;
    let grid_size: u32 = pow(4, depth);
    let noise_size = 128;

    for x in 0..grid_size {
        for z in 0..grid_size {
            let val = ((perlin.get([(x as f64) / noise_size as f64, (z as f64) / noise_size as f64]) * 10.0) + 50.0).abs() as u32;
            for y in 0..val {
                if y > (val / 2) {
                    // Upper half: green-ish grass.
                    tree.insert(x, y, z, depth, [0, 121 + (generate_0_to_255() / 5), 40]);
                } else {
                    // Lower half: brown-ish dirt.
                    tree.insert(x, y, z, depth, [121 + (generate_0_to_255() / 15), 60, 0]);
                }
            }
        }
    }

    tree
}

pub fn add_vox_to_tree(file_path: &str, depth: usize, offsetx: u32, offsety: u32, offsetz: u32, tree: &mut Sparse64Tree) {
    let vox_data = match load(file_path) {
        Ok(data) => data,
        Err(e) => {
            eprintln!("Failed to load .vox file: {}", e);
            return;
        }
    };

    for model in &vox_data.models {
        for voxel in &model.voxels {
            // MagicaVoxel is Z-up; swap Y and Z for this tree's Y-up space.
            let x = voxel.x as u32;
            let y = voxel.z as u32;
            let z = voxel.y as u32;
            let color_index = voxel.i as usize;

            let color = if color_index < vox_data.palette.len() {
                let c = vox_data.palette[color_index];
                [c.r, c.g, c.b]
            } else {
                [255, 255, 255]
            };

            tree.insert(x + offsetx, y + offsety, z + offsetz, depth, color);
        }
    }
}

pub fn create_test_tree_from_vox(file_path: &str, depth: usize) -> Sparse64Tree {
    let mut tree = Sparse64Tree::new();
    add_vox_to_tree(file_path, depth, 0, 0, 0, &mut tree);
    tree
}
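
// Usage sketch (name and depth choice are illustrative): depth 4 gives a
// 256^3 addressable grid, comfortably covering typical MagicaVoxel models.
pub fn load_model_tree(file_path: &str) -> Sparse64Tree {
    create_test_tree_from_vox(file_path, 4)
}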

/// Random byte used to add slight variation to terrain colors.
pub fn generate_0_to_255() -> u8 {
    let mut rng = rand::rng();
    rng.random_range(0..=255)
}

/// Slot allocator for many trees in one big GPU buffer: each tree gets a
/// fixed-size slot, plus a parallel buffer of per-tree root offsets.
pub struct TreeMemoryManager {
    buffer: wgpu::Buffer,
    root_node_buffer: wgpu::Buffer,
    capacity: usize,
    /// tree_id -> byte offset of its slot in `buffer`.
    allocations: HashMap<u32, usize>,
    /// Byte offsets of unused slots.
    free_slots: Vec<usize>,
    pub bind_group: wgpu::BindGroup,
    pub bind_group_layout: wgpu::BindGroupLayout,
    pub root_bind_group: wgpu::BindGroup,
    pub root_bind_group_layout: wgpu::BindGroupLayout,
}

impl TreeMemoryManager {
    pub fn new(device: &wgpu::Device, max_trees: usize, tree_size: usize) -> Self {
        // One fixed-size slot per tree, plus one u32 root offset per tree.
        let buffer_size = max_trees * tree_size;
        let root_buffer_size = max_trees * std::mem::size_of::<u32>();

        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Sparse Voxel Tree Buffer"),
            size: buffer_size as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let root_node_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Tree Root Node Buffer"),
            size: root_buffer_size as u64,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Tree Bind Group Layout"),
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });

        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Tree Bind Group"),
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: buffer.as_entire_binding(),
            }],
        });

        let root_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: Some("Root Node Bind Group Layout"),
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Storage { read_only: true },
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });

        let root_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: Some("Root Node Bind Group"),
            layout: &root_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: root_node_buffer.as_entire_binding(),
            }],
        });

        let free_slots = (0..max_trees).map(|i| i * tree_size).collect();

        Self {
            buffer,
            root_node_buffer,
            capacity: max_trees,
            allocations: HashMap::new(),
            free_slots,
            bind_group,
            bind_group_layout,
            root_bind_group,
            root_bind_group_layout,
        }
    }

    /// Write a flattened tree into its slot, reusing the existing slot for a
    /// known tree_id or claiming a free one otherwise.
    pub fn upload_tree(&mut self, queue: &wgpu::Queue, tree_id: u32, tree_data: &[u8], root_node_offset: u32) {
        if let Some(&offset) = self.allocations.get(&tree_id) {
            queue.write_buffer(&self.buffer, offset as u64, tree_data);
            self.update_root_reference(queue, tree_id, root_node_offset);
        } else if let Some(offset) = self.free_slots.pop() {
            self.allocations.insert(tree_id, offset);
            queue.write_buffer(&self.buffer, offset as u64, tree_data);
            self.update_root_reference(queue, tree_id, root_node_offset);
        } else {
            panic!("No available slots for new trees!");
        }
    }

    pub fn update_root_reference(&self, queue: &wgpu::Queue, tree_id: u32, root_node_offset: u32) {
        let root_index = tree_id as usize * std::mem::size_of::<u32>();
        queue.write_buffer(&self.root_node_buffer, root_index as u64, bytemuck::bytes_of(&root_node_offset));
    }

    /// Free a tree's slot. The GPU data is left in place; the slot simply
    /// becomes available for reuse.
    pub fn remove_tree(&mut self, tree_id: u32) {
        if let Some(offset) = self.allocations.remove(&tree_id) {
            self.free_slots.push(offset);
        }
    }

    pub fn get_buffer(&self) -> &wgpu::Buffer {
        &self.buffer
    }

    pub fn get_root_node_buffer(&self) -> &wgpu::Buffer {
        &self.root_node_buffer
    }
}
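
// Hedged convenience wrapper joining Sparse64Tree to the slot allocator:
// flatten the tree and write it into `tree_id`'s slot. Passing 0 as the root
// offset assumes the root is the first node of the flattened blob, which is
// what Sparse64Tree::flatten produces (BFS starting at the root). The blob
// must fit within one `tree_size` slot.
pub fn upload_sparse_tree(
    mem: &mut TreeMemoryManager,
    queue: &wgpu::Queue,
    tree_id: u32,
    tree: &Sparse64Tree,
) {
    let data = tree.flatten();
    mem.upload_tree(queue, tree_id, &data, 0);
}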