// sparkl2d_kernels/cuda/grid_update.rs
use crate::gpu_collider::{GpuCollider, GpuColliderSet};
2use crate::gpu_grid::{GpuGrid, GpuGridNode, GpuGridProjectionStatus};
3use crate::BlockHeaderId;
4use cuda_std::thread;
5use cuda_std::*;
6use na::{vector, Unit};
7use sparkl_core::dynamics::solver::BoundaryHandling;
8use sparkl_core::math::{Point, Real, Vector};
9
10#[cfg_attr(target_os = "cuda", kernel)] pub unsafe fn grid_update(
12 dt: Real,
13 mut next_grid: GpuGrid,
14 colliders_ptr: *const GpuCollider,
15 num_colliders: usize,
16 gravity: Vector<Real>,
17) {
18 let bid = BlockHeaderId(thread::block_idx_x());
19 #[cfg(feature = "dim2")]
20 let shift = vector![
21 thread::thread_idx_x() as usize,
22 thread::thread_idx_y() as usize
23 ];
24 #[cfg(feature = "dim3")]
25 let shift = vector![
26 thread::thread_idx_x() as usize,
27 thread::thread_idx_y() as usize,
28 thread::thread_idx_z() as usize
29 ];
30
31 let block = next_grid.active_block_unchecked(bid);
32
33 let cell_packed_id = bid.to_physical().node_id_unchecked(shift);
34 let cell_pos_int = block.virtual_id.unpack_pos_on_signed_grid() * 4 + shift.cast::<i64>();
35 let cell_width = next_grid.cell_width();
36
37 if let Some(cell) = next_grid.get_node_mut(cell_packed_id) {
38 let cell_pos = cell_pos_int.cast::<Real>() * cell_width;
39 let collider_set = GpuColliderSet {
40 ptr: colliders_ptr,
41 len: num_colliders,
42 };
43 update_single_cell(
44 dt,
45 cell,
46 cell_pos.into(),
47 cell_width,
48 &collider_set,
49 gravity,
50 )
51 }
52}
53
54fn sdf(cell_width: Real, colliders: &GpuColliderSet, query: Point<Real>) -> Option<Real> {
55 colliders
56 .iter()
57 .filter(|collider| collider.grid_boundary_handling != BoundaryHandling::None)
58 .filter_map(|collider| {
59 collider.shape.project_point_with_max_dist(
60 &collider.position,
61 &query,
62 false,
63 cell_width * 2.0,
64 )
65 })
66 .map(|projection| {
67 (projection.point - query).norm() * if projection.is_inside { -1. } else { 1. }
68 })
69 .min_by(|a, b| a.partial_cmp(b).unwrap_or(core::cmp::Ordering::Equal))
70}
71
72fn sdf_gradient(cell_width: Real, colliders: &GpuColliderSet, query: Point<Real>) -> Vector<Real> {
74 const FACTOR: Real = 0.1;
75 let sdf = |offset| sdf(cell_width, colliders, query + offset);
76 let partial = |direction: Unit<Vector<Real>>| {
77 let offset_pos = direction.scale(cell_width * FACTOR);
78 let offset_neg = -offset_pos;
79 match (sdf(offset_pos), sdf(offset_neg)) {
80 (Some(sample_pos), Some(sample_neg)) => {
81 (sample_pos - sample_neg) / cell_width / FACTOR / 2.
82 }
83 _ => 0.,
84 }
85 };
86 Vector::new(
87 partial(Vector::x_axis()),
88 partial(Vector::y_axis()),
89 #[cfg(feature = "dim3")]
90 partial(Vector::z_axis()),
91 )
92 .try_normalize(1.0e-5)
93 .unwrap_or(Vector::zeros())
94}
95
/// Applies gravity and collider boundary conditions to a single grid node.
///
/// Converts the node's accumulated momentum into a velocity, integrates
/// gravity over `dt`, enforces the boundary handling of the nearest collider
/// (stick or Coulomb-like friction), then writes the velocity back into
/// `cell.momentum_velocity`. Also normalizes `psi_momentum_velocity` by the
/// corresponding psi mass.
///
/// The closest-collider projection is computed lazily the first time a node is
/// seen (`NotComputed`) and cached on the node for later timesteps.
fn update_single_cell(
    dt: Real,
    cell: &mut GpuGridNode,
    cell_pos: Point<Real>,
    cell_width: Real,
    colliders: &GpuColliderSet,
    gravity: Vector<Real>,
) {
    // Momentum -> velocity with gravity applied over the timestep.
    // NOTE(review): `inv_exact` presumably returns 0 for a zero mass so that
    // empty nodes keep a zero velocity — confirm in sparkl_core::utils.
    let mut cell_velocity = (cell.momentum_velocity + cell.mass * gravity * dt)
        * sparkl_core::utils::inv_exact(cell.mass);

    if cell.projection_status == GpuGridProjectionStatus::NotComputed {
        // Find the closest collider projection within 2 cell-widths of this
        // node; remember (direction, distance, inside-flag, collider index).
        let mut best_proj = None;
        for (i, collider) in colliders.iter().enumerate() {
            if collider.grid_boundary_handling == BoundaryHandling::None {
                continue;
            }

            if let Some(proj) = collider.shape.project_point_with_max_dist(
                &collider.position,
                &cell_pos,
                false,
                cell_width * 2.0,
            ) {
                // Vector from the surface projection point to the node.
                let projection_scaled_dir = cell_pos - proj.point;
                let projection_dist = projection_scaled_dir.norm();
                if projection_dist < best_proj.map(|(_, dist, _, _)| dist).unwrap_or(Real::MAX) {
                    best_proj = Some((projection_scaled_dir, projection_dist, proj.is_inside, i));
                }
            }
        }

        // Cache the projection result on the node so subsequent timesteps can
        // skip the collider scan.
        if let Some((scaled_dir, _, is_inside, id)) = best_proj {
            cell.projection_status = if is_inside {
                GpuGridProjectionStatus::Inside(id)
            } else {
                GpuGridProjectionStatus::Outside(id)
            };
            cell.projection_scaled_dir = scaled_dir;
        } else {
            cell.projection_status = GpuGridProjectionStatus::TooFar;
        }

        // Finite-difference SDF gradient, used as the collision normal below.
        cell.collision_normal = sdf_gradient(cell_width, colliders, cell_pos);
    }

    match cell.projection_status {
        GpuGridProjectionStatus::Inside(collider_id)
        | GpuGridProjectionStatus::Outside(collider_id) => {
            let is_inside = matches!(cell.projection_status, GpuGridProjectionStatus::Inside(_));
            let collider = colliders.get(collider_id).unwrap();

            match collider.grid_boundary_handling {
                BoundaryHandling::Stick => {
                    // Stick boundary: nodes inside the collider are frozen.
                    if is_inside {
                        cell_velocity = na::zero();
                    }
                }
                BoundaryHandling::Friction | BoundaryHandling::FrictionZUp => {
                    // A zero normal means the SDF gradient was degenerate; in
                    // that case no friction response can be computed.
                    if cell.collision_normal != Vector::zeros() {
                        let normal = cell.collision_normal;
                        let dist = cell.projection_scaled_dir.norm();

                        // In 2D, friction always applies. In 3D, FrictionZUp
                        // only applies friction on upward-facing surfaces
                        // (normal.z >= 0), e.g. to let material slide off
                        // walls/ceilings.
                        #[cfg(feature = "dim2")]
                        let apply_friction = true; #[cfg(feature = "dim3")]
                        let apply_friction = collider.grid_boundary_handling
                            == BoundaryHandling::Friction
                            || (collider.grid_boundary_handling == BoundaryHandling::FrictionZUp
                                && normal.z >= 0.0);

                        if apply_friction {
                            let normal_vel = cell_velocity.dot(&normal);

                            // Only react when the node moves into the surface.
                            if normal_vel < 0.0 {
                                let dist_with_margin = dist - cell_width;
                                if is_inside || dist_with_margin <= 0.0 {
                                    // In contact: remove the normal component,
                                    // then damp the tangential component by a
                                    // Coulomb-like friction term.
                                    let tangent_vel = cell_velocity - normal_vel * normal;
                                    let tangent_vel_norm = tangent_vel.norm();

                                    cell_velocity = tangent_vel;

                                    if tangent_vel_norm > 1.0e-10 {
                                        let friction = collider.friction;
                                        // normal_vel < 0, so this shortens the
                                        // tangent velocity, clamped at zero.
                                        cell_velocity = tangent_vel / tangent_vel_norm
                                            * (tangent_vel_norm + normal_vel * friction).max(0.0);
                                    }
                                } else if -normal_vel * dt > dist_with_margin {
                                    // Not yet in contact, but this step would
                                    // tunnel past the margin: clamp the normal
                                    // approach speed so the node stops at it.
                                    cell_velocity -= (dist_with_margin / dt + normal_vel) * normal;
                                }
                            }
                        }
                    }
                }
                BoundaryHandling::None => {}
            }
        }
        _ => {}
    }

    // Write the adjusted velocity back; downstream stages read it as the grid
    // velocity field.
    cell.momentum_velocity = cell_velocity;
    cell.psi_momentum_velocity *= sparkl_core::utils::inv_exact(cell.psi_mass);
}