//! GPU object-ID picking pass (viewport_lib/renderer/picking.rs).
use super::*;
2
3impl ViewportRenderer {
4    // -----------------------------------------------------------------------
5    // Phase K — GPU object-ID picking
6    // -----------------------------------------------------------------------
7
8    /// GPU object-ID pick: renders the scene to an offscreen `R32Uint` texture
9    /// and reads back the single pixel under `cursor`.
10    ///
11    /// This is O(1) in mesh complexity — every object is rendered with a flat
12    /// `u32` ID, and only one pixel is read back. For triangle-level queries
13    /// (barycentric scalar probe, exact world position), use the CPU
14    /// [`crate::interaction::picking::pick_scene`] path instead.
15    ///
16    /// The pipeline is lazily initialized on first call — zero overhead when
17    /// this method is never invoked.
18    ///
19    /// # Arguments
20    /// * `device` — wgpu device
21    /// * `queue` — wgpu queue
22    /// * `cursor` — cursor position in viewport-local pixels (top-left origin)
23    /// * `frame` — current frame data (camera, scene_items, viewport_size)
24    ///
25    /// # Returns
26    /// `Some(GpuPickHit)` if an object is under the cursor, `None` if empty space.
27    pub fn pick_scene_gpu(
28        &mut self,
29        device: &wgpu::Device,
30        queue: &wgpu::Queue,
31        cursor: glam::Vec2,
32        frame: &FrameData,
33    ) -> Option<crate::interaction::picking::GpuPickHit> {
34        let vp_w = frame.viewport_size[0] as u32;
35        let vp_h = frame.viewport_size[1] as u32;
36
37        // --- bounds check ---
38        if cursor.x < 0.0
39            || cursor.y < 0.0
40            || cursor.x >= frame.viewport_size[0]
41            || cursor.y >= frame.viewport_size[1]
42            || vp_w == 0
43            || vp_h == 0
44        {
45            return None;
46        }
47
48        // --- lazy pipeline init ---
49        self.resources.ensure_pick_pipeline(device);
50
51        // --- build PickInstance data ---
52        // Sentinel scheme: object_id stored = (scene_items_index + 1) so that
53        // clear value 0 unambiguously means "no hit".
54        let pick_instances: Vec<PickInstance> = frame
55            .scene_items
56            .iter()
57            .enumerate()
58            .filter(|(_, item)| item.visible)
59            .map(|(idx, item)| {
60                let m = item.model;
61                PickInstance {
62                    model_c0: m[0],
63                    model_c1: m[1],
64                    model_c2: m[2],
65                    model_c3: m[3],
66                    object_id: (idx + 1) as u32,
67                    _pad: [0; 3],
68                }
69            })
70            .collect();
71
72        if pick_instances.is_empty() {
73            return None;
74        }
75
76        // Build a mapping from sentinel object_id → original scene_items index.
77        // Also track which scene_items are visible and their scene_items indices
78        // so we can issue the right draw calls.
79        let visible_items: Vec<(usize, &SceneRenderItem)> = frame
80            .scene_items
81            .iter()
82            .enumerate()
83            .filter(|(_, item)| item.visible)
84            .collect();
85
86        // --- pick instance storage buffer + bind group ---
87        let pick_instance_bytes = bytemuck::cast_slice(&pick_instances);
88        let pick_instance_buf = device.create_buffer(&wgpu::BufferDescriptor {
89            label: Some("pick_instance_buf"),
90            size: pick_instance_bytes.len().max(80) as u64,
91            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
92            mapped_at_creation: false,
93        });
94        queue.write_buffer(&pick_instance_buf, 0, pick_instance_bytes);
95
96        let pick_instance_bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
97            label: Some("pick_instance_bg"),
98            layout: self
99                .resources
100                .pick_bind_group_layout_1
101                .as_ref()
102                .expect("ensure_pick_pipeline must be called first"),
103            entries: &[wgpu::BindGroupEntry {
104                binding: 0,
105                resource: pick_instance_buf.as_entire_binding(),
106            }],
107        });
108
109        // --- pick camera uniform buffer + bind group ---
110        let camera_uniform = CameraUniform {
111            view_proj: (frame.camera_proj * frame.camera_view).to_cols_array_2d(),
112            eye_pos: frame.camera_uniform.eye_pos,
113            _pad: 0.0,
114            forward: frame
115                .camera_view
116                .inverse()
117                .transform_vector3(-glam::Vec3::Z)
118                .normalize_or_zero()
119                .to_array(),
120            _pad1: 0.0,
121        };
122        let camera_bytes = bytemuck::bytes_of(&camera_uniform);
123        let pick_camera_buf = device.create_buffer(&wgpu::BufferDescriptor {
124            label: Some("pick_camera_buf"),
125            size: std::mem::size_of::<CameraUniform>() as u64,
126            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
127            mapped_at_creation: false,
128        });
129        queue.write_buffer(&pick_camera_buf, 0, camera_bytes);
130
131        let pick_camera_bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
132            label: Some("pick_camera_bg"),
133            layout: self
134                .resources
135                .pick_camera_bgl
136                .as_ref()
137                .expect("ensure_pick_pipeline must be called first"),
138            entries: &[wgpu::BindGroupEntry {
139                binding: 0,
140                resource: pick_camera_buf.as_entire_binding(),
141            }],
142        });
143
144        // --- offscreen pick textures (R32Uint + R32Float) + depth ---
145        let pick_id_texture = device.create_texture(&wgpu::TextureDescriptor {
146            label: Some("pick_id_texture"),
147            size: wgpu::Extent3d {
148                width: vp_w,
149                height: vp_h,
150                depth_or_array_layers: 1,
151            },
152            mip_level_count: 1,
153            sample_count: 1,
154            dimension: wgpu::TextureDimension::D2,
155            format: wgpu::TextureFormat::R32Uint,
156            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
157            view_formats: &[],
158        });
159        let pick_id_view = pick_id_texture.create_view(&wgpu::TextureViewDescriptor::default());
160
161        let pick_depth_texture = device.create_texture(&wgpu::TextureDescriptor {
162            label: Some("pick_depth_color_texture"),
163            size: wgpu::Extent3d {
164                width: vp_w,
165                height: vp_h,
166                depth_or_array_layers: 1,
167            },
168            mip_level_count: 1,
169            sample_count: 1,
170            dimension: wgpu::TextureDimension::D2,
171            format: wgpu::TextureFormat::R32Float,
172            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
173            view_formats: &[],
174        });
175        let pick_depth_view =
176            pick_depth_texture.create_view(&wgpu::TextureViewDescriptor::default());
177
178        let depth_stencil_texture = device.create_texture(&wgpu::TextureDescriptor {
179            label: Some("pick_ds_texture"),
180            size: wgpu::Extent3d {
181                width: vp_w,
182                height: vp_h,
183                depth_or_array_layers: 1,
184            },
185            mip_level_count: 1,
186            sample_count: 1,
187            dimension: wgpu::TextureDimension::D2,
188            format: wgpu::TextureFormat::Depth24PlusStencil8,
189            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
190            view_formats: &[],
191        });
192        let depth_stencil_view =
193            depth_stencil_texture.create_view(&wgpu::TextureViewDescriptor::default());
194
195        // --- render pass ---
196        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
197            label: Some("pick_pass_encoder"),
198        });
199        {
200            let mut pick_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
201                label: Some("pick_pass"),
202                color_attachments: &[
203                    Some(wgpu::RenderPassColorAttachment {
204                        view: &pick_id_view,
205                        resolve_target: None,
206                        depth_slice: None,
207                        ops: wgpu::Operations {
208                            load: wgpu::LoadOp::Clear(wgpu::Color {
209                                r: 0.0,
210                                g: 0.0,
211                                b: 0.0,
212                                a: 0.0,
213                            }),
214                            store: wgpu::StoreOp::Store,
215                        },
216                    }),
217                    Some(wgpu::RenderPassColorAttachment {
218                        view: &pick_depth_view,
219                        resolve_target: None,
220                        depth_slice: None,
221                        ops: wgpu::Operations {
222                            load: wgpu::LoadOp::Clear(wgpu::Color {
223                                r: 1.0,
224                                g: 0.0,
225                                b: 0.0,
226                                a: 0.0,
227                            }),
228                            store: wgpu::StoreOp::Store,
229                        },
230                    }),
231                ],
232                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
233                    view: &depth_stencil_view,
234                    depth_ops: Some(wgpu::Operations {
235                        load: wgpu::LoadOp::Clear(1.0),
236                        store: wgpu::StoreOp::Store,
237                    }),
238                    stencil_ops: None,
239                }),
240                timestamp_writes: None,
241                occlusion_query_set: None,
242            });
243
244            pick_pass.set_pipeline(
245                self.resources
246                    .pick_pipeline
247                    .as_ref()
248                    .expect("ensure_pick_pipeline must be called first"),
249            );
250            pick_pass.set_bind_group(0, &pick_camera_bg, &[]);
251            pick_pass.set_bind_group(1, &pick_instance_bg, &[]);
252
253            // Draw each visible item with its instance slot.
254            // Instance index in the storage buffer = position in pick_instances vec.
255            for (instance_slot, (_, item)) in visible_items.iter().enumerate() {
256                let Some(mesh) = self
257                    .resources
258                    .mesh_store
259                    .get(crate::resources::mesh_store::MeshId(item.mesh_index))
260                else {
261                    continue;
262                };
263                pick_pass.set_vertex_buffer(0, mesh.vertex_buffer.slice(..));
264                pick_pass.set_index_buffer(mesh.index_buffer.slice(..), wgpu::IndexFormat::Uint32);
265                let slot = instance_slot as u32;
266                pick_pass.draw_indexed(0..mesh.index_count, 0, slot..slot + 1);
267            }
268        }
269
270        // --- copy 1×1 pixels to staging buffers ---
271        // R32Uint: 4 bytes per pixel, min bytes_per_row = 256 (wgpu alignment)
272        let bytes_per_row_aligned = 256u32; // wgpu requires multiples of 256
273
274        let id_staging = device.create_buffer(&wgpu::BufferDescriptor {
275            label: Some("pick_id_staging"),
276            size: bytes_per_row_aligned as u64,
277            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
278            mapped_at_creation: false,
279        });
280        let depth_staging = device.create_buffer(&wgpu::BufferDescriptor {
281            label: Some("pick_depth_staging"),
282            size: bytes_per_row_aligned as u64,
283            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
284            mapped_at_creation: false,
285        });
286
287        let px = cursor.x as u32;
288        let py = cursor.y as u32;
289
290        encoder.copy_texture_to_buffer(
291            wgpu::TexelCopyTextureInfo {
292                texture: &pick_id_texture,
293                mip_level: 0,
294                origin: wgpu::Origin3d { x: px, y: py, z: 0 },
295                aspect: wgpu::TextureAspect::All,
296            },
297            wgpu::TexelCopyBufferInfo {
298                buffer: &id_staging,
299                layout: wgpu::TexelCopyBufferLayout {
300                    offset: 0,
301                    bytes_per_row: Some(bytes_per_row_aligned),
302                    rows_per_image: Some(1),
303                },
304            },
305            wgpu::Extent3d {
306                width: 1,
307                height: 1,
308                depth_or_array_layers: 1,
309            },
310        );
311        encoder.copy_texture_to_buffer(
312            wgpu::TexelCopyTextureInfo {
313                texture: &pick_depth_texture,
314                mip_level: 0,
315                origin: wgpu::Origin3d { x: px, y: py, z: 0 },
316                aspect: wgpu::TextureAspect::All,
317            },
318            wgpu::TexelCopyBufferInfo {
319                buffer: &depth_staging,
320                layout: wgpu::TexelCopyBufferLayout {
321                    offset: 0,
322                    bytes_per_row: Some(bytes_per_row_aligned),
323                    rows_per_image: Some(1),
324                },
325            },
326            wgpu::Extent3d {
327                width: 1,
328                height: 1,
329                depth_or_array_layers: 1,
330            },
331        );
332
333        queue.submit(std::iter::once(encoder.finish()));
334
335        // --- map and read ---
336        let (tx_id, rx_id) = std::sync::mpsc::channel::<Result<(), wgpu::BufferAsyncError>>();
337        let (tx_dep, rx_dep) = std::sync::mpsc::channel::<Result<(), wgpu::BufferAsyncError>>();
338        id_staging
339            .slice(..)
340            .map_async(wgpu::MapMode::Read, move |r| {
341                let _ = tx_id.send(r);
342            });
343        depth_staging
344            .slice(..)
345            .map_async(wgpu::MapMode::Read, move |r| {
346                let _ = tx_dep.send(r);
347            });
348        device
349            .poll(wgpu::PollType::Wait {
350                submission_index: None,
351                timeout: Some(std::time::Duration::from_secs(5)),
352            })
353            .unwrap();
354        let _ = rx_id.recv().unwrap_or(Err(wgpu::BufferAsyncError));
355        let _ = rx_dep.recv().unwrap_or(Err(wgpu::BufferAsyncError));
356
357        let object_id = {
358            let data = id_staging.slice(..).get_mapped_range();
359            u32::from_le_bytes([data[0], data[1], data[2], data[3]])
360        };
361        id_staging.unmap();
362
363        let depth = {
364            let data = depth_staging.slice(..).get_mapped_range();
365            f32::from_le_bytes([data[0], data[1], data[2], data[3]])
366        };
367        depth_staging.unmap();
368
369        // --- decode sentinel ---
370        // 0 = miss (clear color); anything else is (scene_items_index + 1).
371        if object_id == 0 {
372            return None;
373        }
374
375        Some(crate::interaction::picking::GpuPickHit {
376            object_id: (object_id - 1) as u64,
377            depth,
378        })
379    }
380}