headless_renderer/headless_renderer.rs
//! This example illustrates how to make a headless renderer.
//! Derived from: <https://sotrh.github.io/learn-wgpu/showcase/windowless/#a-triangle-without-a-window>
//! It follows these steps:
//!
//! 1. Render from the camera to a GPU-image render target
//! 2. Copy from the GPU image to a buffer using the `ImageCopyDriver` node in the `RenderGraph`
//! 3. Copy from the buffer to a channel using `receive_image_from_buffer` after `RenderSystems::Render`
//! 4. Save from the channel to a sequentially numbered file using the `update` system at `PostUpdate` in the `MainWorld`
//! 5. Exit if the `single_image` setting is enabled
//!
//! If your goal is to capture a single “screenshot”, as opposed to every single rendered frame
//! without gaps, it is simpler to use [`bevy::render::view::window::screenshot::Screenshot`]
//! than this approach.
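//!
//! A minimal sketch of that simpler, window-based path (not used by this example; the
//! observer-based `Screenshot`/`save_to_disk` API is assumed here and may differ between
//! Bevy versions):
//!
//! ```ignore
//! use bevy::prelude::*;
//! use bevy::render::view::window::screenshot::{save_to_disk, Screenshot};
//!
//! fn take_screenshot(mut commands: Commands) {
//!     // Capture the primary window once and write it to disk when the capture completes.
//!     commands
//!         .spawn(Screenshot::primary_window())
//!         .observe(save_to_disk("screenshot.png"));
//! }
//! ```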

use bevy::{
    app::{AppExit, ScheduleRunnerPlugin},
    camera::RenderTarget,
    core_pipeline::tonemapping::Tonemapping,
    image::TextureFormatPixelInfo,
    prelude::*,
    render::{
        render_asset::RenderAssets,
        render_graph::{self, NodeRunError, RenderGraph, RenderGraphContext, RenderLabel},
        render_resource::{
            Buffer, BufferDescriptor, BufferUsages, CommandEncoderDescriptor, Extent3d, MapMode,
            PollType, TexelCopyBufferInfo, TexelCopyBufferLayout, TextureFormat, TextureUsages,
        },
        renderer::{RenderContext, RenderDevice, RenderQueue},
        Extract, Render, RenderApp, RenderSystems,
    },
    window::ExitCondition,
    winit::WinitPlugin,
};
use crossbeam_channel::{Receiver, Sender};
use std::{
    ops::{Deref, DerefMut},
    path::PathBuf,
    sync::{
        atomic::{AtomicBool, Ordering},
        Arc,
    },
    time::Duration,
};
// To communicate between the main world and the render world we need a channel.
// Since the main world and render world run in parallel, there will always be a frame of latency
// between the data sent from the render world and the data received in the main world.
//
// frame n => render world sends data through the channel at the end of the frame
// frame n + 1 => main world receives the data
//
// The Receiver and Sender are kept in resources because there is a single camera and a single
// render target, so every image plays the same role. If you want to differentiate images from
// different cameras, keep the Receiver in ImageCopier and the Sender in ImageToSave, or send
// an id along with the data.

/// This will asynchronously receive any data sent from the render world
#[derive(Resource, Deref)]
struct MainWorldReceiver(Receiver<Vec<u8>>);

/// This will asynchronously send any data to the main world
#[derive(Resource, Deref)]
struct RenderWorldSender(Sender<Vec<u8>>);

// Parameters of the resulting image
struct AppConfig {
    width: u32,
    height: u32,
    single_image: bool,
}

fn main() {
    let config = AppConfig {
        width: 1920,
        height: 1080,
        single_image: true,
    };

    // Set up frame capture
    App::new()
        .insert_resource(SceneController::new(
            config.width,
            config.height,
            config.single_image,
        ))
        .insert_resource(ClearColor(Color::srgb_u8(0, 0, 0)))
        .add_plugins(
            DefaultPlugins
                .set(ImagePlugin::default_nearest())
                // Not strictly necessary, as the inclusion of ScheduleRunnerPlugin below
                // replaces the bevy_winit app runner and so a window is never created.
                .set(WindowPlugin {
                    primary_window: None,
                    // Don’t automatically exit due to having no windows.
                    // Instead, the code in `update()` will explicitly produce an `AppExit` event.
                    exit_condition: ExitCondition::DontExit,
                    ..default()
                })
                // WinitPlugin will panic in environments without a display server.
                .disable::<WinitPlugin>(),
        )
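        // Copies the rendered frame from the GPU render target into a CPU-readable buffer
        // and sends its bytes to the main world over a channel.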
        .add_plugins(ImageCopyPlugin)
        // Saves the frames received from the render world to image files on disk.
        .add_plugins(CaptureFramePlugin)
        // ScheduleRunnerPlugin provides an alternative to the default bevy_winit app runner, which
        // manages the loop without creating a window.
        .add_plugins(ScheduleRunnerPlugin::run_loop(
            // Run 60 times per second.
            Duration::from_secs_f64(1.0 / 60.0),
        ))
        .init_resource::<SceneController>()
        .add_systems(Startup, setup)
        .run();
}

/// Capture image settings and state
#[derive(Debug, Default, Resource)]
struct SceneController {
    state: SceneState,
    name: String,
    width: u32,
    height: u32,
    single_image: bool,
}

impl SceneController {
    pub fn new(width: u32, height: u32, single_image: bool) -> SceneController {
        SceneController {
            state: SceneState::BuildScene,
            name: String::from(""),
            width,
            height,
            single_image,
        }
    }
}

/// Capture image state
#[derive(Debug, Default)]
enum SceneState {
    #[default]
    // State before any rendering
    BuildScene,
    // Rendering state, stores the number of frames remaining before saving the image
    Render(u32),
}

fn setup(
    mut commands: Commands,
    mut meshes: ResMut<Assets<Mesh>>,
    mut materials: ResMut<Assets<StandardMaterial>>,
    mut images: ResMut<Assets<Image>>,
    mut scene_controller: ResMut<SceneController>,
    render_device: Res<RenderDevice>,
) {
    let render_target = setup_render_target(
        &mut commands,
        &mut images,
        &render_device,
        &mut scene_controller,
        // pre_roll_frames should be large enough for the scene to render fully,
        // but the larger it is, the longer the example runs.
        // To visualize the stages of scene rendering, change this parameter to 0
        // and set AppConfig::single_image to false in main.
        // The stages are:
        // 1. A transparent image
        // 2. A few black-box images
        // 3. Fully rendered scene images
        // The exact number depends on device speed, device load and scene size.
        40,
        "main_scene".into(),
    );

    // Example scene so the captured picture is not just a black box
    // circular base
    commands.spawn((
        Mesh3d(meshes.add(Circle::new(4.0))),
        MeshMaterial3d(materials.add(Color::WHITE)),
        Transform::from_rotation(Quat::from_rotation_x(-std::f32::consts::FRAC_PI_2)),
    ));
    // cube
    commands.spawn((
        Mesh3d(meshes.add(Cuboid::new(1.0, 1.0, 1.0))),
        MeshMaterial3d(materials.add(Color::srgb_u8(124, 144, 255))),
        Transform::from_xyz(0.0, 0.5, 0.0),
    ));
    // light
    commands.spawn((
        PointLight {
            shadows_enabled: true,
            ..default()
        },
        Transform::from_xyz(4.0, 8.0, 4.0),
    ));

    commands.spawn((
        Camera3d::default(),
        Camera {
            // render to image
            target: render_target,
            ..default()
        },
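        // Disable tonemapping so the captured image is the raw rendered output.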
        Tonemapping::None,
        Transform::from_xyz(-2.5, 4.5, 9.0).looking_at(Vec3::ZERO, Vec3::Y),
    ));
}

/// Plugin for the render-world part of the work
pub struct ImageCopyPlugin;
impl Plugin for ImageCopyPlugin {
    fn build(&self, app: &mut App) {
        let (s, r) = crossbeam_channel::unbounded();

        let render_app = app
            .insert_resource(MainWorldReceiver(r))
            .sub_app_mut(RenderApp);

        let mut graph = render_app.world_mut().resource_mut::<RenderGraph>();
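        // Add the image-copy node to the render graph and schedule it after the camera driver
        // node, so the render target has been written before it is copied.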
        graph.add_node(ImageCopy, ImageCopyDriver);
        graph.add_node_edge(bevy::render::graph::CameraDriverLabel, ImageCopy);

        render_app
            .insert_resource(RenderWorldSender(s))
            // Make the ImageCopiers accessible to render-world systems and the render graph node
            .add_systems(ExtractSchedule, image_copy_extract)
            // Receives image data from the buffer and sends it into the channel,
            // so it needs to run after the render graph is done
            .add_systems(
                Render,
                receive_image_from_buffer.after(RenderSystems::Render),
            );
    }
}

/// Sets up the render target and the CPU-side image used for saving, and switches the scene state to render mode
fn setup_render_target(
    commands: &mut Commands,
    images: &mut ResMut<Assets<Image>>,
    render_device: &Res<RenderDevice>,
    scene_controller: &mut ResMut<SceneController>,
    pre_roll_frames: u32,
    scene_name: String,
) -> RenderTarget {
    let size = Extent3d {
        width: scene_controller.width,
        height: scene_controller.height,
        ..Default::default()
    };

    // This is the texture that will be rendered to.
    let mut render_target_image =
        Image::new_target_texture(size.width, size.height, TextureFormat::bevy_default());
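    // COPY_SRC is required so the `ImageCopyDriver` node can copy this texture into the buffer.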
    render_target_image.texture_descriptor.usage |= TextureUsages::COPY_SRC;
    let render_target_image_handle = images.add(render_target_image);

    // This is the CPU-side image that the rendered data will be copied into.
    let cpu_image =
        Image::new_target_texture(size.width, size.height, TextureFormat::bevy_default());
    let cpu_image_handle = images.add(cpu_image);

    commands.spawn(ImageCopier::new(
        render_target_image_handle.clone(),
        size,
        render_device,
    ));

    commands.spawn(ImageToSave(cpu_image_handle));

    scene_controller.state = SceneState::Render(pre_roll_frames);
    scene_controller.name = scene_name;
    RenderTarget::Image(render_target_image_handle.into())
}

/// Sets up the image saver
pub struct CaptureFramePlugin;
impl Plugin for CaptureFramePlugin {
    fn build(&self, app: &mut App) {
        info!("Adding CaptureFramePlugin");
        app.add_systems(PostUpdate, update);
    }
}

/// `ImageCopier` aggregator in `RenderWorld`
#[derive(Clone, Default, Resource, Deref, DerefMut)]
struct ImageCopiers(pub Vec<ImageCopier>);

/// Used by `ImageCopyDriver` for copying from render target to buffer
#[derive(Clone, Component)]
struct ImageCopier {
    buffer: Buffer,
    enabled: Arc<AtomicBool>,
    src_image: Handle<Image>,
}

impl ImageCopier {
    pub fn new(
        src_image: Handle<Image>,
        size: Extent3d,
        render_device: &RenderDevice,
    ) -> ImageCopier {
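        // 4 bytes per pixel for the default render target format (RGBA8); the width is rounded
        // up to wgpu's copy alignment so the buffer can hold the padded rows written by
        // `copy_texture_to_buffer`.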
        let padded_bytes_per_row =
            RenderDevice::align_copy_bytes_per_row((size.width) as usize) * 4;

        let cpu_buffer = render_device.create_buffer(&BufferDescriptor {
            label: None,
            size: padded_bytes_per_row as u64 * size.height as u64,
            usage: BufferUsages::MAP_READ | BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        ImageCopier {
            buffer: cpu_buffer,
            src_image,
            enabled: Arc::new(AtomicBool::new(true)),
        }
    }

    pub fn enabled(&self) -> bool {
        self.enabled.load(Ordering::Relaxed)
    }
}

/// Extracts `ImageCopier`s into the render world, because `ImageCopyDriver` accesses them there
fn image_copy_extract(mut commands: Commands, image_copiers: Extract<Query<&ImageCopier>>) {
    commands.insert_resource(ImageCopiers(
        image_copiers.iter().cloned().collect::<Vec<ImageCopier>>(),
    ));
}

/// `RenderGraph` label for `ImageCopyDriver`
#[derive(Debug, PartialEq, Eq, Clone, Hash, RenderLabel)]
struct ImageCopy;

/// `RenderGraph` node
#[derive(Default)]
struct ImageCopyDriver;

// Copies image content from render target to buffer
impl render_graph::Node for ImageCopyDriver {
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let image_copiers = world.get_resource::<ImageCopiers>().unwrap();
        let gpu_images = world
            .get_resource::<RenderAssets<bevy::render::texture::GpuImage>>()
            .unwrap();

        for image_copier in image_copiers.iter() {
            if !image_copier.enabled() {
                continue;
            }

            let src_image = gpu_images.get(&image_copier.src_image).unwrap();

            let mut encoder = render_context
                .render_device()
                .create_command_encoder(&CommandEncoderDescriptor::default());

            let block_dimensions = src_image.texture_format.block_dimensions();
            let block_size = src_image.texture_format.block_copy_size(None).unwrap();

            // Calculate the correct image row size: copy_texture_to_buffer can only copy rows
            // aligned to wgpu::COPY_BYTES_PER_ROW_ALIGNMENT, so each row in the buffer can be
            // a little wider than the image itself.
            // This padding has to be taken into account when copying out of the buffer.
            let padded_bytes_per_row = RenderDevice::align_copy_bytes_per_row(
                (src_image.size.width as usize / block_dimensions.0 as usize) * block_size as usize,
            );

            encoder.copy_texture_to_buffer(
                src_image.texture.as_image_copy(),
                TexelCopyBufferInfo {
                    buffer: &image_copier.buffer,
                    layout: TexelCopyBufferLayout {
                        offset: 0,
                        bytes_per_row: Some(
                            std::num::NonZero::<u32>::new(padded_bytes_per_row as u32)
                                .unwrap()
                                .into(),
                        ),
                        rows_per_image: None,
                    },
                },
                src_image.size,
            );

            let render_queue = world.get_resource::<RenderQueue>().unwrap();
            render_queue.submit(std::iter::once(encoder.finish()));
        }

        Ok(())
    }
}

/// Runs in the render world after the `Render` set to send the image from the buffer
/// via the channel (the receiver lives in the main world)
fn receive_image_from_buffer(
    image_copiers: Res<ImageCopiers>,
    render_device: Res<RenderDevice>,
    sender: Res<RenderWorldSender>,
) {
    for image_copier in image_copiers.0.iter() {
        if !image_copier.enabled() {
            continue;
        }

        // Finally time to get our data back from the gpu.
        // First we get a buffer slice which represents a chunk of the buffer (which we
        // can't access yet).
        // We want the whole thing so use unbounded range.
        let buffer_slice = image_copier.buffer.slice(..);

        // Now things get complicated. WebGPU, for safety reasons, only allows either the GPU
        // or CPU to access a buffer's contents at a time. We need to "map" the buffer which means
        // flipping ownership of the buffer over to the CPU and making access legal. We do this
        // with `BufferSlice::map_async`.
        //
        // The problem is that map_async is not an async function so we can't await it. What
        // we need to do instead is pass in a closure that will be executed when the slice is
        // either mapped or the mapping has failed.
        //
        // The problem with this is that we don't have a reliable way to wait in the main
        // code for the buffer to be mapped and even worse, calling get_mapped_range or
        // get_mapped_range_mut prematurely will cause a panic, not return an error.
        //
        // Using channels solves this as awaiting the receiving of a message from
        // the passed closure will force the outside code to wait. It also doesn't hurt
        // if the closure finishes before the outside code catches up as the message is
        // buffered and receiving will just pick that up.
        //
        // It may also be worth noting that although on native, the usage of asynchronous
        // channels is wholly unnecessary, for the sake of portability to Wasm
        // we'll use async channels that work on both native and Wasm.

        let (s, r) = crossbeam_channel::bounded(1);

        // Maps the buffer so it can be read on the cpu
        buffer_slice.map_async(MapMode::Read, move |r| match r {
            // This will execute once the gpu is ready, so after the call to poll()
            Ok(r) => s.send(r).expect("Failed to send map update"),
            Err(err) => panic!("Failed to map buffer {err}"),
        });

        // In order for the mapping to be completed, one of three things must happen.
        // One of those can be calling `Device::poll`. This isn't necessary on the web as devices
        // are polled automatically but natively, we need to make sure this happens manually.
        // `PollType::Wait` will cause the thread to wait on native but not on WebGPU.

        // This blocks until the gpu is done executing everything
        render_device
            .poll(PollType::Wait)
            .expect("Failed to poll device for map async");

        // This blocks until the buffer is mapped
        r.recv().expect("Failed to receive the map_async message");

        // This could fail on app exit, if the main world clears its resources (including the
        // receiver) while the render world is still rendering
        let _ = sender.send(buffer_slice.get_mapped_range().to_vec());

        // We need to make sure all `BufferView`s are dropped before unmapping, so that the
        // buffer can be copied into again in the next iteration.
        image_copier.buffer.unmap();
    }
}

/// CPU-side image for saving
#[derive(Component, Deref, DerefMut)]
struct ImageToSave(Handle<Image>);

// Takes the image data sent from the render world off the channel and saves it to disk
fn update(
    images_to_save: Query<&ImageToSave>,
    receiver: Res<MainWorldReceiver>,
    mut images: ResMut<Assets<Image>>,
    mut scene_controller: ResMut<SceneController>,
    mut app_exit_writer: MessageWriter<AppExit>,
    mut file_number: Local<u32>,
) {
    if let SceneState::Render(n) = scene_controller.state {
        if n < 1 {
            // We don't want to block the main world on this,
            // so we use try_recv which attempts to receive without blocking
            let mut image_data = Vec::new();
            while let Ok(data) = receiver.try_recv() {
                // Image generation can be faster than saving to the filesystem,
                // so only the most recent frame in the channel is kept
                image_data = data;
            }
            if !image_data.is_empty() {
                for image in images_to_save.iter() {
                    // Fill the image asset with the data received from the channel
                    let img_bytes = images.get_mut(image.id()).unwrap();

                    // We need to ensure that this works regardless of the image dimensions.
                    // If the image became wider when copying from the texture to the buffer,
                    // then the data is reduced to its original size when copying from the buffer to the image.
                    let row_bytes = img_bytes.width() as usize
                        * img_bytes.texture_descriptor.format.pixel_size().unwrap();
                    let aligned_row_bytes = RenderDevice::align_copy_bytes_per_row(row_bytes);
                    if row_bytes == aligned_row_bytes {
                        img_bytes.data.as_mut().unwrap().clone_from(&image_data);
                    } else {
                        // shrink data to original image size
                        img_bytes.data = Some(
                            image_data
                                .chunks(aligned_row_bytes)
                                .take(img_bytes.height() as usize)
                                .flat_map(|row| &row[..row_bytes.min(row.len())])
                                .cloned()
                                .collect(),
                        );
                    }

                    // Create RGBA Image Buffer
                    let img = match img_bytes.clone().try_into_dynamic() {
                        Ok(img) => img.to_rgba8(),
                        Err(e) => panic!("Failed to create image buffer {e:?}"),
                    };

                    // Prepare the directory for images; `test_images` inside the crate folder is
                    // used here as an example. Choose the path depending on your needs.
                    let images_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("test_images");
                    info!("Saving image to: {images_dir:?}");
                    std::fs::create_dir_all(&images_dir).unwrap();

                    // Choose the filename, starting from 000.png
                    let image_path = images_dir.join(format!("{:03}.png", file_number.deref()));
                    *file_number.deref_mut() += 1;

                    // Finally, save the image to a file. This heavy, blocking operation is kept here
                    // for the example's simplicity, but in a real app you should move it to a separate task.
                    if let Err(e) = img.save(image_path) {
                        panic!("Failed to save image: {e}");
                    };
                }
                if scene_controller.single_image {
                    app_exit_writer.write(AppExit::Success);
                }
            }
        } else {
            // Clear the channel for skipped frames
            while receiver.try_recv().is_ok() {}
            scene_controller.state = SceneState::Render(n - 1);
        }
    }
}