Struct bevy::render::render_resource::CommandEncoder
source · pub struct CommandEncoder { /* private fields */ }
Expand description
Encodes a series of GPU operations.
A command encoder can record RenderPasses, ComputePasses,
and transfer operations between driver-managed resources like Buffers and Textures.
When finished recording, call CommandEncoder::finish to obtain a CommandBuffer which may
be submitted for execution.
Corresponds to WebGPU GPUCommandEncoder.
Implementations§
source§impl CommandEncoder
impl CommandEncoder
sourcepub fn finish(self) -> CommandBuffer
pub fn finish(self) -> CommandBuffer
Finishes recording and returns a CommandBuffer that can be submitted for execution.
Examples found in repository?
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
// Render-graph node body: for every enabled ImageCopier, record a
// texture-to-buffer copy on a fresh command encoder and submit it to the
// render queue, making the image contents readable from the CPU.
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
// NOTE(review): unwrap assumes these resources were inserted during the
// extract phase — confirm against the plugin setup.
let image_copiers = world.get_resource::<ImageCopiers>().unwrap();
let gpu_images = world
.get_resource::<RenderAssets<bevy::render::texture::GpuImage>>()
.unwrap();
for image_copier in image_copiers.iter() {
// Skip copiers that are currently switched off.
if !image_copier.enabled() {
continue;
}
let src_image = gpu_images.get(&image_copier.src_image).unwrap();
// Each copier records into its own encoder so its commands can be
// submitted independently below.
let mut encoder = render_context
.render_device()
.create_command_encoder(&CommandEncoderDescriptor::default());
let block_dimensions = src_image.texture_format.block_dimensions();
let block_size = src_image.texture_format.block_copy_size(None).unwrap();
// Calculating correct size of image row because
// copy_texture_to_buffer can copy image only by rows aligned wgpu::COPY_BYTES_PER_ROW_ALIGNMENT
// That's why image in buffer can be little bit wider
// This should be taken into account at copy from buffer stage
let padded_bytes_per_row = RenderDevice::align_copy_bytes_per_row(
(src_image.size.x as usize / block_dimensions.0 as usize) * block_size as usize,
);
// Copy the full texture: one layer, full width and height.
let texture_extent = Extent3d {
width: src_image.size.x,
height: src_image.size.y,
depth_or_array_layers: 1,
};
// Record the GPU texture -> buffer copy using the padded row stride.
encoder.copy_texture_to_buffer(
src_image.texture.as_image_copy(),
ImageCopyBuffer {
buffer: &image_copier.buffer,
layout: ImageDataLayout {
offset: 0,
bytes_per_row: Some(
std::num::NonZeroU32::new(padded_bytes_per_row as u32)
.unwrap()
.into(),
),
rows_per_image: None,
},
},
texture_extent,
);
let render_queue = world.get_resource::<RenderQueue>().unwrap();
// finish() consumes the encoder and yields a CommandBuffer to submit.
render_queue.submit(std::iter::once(encoder.finish()));
}
Ok(())
}sourcepub fn begin_render_pass<'pass>(
&'pass mut self,
desc: &RenderPassDescriptor<'pass, '_>,
) -> RenderPass<'pass>
pub fn begin_render_pass<'pass>( &'pass mut self, desc: &RenderPassDescriptor<'pass, '_>, ) -> RenderPass<'pass>
Begins recording of a render pass.
This function returns a RenderPass object which records a single render pass.
sourcepub fn begin_compute_pass(
&mut self,
desc: &ComputePassDescriptor<'_>,
) -> ComputePass<'_>
pub fn begin_compute_pass( &mut self, desc: &ComputePassDescriptor<'_>, ) -> ComputePass<'_>
Begins recording of a compute pass.
This function returns a ComputePass object which records a single compute pass.
Examples found in repository?
272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
// Render-graph node body: dispatch the readback compute pipeline (once it
// has finished compiling), then copy its output into a CPU-mappable buffer.
fn run(
&self,
_graph: &mut render_graph::RenderGraphContext,
render_context: &mut RenderContext,
world: &World,
) -> Result<(), render_graph::NodeRunError> {
let pipeline_cache = world.resource::<PipelineCache>();
let pipeline = world.resource::<ComputePipeline>();
let bind_group = world.resource::<GpuBufferBindGroup>();
// The pipeline may still be compiling; skip the dispatch until it is ready.
if let Some(init_pipeline) = pipeline_cache.get_compute_pipeline(pipeline.pipeline) {
let mut pass =
render_context
.command_encoder()
.begin_compute_pass(&ComputePassDescriptor {
label: Some("GPU readback compute pass"),
..default()
});
pass.set_bind_group(0, &bind_group.0, &[]);
pass.set_pipeline(init_pipeline);
// One workgroup per buffer element.
pass.dispatch_workgroups(BUFFER_LEN as u32, 1, 1);
}
// Copy the gpu accessible buffer to the cpu accessible buffer
let buffers = world.resource::<Buffers>();
render_context.command_encoder().copy_buffer_to_buffer(
buffers
.gpu_buffer
.buffer()
.expect("Buffer should have already been uploaded to the gpu"),
0,
&buffers.cpu_buffer,
0,
// Whole buffer: element count times the element size in bytes.
(BUFFER_LEN * std::mem::size_of::<u32>()) as u64,
);
Ok(())
}More examples
255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291
// Render-graph node body for the Game of Life example: chooses the init or
// update compute pipeline based on the node's state and dispatches it over
// the whole simulation grid.
fn run(
&self,
_graph: &mut render_graph::RenderGraphContext,
render_context: &mut RenderContext,
world: &World,
) -> Result<(), render_graph::NodeRunError> {
let bind_groups = &world.resource::<GameOfLifeImageBindGroups>().0;
let pipeline_cache = world.resource::<PipelineCache>();
let pipeline = world.resource::<GameOfLifePipeline>();
let mut pass = render_context
.command_encoder()
.begin_compute_pass(&ComputePassDescriptor::default());
// select the pipeline based on the current state
match self.state {
// Pipelines are still compiling — record nothing this frame.
GameOfLifeState::Loading => {}
GameOfLifeState::Init => {
// NOTE(review): unwrap is safe only if the state machine enters
// Init strictly after the pipeline finished compiling — verify.
let init_pipeline = pipeline_cache
.get_compute_pipeline(pipeline.init_pipeline)
.unwrap();
pass.set_bind_group(0, &bind_groups[0], &[]);
pass.set_pipeline(init_pipeline);
// Cover the SIZE.0 x SIZE.1 grid, WORKGROUP_SIZE cells per group.
pass.dispatch_workgroups(SIZE.0 / WORKGROUP_SIZE, SIZE.1 / WORKGROUP_SIZE, 1);
}
GameOfLifeState::Update(index) => {
let update_pipeline = pipeline_cache
.get_compute_pipeline(pipeline.update_pipeline)
.unwrap();
// `index` selects which of the ping-pong bind groups to use.
pass.set_bind_group(0, &bind_groups[index], &[]);
pass.set_pipeline(update_pipeline);
pass.dispatch_workgroups(SIZE.0 / WORKGROUP_SIZE, SIZE.1 / WORKGROUP_SIZE, 1);
}
}
Ok(())
}sourcepub fn copy_buffer_to_buffer(
&mut self,
source: &Buffer,
source_offset: u64,
destination: &Buffer,
destination_offset: u64,
copy_size: u64,
)
pub fn copy_buffer_to_buffer( &mut self, source: &Buffer, source_offset: u64, destination: &Buffer, destination_offset: u64, copy_size: u64, )
Copy data from one buffer to another.
§Panics
- Buffer offsets or copy size not a multiple of
COPY_BUFFER_ALIGNMENT. - Copy would overrun buffer.
- Copy within the same buffer.
Examples found in repository?
272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
// Render-graph node body (same example as shown under begin_compute_pass):
// dispatch the readback compute pipeline when compiled, then copy the GPU
// buffer into a CPU-mappable buffer for readback.
fn run(
&self,
_graph: &mut render_graph::RenderGraphContext,
render_context: &mut RenderContext,
world: &World,
) -> Result<(), render_graph::NodeRunError> {
let pipeline_cache = world.resource::<PipelineCache>();
let pipeline = world.resource::<ComputePipeline>();
let bind_group = world.resource::<GpuBufferBindGroup>();
// Only dispatch once the async pipeline compilation has completed.
if let Some(init_pipeline) = pipeline_cache.get_compute_pipeline(pipeline.pipeline) {
let mut pass =
render_context
.command_encoder()
.begin_compute_pass(&ComputePassDescriptor {
label: Some("GPU readback compute pass"),
..default()
});
pass.set_bind_group(0, &bind_group.0, &[]);
pass.set_pipeline(init_pipeline);
// One workgroup per buffer element.
pass.dispatch_workgroups(BUFFER_LEN as u32, 1, 1);
}
// Copy the gpu accessible buffer to the cpu accessible buffer
let buffers = world.resource::<Buffers>();
render_context.command_encoder().copy_buffer_to_buffer(
buffers
.gpu_buffer
.buffer()
.expect("Buffer should have already been uploaded to the gpu"),
0,
&buffers.cpu_buffer,
0,
// Whole buffer: element count times the element size in bytes.
(BUFFER_LEN * std::mem::size_of::<u32>()) as u64,
);
Ok(())
}sourcepub fn copy_buffer_to_texture(
&mut self,
source: ImageCopyBuffer<&Buffer>,
destination: ImageCopyTexture<&Texture>,
copy_size: Extent3d,
)
pub fn copy_buffer_to_texture( &mut self, source: ImageCopyBuffer<&Buffer>, destination: ImageCopyTexture<&Texture>, copy_size: Extent3d, )
Copy data from a buffer to a texture.
sourcepub fn copy_texture_to_buffer(
&mut self,
source: ImageCopyTexture<&Texture>,
destination: ImageCopyBuffer<&Buffer>,
copy_size: Extent3d,
)
pub fn copy_texture_to_buffer( &mut self, source: ImageCopyTexture<&Texture>, destination: ImageCopyBuffer<&Buffer>, copy_size: Extent3d, )
Copy data from a texture to a buffer.
Examples found in repository?
338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399
// Render-graph node body (same example as shown under finish): records a
// texture-to-buffer copy per enabled ImageCopier and submits each encoder
// to the render queue.
fn run(
&self,
_graph: &mut RenderGraphContext,
render_context: &mut RenderContext,
world: &World,
) -> Result<(), NodeRunError> {
// NOTE(review): unwrap assumes these resources exist in the render world
// by the time the graph runs — confirm against the plugin setup.
let image_copiers = world.get_resource::<ImageCopiers>().unwrap();
let gpu_images = world
.get_resource::<RenderAssets<bevy::render::texture::GpuImage>>()
.unwrap();
for image_copier in image_copiers.iter() {
// Disabled copiers record nothing this frame.
if !image_copier.enabled() {
continue;
}
let src_image = gpu_images.get(&image_copier.src_image).unwrap();
// A dedicated encoder per copier keeps each submission independent.
let mut encoder = render_context
.render_device()
.create_command_encoder(&CommandEncoderDescriptor::default());
let block_dimensions = src_image.texture_format.block_dimensions();
let block_size = src_image.texture_format.block_copy_size(None).unwrap();
// Calculating correct size of image row because
// copy_texture_to_buffer can copy image only by rows aligned wgpu::COPY_BYTES_PER_ROW_ALIGNMENT
// That's why image in buffer can be little bit wider
// This should be taken into account at copy from buffer stage
let padded_bytes_per_row = RenderDevice::align_copy_bytes_per_row(
(src_image.size.x as usize / block_dimensions.0 as usize) * block_size as usize,
);
// Copy the full texture: one layer, full width and height.
let texture_extent = Extent3d {
width: src_image.size.x,
height: src_image.size.y,
depth_or_array_layers: 1,
};
// Record the GPU texture -> buffer copy using the padded row stride.
encoder.copy_texture_to_buffer(
src_image.texture.as_image_copy(),
ImageCopyBuffer {
buffer: &image_copier.buffer,
layout: ImageDataLayout {
offset: 0,
bytes_per_row: Some(
std::num::NonZeroU32::new(padded_bytes_per_row as u32)
.unwrap()
.into(),
),
rows_per_image: None,
},
},
texture_extent,
);
let render_queue = world.get_resource::<RenderQueue>().unwrap();
// finish() consumes the encoder and yields a CommandBuffer to submit.
render_queue.submit(std::iter::once(encoder.finish()));
}
Ok(())
}sourcepub fn copy_texture_to_texture(
&mut self,
source: ImageCopyTexture<&Texture>,
destination: ImageCopyTexture<&Texture>,
copy_size: Extent3d,
)
pub fn copy_texture_to_texture( &mut self, source: ImageCopyTexture<&Texture>, destination: ImageCopyTexture<&Texture>, copy_size: Extent3d, )
Copy data from one texture to another.
§Panics
- Textures are not the same type
- If a depth texture, or a multisampled texture, the entire texture must be copied
- Copy would overrun either texture
sourcepub fn clear_texture(
&mut self,
texture: &Texture,
subresource_range: &ImageSubresourceRange,
)
pub fn clear_texture( &mut self, texture: &Texture, subresource_range: &ImageSubresourceRange, )
Clears texture to zero.
Note that unlike with clear_buffer, COPY_DST usage is not required.
§Implementation notes
- implemented either via buffer copies and render/depth target clear, path depends on texture usages
- behaves like texture zero init, but is performed immediately (clearing is not delayed via marking it as uninitialized)
§Panics
- CLEAR_TEXTURE extension not enabled
- Range is out of bounds
sourcepub fn insert_debug_marker(&mut self, label: &str)
pub fn insert_debug_marker(&mut self, label: &str)
Inserts debug marker.
sourcepub fn push_debug_group(&mut self, label: &str)
pub fn push_debug_group(&mut self, label: &str)
Starts recording commands and groups them into a debug marker group.
sourcepub fn pop_debug_group(&mut self)
pub fn pop_debug_group(&mut self)
Stops recording commands into the debug marker group started by push_debug_group.
sourcepub fn resolve_query_set(
&mut self,
query_set: &QuerySet,
query_range: Range<u32>,
destination: &Buffer,
destination_offset: u64,
)
pub fn resolve_query_set( &mut self, query_set: &QuerySet, query_range: Range<u32>, destination: &Buffer, destination_offset: u64, )
Resolves a query set, writing the results into the supplied destination buffer.
Occlusion and timestamp queries are 8 bytes each (see crate::QUERY_SIZE). For pipeline statistics queries,
see PipelineStatisticsTypes for more information.
sourcepub unsafe fn as_hal_mut<A, F, R>(
&mut self,
hal_command_encoder_callback: F,
) -> Option<R>
Available on wgpu_core only.
pub unsafe fn as_hal_mut<A, F, R>( &mut self, hal_command_encoder_callback: F, ) -> Option<R>
Available on wgpu_core only. Returns the inner hal CommandEncoder using a callback. The hal command encoder will be None if the
backend type argument does not match with this wgpu CommandEncoder
This method will start the wgpu_core level command recording.
§Safety
- The raw handle obtained from the hal CommandEncoder must not be manually destroyed
source§impl CommandEncoder
impl CommandEncoder
Features::TIMESTAMP_QUERY_INSIDE_ENCODERS must be enabled on the device in order to call these functions.
sourcepub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32)
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32)
Issue a timestamp command at this point in the queue. The timestamp will be written to the specified query set, at the specified index.
Must be multiplied by Queue::get_timestamp_period to get
the value in nanoseconds. Absolute values have no meaning,
but timestamps can be subtracted to get the time it takes
for a string of operations to complete.
Attention: Since commands within a command recorder may be reordered, there is no strict guarantee that timestamps are taken after all commands recorded so far and all before all commands recorded after. This may depend both on the backend and the driver.
Trait Implementations§
source§impl Debug for CommandEncoder
impl Debug for CommandEncoder
Auto Trait Implementations§
impl Freeze for CommandEncoder
impl !RefUnwindSafe for CommandEncoder
impl Send for CommandEncoder
impl Sync for CommandEncoder
impl Unpin for CommandEncoder
impl !UnwindSafe for CommandEncoder
Blanket Implementations§
source§impl<T, U> AsBindGroupShaderType<U> for T
impl<T, U> AsBindGroupShaderType<U> for T
source§fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
Returns the T ShaderType for self. When used in AsBindGroup
derives, it is safe to assume that all images in self exist.
source§impl<T> BorrowMut<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
source§impl<T> Downcast for Twhere
T: Any,
impl<T> Downcast for Twhere
T: Any,
source§fn into_any(self: Box<T>) -> Box<dyn Any>
fn into_any(self: Box<T>) -> Box<dyn Any>
Converts Box<dyn Trait> (where Trait: Downcast) to Box<dyn Any>. Box<dyn Any> can
then be further downcast into Box<ConcreteType> where ConcreteType implements Trait.
source§fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Rc<Trait> (where Trait: Downcast) to Rc<Any>. Rc<Any> can then be
further downcast into Rc<ConcreteType> where ConcreteType implements Trait.source§fn as_any(&self) -> &(dyn Any + 'static)
fn as_any(&self) -> &(dyn Any + 'static)
&Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot
generate &Any’s vtable from &Trait’s.source§fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
&mut Trait (where Trait: Downcast) to &Any. This is needed since Rust cannot
generate &mut Any’s vtable from &mut Trait’s.source§impl<T> DowncastSync for T
impl<T> DowncastSync for T
source§impl<S> FromSample<S> for S
impl<S> FromSample<S> for S
fn from_sample_(s: S) -> S
source§impl<T> Instrument for T
impl<T> Instrument for T
source§fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
source§fn in_current_span(self) -> Instrumented<Self> ⓘ
fn in_current_span(self) -> Instrumented<Self> ⓘ
source§impl<T> IntoEither for T
impl<T> IntoEither for T
source§fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
Converts self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moresource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
Converts self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read more