pub struct CommandEncoder { /* private fields */ }
Expand description
Encodes a series of GPU operations.
A command encoder can record RenderPasses, ComputePasses,
and transfer operations between driver-managed resources like Buffers and Textures.
When finished recording, call CommandEncoder::finish to obtain a CommandBuffer,
which may then be submitted for execution.
Corresponds to WebGPU GPUCommandEncoder.
Implementations§
Source§impl CommandEncoder
impl CommandEncoder
Sourcepub fn finish(self) -> CommandBuffer
pub fn finish(self) -> CommandBuffer
Finishes recording and returns a CommandBuffer
that can be submitted for execution.
Examples found in repository?
340 fn run(
341 &self,
342 _graph: &mut RenderGraphContext,
343 render_context: &mut RenderContext,
344 world: &World,
345 ) -> Result<(), NodeRunError> {
346 let image_copiers = world.get_resource::<ImageCopiers>().unwrap();
347 let gpu_images = world
348 .get_resource::<RenderAssets<bevy::render::texture::GpuImage>>()
349 .unwrap();
350
351 for image_copier in image_copiers.iter() {
352 if !image_copier.enabled() {
353 continue;
354 }
355
356 let src_image = gpu_images.get(&image_copier.src_image).unwrap();
357
358 let mut encoder = render_context
359 .render_device()
360 .create_command_encoder(&CommandEncoderDescriptor::default());
361
362 let block_dimensions = src_image.texture_format.block_dimensions();
363 let block_size = src_image.texture_format.block_copy_size(None).unwrap();
364
365 // Calculating correct size of image row because
366 // copy_texture_to_buffer can copy image only by rows aligned wgpu::COPY_BYTES_PER_ROW_ALIGNMENT
367 // That's why image in buffer can be little bit wider
368 // This should be taken into account at copy from buffer stage
369 let padded_bytes_per_row = RenderDevice::align_copy_bytes_per_row(
370 (src_image.size.width as usize / block_dimensions.0 as usize) * block_size as usize,
371 );
372
373 encoder.copy_texture_to_buffer(
374 src_image.texture.as_image_copy(),
375 TexelCopyBufferInfo {
376 buffer: &image_copier.buffer,
377 layout: TexelCopyBufferLayout {
378 offset: 0,
379 bytes_per_row: Some(
380 std::num::NonZero::<u32>::new(padded_bytes_per_row as u32)
381 .unwrap()
382 .into(),
383 ),
384 rows_per_image: None,
385 },
386 },
387 src_image.size,
388 );
389
390 let render_queue = world.get_resource::<RenderQueue>().unwrap();
391 render_queue.submit(std::iter::once(encoder.finish()));
392 }
393
394 Ok(())
395 }
Sourcepub fn begin_render_pass<'encoder>(
&'encoder mut self,
desc: &RenderPassDescriptor<'_>,
) -> RenderPass<'encoder>
pub fn begin_render_pass<'encoder>( &'encoder mut self, desc: &RenderPassDescriptor<'_>, ) -> RenderPass<'encoder>
Begins recording of a render pass.
This function returns a RenderPass
object which records a single render pass.
As long as the returned RenderPass
has not ended,
any mutating operation on this command encoder causes an error and invalidates it.
Note that the 'encoder
lifetime relationship protects against this,
but it is possible to opt out of it by calling RenderPass::forget_lifetime
.
This can be useful for runtime handling of the encoder->pass
dependency e.g. when pass and encoder are stored in the same data structure.
Sourcepub fn begin_compute_pass<'encoder>(
&'encoder mut self,
desc: &ComputePassDescriptor<'_>,
) -> ComputePass<'encoder>
pub fn begin_compute_pass<'encoder>( &'encoder mut self, desc: &ComputePassDescriptor<'_>, ) -> ComputePass<'encoder>
Begins recording of a compute pass.
This function returns a ComputePass
object which records a single compute pass.
As long as the returned ComputePass
has not ended,
any mutating operation on this command encoder causes an error and invalidates it.
Note that the 'encoder
lifetime relationship protects against this,
but it is possible to opt out of it by calling ComputePass::forget_lifetime
.
This can be useful for runtime handling of the encoder->pass
dependency e.g. when pass and encoder are stored in the same data structure.
Examples found in repository?
197 fn run(
198 &self,
199 _graph: &mut render_graph::RenderGraphContext,
200 render_context: &mut RenderContext,
201 world: &World,
202 ) -> Result<(), render_graph::NodeRunError> {
203 let pipeline_cache = world.resource::<PipelineCache>();
204 let pipeline = world.resource::<ComputePipeline>();
205 let bind_group = world.resource::<GpuBufferBindGroup>();
206
207 if let Some(init_pipeline) = pipeline_cache.get_compute_pipeline(pipeline.pipeline) {
208 let mut pass =
209 render_context
210 .command_encoder()
211 .begin_compute_pass(&ComputePassDescriptor {
212 label: Some("GPU readback compute pass"),
213 ..default()
214 });
215
216 pass.set_bind_group(0, &bind_group.0, &[]);
217 pass.set_pipeline(init_pipeline);
218 pass.dispatch_workgroups(BUFFER_LEN as u32, 1, 1);
219 }
220 Ok(())
221 }
More examples
255 fn run(
256 &self,
257 _graph: &mut render_graph::RenderGraphContext,
258 render_context: &mut RenderContext,
259 world: &World,
260 ) -> Result<(), render_graph::NodeRunError> {
261 let bind_groups = &world.resource::<GameOfLifeImageBindGroups>().0;
262 let pipeline_cache = world.resource::<PipelineCache>();
263 let pipeline = world.resource::<GameOfLifePipeline>();
264
265 let mut pass = render_context
266 .command_encoder()
267 .begin_compute_pass(&ComputePassDescriptor::default());
268
269 // select the pipeline based on the current state
270 match self.state {
271 GameOfLifeState::Loading => {}
272 GameOfLifeState::Init => {
273 let init_pipeline = pipeline_cache
274 .get_compute_pipeline(pipeline.init_pipeline)
275 .unwrap();
276 pass.set_bind_group(0, &bind_groups[0], &[]);
277 pass.set_pipeline(init_pipeline);
278 pass.dispatch_workgroups(SIZE.0 / WORKGROUP_SIZE, SIZE.1 / WORKGROUP_SIZE, 1);
279 }
280 GameOfLifeState::Update(index) => {
281 let update_pipeline = pipeline_cache
282 .get_compute_pipeline(pipeline.update_pipeline)
283 .unwrap();
284 pass.set_bind_group(0, &bind_groups[index], &[]);
285 pass.set_pipeline(update_pipeline);
286 pass.dispatch_workgroups(SIZE.0 / WORKGROUP_SIZE, SIZE.1 / WORKGROUP_SIZE, 1);
287 }
288 }
289
290 Ok(())
291 }
Sourcepub fn copy_buffer_to_buffer(
&mut self,
source: &Buffer,
source_offset: u64,
destination: &Buffer,
destination_offset: u64,
copy_size: u64,
)
pub fn copy_buffer_to_buffer( &mut self, source: &Buffer, source_offset: u64, destination: &Buffer, destination_offset: u64, copy_size: u64, )
Copy data from one buffer to another.
§Panics
- Buffer offsets or copy size not a multiple of COPY_BUFFER_ALIGNMENT.
- Copy would overrun buffer.
- Copy within the same buffer.
Examples found in repository?
419 fn run<'w>(
420 &self,
421 _: &mut RenderGraphContext,
422 render_context: &mut RenderContext<'w>,
423 world: &'w World,
424 ) -> Result<(), NodeRunError> {
425 // Extract the buffers that hold the GPU indirect draw parameters from
426 // the world resources. We're going to read those buffers to determine
427 // how many meshes were actually drawn.
428 let (Some(indirect_parameters_buffers), Some(indirect_parameters_mapping_buffers)) = (
429 world.get_resource::<IndirectParametersBuffers>(),
430 world.get_resource::<IndirectParametersStagingBuffers>(),
431 ) else {
432 return Ok(());
433 };
434
435 // Get the indirect parameters buffers corresponding to the opaque 3D
436 // phase, since all our meshes are in that phase.
437 let Some(phase_indirect_parameters_buffers) =
438 indirect_parameters_buffers.get(&TypeId::of::<Opaque3d>())
439 else {
440 return Ok(());
441 };
442
443 // Grab both the buffers we're copying from and the staging buffers
444 // we're copying to. Remember that we can't map the indirect parameters
445 // buffers directly, so we have to copy their contents to a staging
446 // buffer.
447 let (
448 Some(indexed_data_buffer),
449 Some(indexed_batch_sets_buffer),
450 Some(indirect_parameters_staging_data_buffer),
451 Some(indirect_parameters_staging_batch_sets_buffer),
452 ) = (
453 phase_indirect_parameters_buffers.indexed.data_buffer(),
454 phase_indirect_parameters_buffers
455 .indexed
456 .batch_sets_buffer(),
457 indirect_parameters_mapping_buffers.data.as_ref(),
458 indirect_parameters_mapping_buffers.batch_sets.as_ref(),
459 )
460 else {
461 return Ok(());
462 };
463
464 // Copy from the indirect parameters buffers to the staging buffers.
465 render_context.command_encoder().copy_buffer_to_buffer(
466 indexed_data_buffer,
467 0,
468 indirect_parameters_staging_data_buffer,
469 0,
470 indexed_data_buffer.size(),
471 );
472 render_context.command_encoder().copy_buffer_to_buffer(
473 indexed_batch_sets_buffer,
474 0,
475 indirect_parameters_staging_batch_sets_buffer,
476 0,
477 indexed_batch_sets_buffer.size(),
478 );
479
480 Ok(())
481 }
Sourcepub fn copy_buffer_to_texture(
&mut self,
source: TexelCopyBufferInfo<&Buffer>,
destination: TexelCopyTextureInfo<&Texture>,
copy_size: Extent3d,
)
pub fn copy_buffer_to_texture( &mut self, source: TexelCopyBufferInfo<&Buffer>, destination: TexelCopyTextureInfo<&Texture>, copy_size: Extent3d, )
Copy data from a buffer to a texture.
Sourcepub fn copy_texture_to_buffer(
&mut self,
source: TexelCopyTextureInfo<&Texture>,
destination: TexelCopyBufferInfo<&Buffer>,
copy_size: Extent3d,
)
pub fn copy_texture_to_buffer( &mut self, source: TexelCopyTextureInfo<&Texture>, destination: TexelCopyBufferInfo<&Buffer>, copy_size: Extent3d, )
Copy data from a texture to a buffer.
Examples found in repository?
340 fn run(
341 &self,
342 _graph: &mut RenderGraphContext,
343 render_context: &mut RenderContext,
344 world: &World,
345 ) -> Result<(), NodeRunError> {
346 let image_copiers = world.get_resource::<ImageCopiers>().unwrap();
347 let gpu_images = world
348 .get_resource::<RenderAssets<bevy::render::texture::GpuImage>>()
349 .unwrap();
350
351 for image_copier in image_copiers.iter() {
352 if !image_copier.enabled() {
353 continue;
354 }
355
356 let src_image = gpu_images.get(&image_copier.src_image).unwrap();
357
358 let mut encoder = render_context
359 .render_device()
360 .create_command_encoder(&CommandEncoderDescriptor::default());
361
362 let block_dimensions = src_image.texture_format.block_dimensions();
363 let block_size = src_image.texture_format.block_copy_size(None).unwrap();
364
365 // Calculating correct size of image row because
366 // copy_texture_to_buffer can copy image only by rows aligned wgpu::COPY_BYTES_PER_ROW_ALIGNMENT
367 // That's why image in buffer can be little bit wider
368 // This should be taken into account at copy from buffer stage
369 let padded_bytes_per_row = RenderDevice::align_copy_bytes_per_row(
370 (src_image.size.width as usize / block_dimensions.0 as usize) * block_size as usize,
371 );
372
373 encoder.copy_texture_to_buffer(
374 src_image.texture.as_image_copy(),
375 TexelCopyBufferInfo {
376 buffer: &image_copier.buffer,
377 layout: TexelCopyBufferLayout {
378 offset: 0,
379 bytes_per_row: Some(
380 std::num::NonZero::<u32>::new(padded_bytes_per_row as u32)
381 .unwrap()
382 .into(),
383 ),
384 rows_per_image: None,
385 },
386 },
387 src_image.size,
388 );
389
390 let render_queue = world.get_resource::<RenderQueue>().unwrap();
391 render_queue.submit(std::iter::once(encoder.finish()));
392 }
393
394 Ok(())
395 }
Sourcepub fn copy_texture_to_texture(
&mut self,
source: TexelCopyTextureInfo<&Texture>,
destination: TexelCopyTextureInfo<&Texture>,
copy_size: Extent3d,
)
pub fn copy_texture_to_texture( &mut self, source: TexelCopyTextureInfo<&Texture>, destination: TexelCopyTextureInfo<&Texture>, copy_size: Extent3d, )
Copy data from one texture to another.
§Panics
- Textures are not the same type
- If a depth texture, or a multisampled texture, the entire texture must be copied
- Copy would overrun either texture
Sourcepub fn clear_texture(
&mut self,
texture: &Texture,
subresource_range: &ImageSubresourceRange,
)
pub fn clear_texture( &mut self, texture: &Texture, subresource_range: &ImageSubresourceRange, )
Clears texture to zero.
Note that unlike with clear_buffer, COPY_DST
usage is not required.
§Implementation notes
- implemented either via buffer copies and render/depth target clear, path depends on texture usages
- behaves like texture zero init, but is performed immediately (clearing is not delayed via marking it as uninitialized)
§Panics
- CLEAR_TEXTURE extension not enabled
- Range is out of bounds
Sourcepub fn insert_debug_marker(&mut self, label: &str)
pub fn insert_debug_marker(&mut self, label: &str)
Inserts debug marker.
Sourcepub fn push_debug_group(&mut self, label: &str)
pub fn push_debug_group(&mut self, label: &str)
Starts recording commands and groups them into a debug marker group.
Sourcepub fn pop_debug_group(&mut self)
pub fn pop_debug_group(&mut self)
Stops recording into, and closes, the current debug marker group.
Sourcepub fn resolve_query_set(
&mut self,
query_set: &QuerySet,
query_range: Range<u32>,
destination: &Buffer,
destination_offset: u64,
)
pub fn resolve_query_set( &mut self, query_set: &QuerySet, query_range: Range<u32>, destination: &Buffer, destination_offset: u64, )
Resolves a query set, writing the results into the supplied destination buffer.
Occlusion and timestamp queries are 8 bytes each (see crate::QUERY_SIZE
). For pipeline statistics queries,
see PipelineStatisticsTypes
for more information.
Sourcepub unsafe fn as_hal_mut<A, F, R>(
&mut self,
hal_command_encoder_callback: F,
) -> R
pub unsafe fn as_hal_mut<A, F, R>( &mut self, hal_command_encoder_callback: F, ) -> R
Returns the inner hal CommandEncoder using a callback. The hal command encoder will be None
if the backend type argument does not match with this wgpu CommandEncoder.
This method will start the wgpu_core level command recording.
§Safety
- The raw handle obtained from the hal CommandEncoder must not be manually destroyed
Source§impl CommandEncoder
Features::TIMESTAMP_QUERY_INSIDE_ENCODERS
must be enabled on the device in order to call these functions.
impl CommandEncoder
Features::TIMESTAMP_QUERY_INSIDE_ENCODERS
must be enabled on the device in order to call these functions.
Sourcepub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32)
pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32)
Issue a timestamp command at this point in the queue. The timestamp will be written to the specified query set, at the specified index.
Must be multiplied by Queue::get_timestamp_period
to get
the value in nanoseconds. Absolute values have no meaning,
but timestamps can be subtracted to get the time it takes
for a string of operations to complete.
Attention: Since commands within a command recorder may be reordered, there is no strict guarantee that timestamps are taken after all commands recorded so far and before all commands recorded after. This may depend both on the backend and the driver.
Source§impl CommandEncoder
Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
must be enabled on the device in order to call these functions.
impl CommandEncoder
Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE
must be enabled on the device in order to call these functions.
Sourcepub fn build_acceleration_structures<'a>(
&mut self,
blas: impl IntoIterator<Item = &'a BlasBuildEntry<'a>>,
tlas: impl IntoIterator<Item = &'a TlasPackage>,
)
pub fn build_acceleration_structures<'a>( &mut self, blas: impl IntoIterator<Item = &'a BlasBuildEntry<'a>>, tlas: impl IntoIterator<Item = &'a TlasPackage>, )
Build bottom and top level acceleration structures.
Builds the BLASes then the TLASes, but does not build the BLASes into the TLASes, that must be done by setting a TLAS instance in the TLAS package to one that contains the BLAS (and with an appropriate transform)
§Validation
- blas: Iterator of bottom level acceleration structure entries to build.
For each entry, the provided size descriptor must be strictly smaller or equal to the descriptor given at BLAS creation, this means:
- Less or equal number of geometries
- Same kind of geometry (with index buffer or without) (same vertex/index format)
- Same flags
- Less or equal number of vertices
- Less or equal number of indices (if applicable)
- tlas: iterator of top level acceleration structure packages to build
For each entry:
- Each BLAS in each TLAS instance must have been built in the current call or in a previous call to
build_acceleration_structures
or build_acceleration_structures_unsafe_tlas
- The number of TLAS instances must be less than or equal to the max number of TLAS instances specified when creating the package (if creating a package with
TlasPackage::new()
this is already satisfied)
If the device the command encoder is created from does not have Features::EXPERIMENTAL_RAY_TRACING_ACCELERATION_STRUCTURE enabled then a validation error is generated
A bottom level acceleration structure may be built and used as a reference in a top level acceleration structure in the same invocation of this function.
§Bind group usage
When a top level acceleration structure is used in a bind group, some validation takes place:
- The top level acceleration structure is valid and has been built.
- All the bottom level acceleration structures referenced by the top level acceleration structure are valid and have been built prior, or at same time as the containing top level acceleration structure.
Sourcepub unsafe fn build_acceleration_structures_unsafe_tlas<'a>(
&mut self,
blas: impl IntoIterator<Item = &'a BlasBuildEntry<'a>>,
tlas: impl IntoIterator<Item = &'a TlasBuildEntry<'a>>,
)
pub unsafe fn build_acceleration_structures_unsafe_tlas<'a>( &mut self, blas: impl IntoIterator<Item = &'a BlasBuildEntry<'a>>, tlas: impl IntoIterator<Item = &'a TlasBuildEntry<'a>>, )
Build bottom and top level acceleration structures.
See CommandEncoder::build_acceleration_structures
for the safe version and more details. All validation in CommandEncoder::build_acceleration_structures
except that
listed under tlas applies here as well.
§Safety
- The contents of the raw instance buffer must be valid for the underlying API.
- All bottom level acceleration structures, referenced in the raw instance buffer must be valid and built, when the corresponding top level acceleration structure is built. (builds may happen in the same invocation of this function).
- At the time when the top level acceleration structure is used in a bind group, all associated bottom level acceleration structures must be valid, and built (no later than the time when the top level acceleration structure was built).
Trait Implementations§
Source§impl Debug for CommandEncoder
impl Debug for CommandEncoder
Source§impl Hash for CommandEncoder
impl Hash for CommandEncoder
Source§impl Ord for CommandEncoder
impl Ord for CommandEncoder
Source§fn cmp(&self, other: &CommandEncoder) -> Ordering
fn cmp(&self, other: &CommandEncoder) -> Ordering
1.21.0 · Source§fn max(self, other: Self) -> Selfwhere
Self: Sized,
fn max(self, other: Self) -> Selfwhere
Self: Sized,
Source§impl PartialEq for CommandEncoder
impl PartialEq for CommandEncoder
Source§impl PartialOrd for CommandEncoder
impl PartialOrd for CommandEncoder
impl Eq for CommandEncoder
Auto Trait Implementations§
impl Freeze for CommandEncoder
impl !RefUnwindSafe for CommandEncoder
impl Send for CommandEncoder
impl Sync for CommandEncoder
impl Unpin for CommandEncoder
impl !UnwindSafe for CommandEncoder
Blanket Implementations§
Source§impl<T, U> AsBindGroupShaderType<U> for T
impl<T, U> AsBindGroupShaderType<U> for T
Source§fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
fn as_bind_group_shader_type(&self, _images: &RenderAssets<GpuImage>) -> U
T
ShaderType
for self
. When used in AsBindGroup
derives, it is safe to assume that all images in self
exist.Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<Q, K> Comparable<K> for Q
impl<Q, K> Comparable<K> for Q
Source§impl<T> Downcast for Twhere
T: Any,
impl<T> Downcast for Twhere
T: Any,
Source§fn into_any(self: Box<T>) -> Box<dyn Any>
fn into_any(self: Box<T>) -> Box<dyn Any>
Box<dyn Trait>
(where Trait: Downcast
) to Box<dyn Any>
, which can then be
downcast
into Box<dyn ConcreteType>
where ConcreteType
implements Trait
.Source§fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Rc<Trait>
(where Trait: Downcast
) to Rc<Any>
, which can then be further
downcast
into Rc<ConcreteType>
where ConcreteType
implements Trait
.Source§fn as_any(&self) -> &(dyn Any + 'static)
fn as_any(&self) -> &(dyn Any + 'static)
&Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &Any
’s vtable from &Trait
’s.Source§fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
&mut Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &mut Any
’s vtable from &mut Trait
’s.Source§impl<T> Downcast for Twhere
T: Any,
impl<T> Downcast for Twhere
T: Any,
Source§fn into_any(self: Box<T>) -> Box<dyn Any>
fn into_any(self: Box<T>) -> Box<dyn Any>
Box<dyn Trait>
(where Trait: Downcast
) to Box<dyn Any>
. Box<dyn Any>
can
then be further downcast
into Box<ConcreteType>
where ConcreteType
implements Trait
.Source§fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
fn into_any_rc(self: Rc<T>) -> Rc<dyn Any>
Rc<Trait>
(where Trait: Downcast
) to Rc<Any>
. Rc<Any>
can then be
further downcast
into Rc<ConcreteType>
where ConcreteType
implements Trait
.Source§fn as_any(&self) -> &(dyn Any + 'static)
fn as_any(&self) -> &(dyn Any + 'static)
&Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &Any
’s vtable from &Trait
’s.Source§fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
fn as_any_mut(&mut self) -> &mut (dyn Any + 'static)
&mut Trait
(where Trait: Downcast
) to &Any
. This is needed since Rust cannot
generate &mut Any
’s vtable from &mut Trait
’s.Source§impl<T> DowncastSend for T
impl<T> DowncastSend for T
Source§impl<T> DowncastSync for T
impl<T> DowncastSync for T
Source§impl<Q, K> Equivalent<K> for Q
impl<Q, K> Equivalent<K> for Q
Source§fn equivalent(&self, key: &K) -> bool
fn equivalent(&self, key: &K) -> bool
key
and return true
if they are equal.Source§impl<T> FmtForward for T
impl<T> FmtForward for T
Source§fn fmt_binary(self) -> FmtBinary<Self>where
Self: Binary,
fn fmt_binary(self) -> FmtBinary<Self>where
Self: Binary,
self
to use its Binary
implementation when Debug
-formatted.Source§fn fmt_display(self) -> FmtDisplay<Self>where
Self: Display,
fn fmt_display(self) -> FmtDisplay<Self>where
Self: Display,
self
to use its Display
implementation when
Debug
-formatted.Source§fn fmt_lower_exp(self) -> FmtLowerExp<Self>where
Self: LowerExp,
fn fmt_lower_exp(self) -> FmtLowerExp<Self>where
Self: LowerExp,
self
to use its LowerExp
implementation when
Debug
-formatted.Source§fn fmt_lower_hex(self) -> FmtLowerHex<Self>where
Self: LowerHex,
fn fmt_lower_hex(self) -> FmtLowerHex<Self>where
Self: LowerHex,
self
to use its LowerHex
implementation when
Debug
-formatted.Source§fn fmt_octal(self) -> FmtOctal<Self>where
Self: Octal,
fn fmt_octal(self) -> FmtOctal<Self>where
Self: Octal,
self
to use its Octal
implementation when Debug
-formatted.Source§fn fmt_pointer(self) -> FmtPointer<Self>where
Self: Pointer,
fn fmt_pointer(self) -> FmtPointer<Self>where
Self: Pointer,
self
to use its Pointer
implementation when
Debug
-formatted.Source§fn fmt_upper_exp(self) -> FmtUpperExp<Self>where
Self: UpperExp,
fn fmt_upper_exp(self) -> FmtUpperExp<Self>where
Self: UpperExp,
self
to use its UpperExp
implementation when
Debug
-formatted.Source§fn fmt_upper_hex(self) -> FmtUpperHex<Self>where
Self: UpperHex,
fn fmt_upper_hex(self) -> FmtUpperHex<Self>where
Self: UpperHex,
self
to use its UpperHex
implementation when
Debug
-formatted.Source§impl<S> FromSample<S> for S
impl<S> FromSample<S> for S
fn from_sample_(s: S) -> S
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
fn instrument(self, span: Span) -> Instrumented<Self> ⓘ
Source§fn in_current_span(self) -> Instrumented<Self> ⓘ
fn in_current_span(self) -> Instrumented<Self> ⓘ
Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
fn into_either(self, into_left: bool) -> Either<Self, Self> ⓘ
self
into a Left
variant of Either<Self, Self>
if into_left
is true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read moreSource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self> ⓘ
self
into a Left
variant of Either<Self, Self>
if into_left(&self)
returns true
.
Converts self
into a Right
variant of Either<Self, Self>
otherwise. Read moreSource§impl<F, T> IntoSample<T> for Fwhere
T: FromSample<F>,
impl<F, T> IntoSample<T> for Fwhere
T: FromSample<F>,
fn into_sample(self) -> T
Source§impl<T> Pipe for Twhere
T: ?Sized,
impl<T> Pipe for Twhere
T: ?Sized,
Source§fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> Rwhere
Self: Sized,
fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> Rwhere
Self: Sized,
Source§fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> Rwhere
R: 'a,
fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> Rwhere
R: 'a,
self
and passes that borrow into the pipe function. Read moreSource§fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> Rwhere
R: 'a,
fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> Rwhere
R: 'a,
self
and passes that borrow into the pipe function. Read moreSource§fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
Source§fn pipe_borrow_mut<'a, B, R>(
&'a mut self,
func: impl FnOnce(&'a mut B) -> R,
) -> R
fn pipe_borrow_mut<'a, B, R>( &'a mut self, func: impl FnOnce(&'a mut B) -> R, ) -> R
Source§fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
self
, then passes self.as_ref()
into the pipe function.Source§fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
self
, then passes self.as_mut()
into the pipe
function.Source§fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
self
, then passes self.deref()
into the pipe function.Source§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<T> Tap for T
impl<T> Tap for T
Source§fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
Borrow<B>
of a value. Read moreSource§fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
BorrowMut<B>
of a value. Read moreSource§fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
AsRef<R>
view of a value. Read moreSource§fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
AsMut<R>
view of a value. Read moreSource§fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
Deref::Target
of a value. Read moreSource§fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
Deref::Target
of a value. Read moreSource§fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
.tap()
only in debug builds, and is erased in release builds.Source§fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
.tap_mut()
only in debug builds, and is erased in release
builds.Source§fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
.tap_borrow()
only in debug builds, and is erased in release
builds.Source§fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
.tap_borrow_mut()
only in debug builds, and is erased in release
builds.Source§fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
.tap_ref()
only in debug builds, and is erased in release
builds.Source§fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
.tap_ref_mut()
only in debug builds, and is erased in release
builds.Source§fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
.tap_deref()
only in debug builds, and is erased in release
builds.