pub struct Bencher { /* private fields */ }
The Bencher provides iteration control for benchmarks.
It uses Criterion-style batched sampling:
- A warmup phase estimates per-iteration time
- A measurement phase batches iterations into samples
- Each sample is the average of many iterations, which reduces noise
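A minimal end-to-end sketch of a driver built on these phases. The `mybench` crate path, the 100-sample and ~3 s figures, and the warmup readiness check are illustrative assumptions, not part of this API:

```rust
use mybench::Bencher; // hypothetical crate path; substitute the real one

fn bench_sum() {
    // Assumed arguments: allocation tracking off, 100 target samples.
    let mut b = Bencher::with_config(false, 100);

    // Warmup: iter() records individual timings until an estimate is available
    // (the exact readiness condition is an assumption, not documented here).
    while b.estimated_iter_time_ns().is_none() {
        b.iter(|| (0..1_000u64).sum::<u64>());
    }

    // Switch to measurement with a ~3 s budget (3_000_000_000 ns, illustrative).
    b.start_measurement(3_000_000_000);
    while !b.has_enough_samples() {
        b.iter(|| (0..1_000u64).sum::<u64>());
    }

    // Finalize into a BenchmarkResult (its fields are not documented on this page).
    let _result = b.finish();
}
```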
Implementations
impl Bencher
pub fn with_config(track_allocations: bool, target_samples: usize) -> Self
Create a Bencher with a custom target sample count and optional allocation tracking
pub fn set_iters_per_sample(&mut self, iters: u64)
Set iterations per sample (called after warmup estimation)
pub fn estimated_iter_time_ns(&self) -> Option<u64>
Get estimated iteration time from warmup (in nanoseconds)
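A sketch of how the warmup estimate might feed set_iters_per_sample. The per-sample budget and rounding policy are illustrative; only the two method calls come from this page, and `mybench` remains a hypothetical crate path:

```rust
use mybench::Bencher; // hypothetical crate path, as above

// Turn the warmup estimate into a batch size so each sample fills roughly
// `sample_budget_ns` of wall time.
fn configure_batching(b: &mut Bencher, sample_budget_ns: u64) {
    if let Some(per_iter_ns) = b.estimated_iter_time_ns() {
        // At least one iteration per sample; otherwise fill the budget.
        let iters = (sample_budget_ns / per_iter_ns.max(1)).max(1);
        b.set_iters_per_sample(iters);
    }
}
```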
pub fn start_measurement(&mut self, measurement_time_ns: u64)
Transition from warmup to measurement phase
pub fn iter<T, F>(&mut self, f: F)
where
    F: FnMut() -> T,
Run the benchmark closure for one iteration.
During warmup, individual timings are recorded for estimation; during measurement, timings accumulate into batched samples.
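Because the closure is generic over a return type T, its result is presumably consumed so the optimizer cannot discard the work, as in Criterion; that motivation is an assumption. A single-call sketch using the standard-library `black_box` guard:

```rust
use std::hint::black_box;

// One call per invocation; the harness decides whether it lands in warmup
// or in a measured batch. black_box keeps the compiler from optimizing the
// benchmarked work away.
fn step(b: &mut mybench::Bencher, data: &[u64]) {
    b.iter(|| black_box(data).iter().copied().sum::<u64>());
}
```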
pub fn iter_with_setup<T, S, F, R>(&mut self, setup: S, routine: F)
Run the benchmark with a separate setup phase
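The rendered signature omits the trait bounds; the sketch below assumes Criterion-like bounds where setup produces the value the routine consumes (S: FnMut() -> R, F: FnMut(R) -> T) and that setup time is excluded from measurement. Both points are assumptions:

```rust
// Hypothetical mybench path as above. Setup builds a fresh, unsorted Vec so
// the routine never sees already-sorted input.
fn bench_sort(b: &mut mybench::Bencher) {
    b.iter_with_setup(
        || (0..10_000u32).rev().collect::<Vec<_>>(), // setup (assumed untimed)
        |mut v| {
            v.sort();
            v // routine: the timed work
        },
    );
}
```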
pub fn iter_batched<T, S, F, R>(
    &mut self,
    batch_size: u64,
    setup: S,
    routine: F,
)
Run benchmark with batched iterations (user-specified batch size)
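A call-shape sketch only: whether setup runs once per batch or once per iteration is not stated on this page, and the bounds are assumed to mirror iter_with_setup:

```rust
fn bench_push(b: &mut mybench::Bencher) {
    b.iter_batched(
        64,                                 // batch_size: iterations per batch (assumed meaning)
        || Vec::<u64>::with_capacity(1024), // setup
        |mut v| {
            v.push(1);
            v // routine: the timed work
        },
    );
}
```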
pub fn iter_async_standalone<T, F, Fut>(&mut self, f: F)
Run an async benchmark closure (standalone - creates its own runtime)
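Assuming F: FnMut() -> Fut with Fut: Future<Output = T> (the bounds are not shown), and taking the doc at its word that the method builds its own runtime, no async setup is needed at the call site:

```rust
// Stand-in for real async work; the closure just returns the future.
async fn fake_fetch(key: &str) -> usize {
    key.len()
}

fn bench_fetch(b: &mut mybench::Bencher) {
    b.iter_async_standalone(|| fake_fetch("hello"));
}
```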
pub fn iter_async<T, F, Fut>(&mut self, f: F)
Run an async benchmark closure within an existing tokio runtime
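The doc only says "within an existing tokio runtime", so exactly how the future is driven is an assumption; this sketch enters a runtime context on the calling thread before invoking it:

```rust
fn bench_in_runtime(b: &mut mybench::Bencher) {
    // Assumption: a tokio runtime context must be active around the call.
    let rt = tokio::runtime::Runtime::new().expect("build tokio runtime");
    let _guard = rt.enter();
    b.iter_async(|| async {
        tokio::task::yield_now().await;
    });
}
```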
pub fn has_enough_samples(&self) -> bool
Check if we’ve collected enough samples
pub fn take_samples(&mut self) -> Vec<Sample>
Take ownership of collected samples (clears warmup data)
pub fn iteration_count(&self) -> u64
Get total iteration count
pub fn target_samples(&self) -> usize
Get target sample count
pub fn finish(self) -> BenchmarkResult
Finalize and return results
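A finalization sketch; the Sample and BenchmarkResult fields are not documented on this page, so they are left opaque here:

```rust
fn finalize(mut b: mybench::Bencher) {
    println!(
        "ran {} iterations toward {} target samples",
        b.iteration_count(),
        b.target_samples()
    );
    // take_samples() would instead yield the raw Vec<Sample> and clear warmup
    // data; here the Bencher is consumed into the aggregated result.
    let _result = b.finish();
}
```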
Auto Trait Implementations
impl !Freeze for Bencher
impl RefUnwindSafe for Bencher
impl Send for Bencher
impl Sync for Bencher
impl Unpin for Bencher
impl UnwindSafe for Bencher
Blanket Implementations
impl<T> ArchivePointee for T
type ArchivedMetadata = ()
fn pointer_metadata(_: &<T as ArchivePointee>::ArchivedMetadata) -> <T as Pointee>::Metadata
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> LayoutRaw for T
fn layout_raw(_: <T as Pointee>::Metadata) -> Result<Layout, LayoutError>
impl<T, N1, N2> Niching<NichedOption<T, N1>> for N2
unsafe fn is_niched(niched: *const NichedOption<T, N1>) -> bool
fn resolve_niched(out: Place<NichedOption<T, N1>>)
Writes data to out indicating that a T is niched.