pub struct GpuContext<C> {
pub device: Device,
pub queue: Queue,
pub ntt_pipeline: ComputePipeline,
pub ntt_fused_pipeline: ComputePipeline,
pub ntt_tile_dit_no_bitreverse_pipeline: ComputePipeline,
pub ntt_tile_dif_pipeline: ComputePipeline,
pub ntt_tile_dif_fused_pipeline: ComputePipeline,
pub ntt_tile_fused_pointwise_pipeline: ComputePipeline,
pub ntt_global_stage_pipeline: ComputePipeline,
pub ntt_global_stage_radix4_pipeline: ComputePipeline,
pub ntt_global_stage_dif_pipeline: ComputePipeline,
pub ntt_global_stage_dif_fused_pointwise_pipeline: ComputePipeline,
pub ntt_bitreverse_pipeline: ComputePipeline,
pub ntt_bitreverse_fused_pointwise_pipeline: ComputePipeline,
pub coset_shift_pipeline: ComputePipeline,
pub pointwise_poly_pipeline: ComputePipeline,
pub to_montgomery_pipeline: ComputePipeline,
pub from_montgomery_pipeline: ComputePipeline,
pub msm_agg_g1_pipeline: ComputePipeline,
pub msm_sum_g1_pipeline: ComputePipeline,
pub msm_agg_g2_pipeline: ComputePipeline,
pub msm_sum_g2_pipeline: ComputePipeline,
pub msm_to_mont_g1_pipeline: ComputePipeline,
pub msm_to_mont_g2_pipeline: ComputePipeline,
pub msm_weight_g1_pipeline: ComputePipeline,
pub msm_subsum_phase1_g1_pipeline: ComputePipeline,
pub msm_subsum_phase2_g1_pipeline: ComputePipeline,
pub msm_weight_g2_pipeline: ComputePipeline,
pub msm_subsum_phase1_g2_pipeline: ComputePipeline,
pub msm_subsum_phase2_g2_pipeline: ComputePipeline,
pub msm_reduce_g1_pipeline: ComputePipeline,
pub msm_reduce_g2_pipeline: ComputePipeline,
pub ntt_bind_group_layout: BindGroupLayout,
pub ntt_fused_shift_bgl: BindGroupLayout,
pub ntt_params_bind_group_layout: BindGroupLayout,
pub coset_shift_bind_group_layout: BindGroupLayout,
pub pointwise_poly_bind_group_layout: BindGroupLayout,
pub pointwise_fused_bind_group_layout: BindGroupLayout,
pub montgomery_bind_group_layout: BindGroupLayout,
pub msm_agg_bind_group_layout: BindGroupLayout,
pub msm_sum_bind_group_layout: BindGroupLayout,
pub msm_weight_g1_bind_group_layout: BindGroupLayout,
pub msm_weight_g2_bind_group_layout: BindGroupLayout,
pub msm_subsum_phase1_bind_group_layout: BindGroupLayout,
pub msm_subsum_phase2_bind_group_layout: BindGroupLayout,
pub msm_reduce_bind_group_layout: BindGroupLayout,
/* private fields */
}

Implementations
impl<C: GpuCurve> GpuContext<C>
pub async fn read_buffer( &self, buffer: &Buffer, size: BufferAddress, ) -> Result<Vec<u8>>
pub async fn read_buffers_batch(
    &self,
    entries: &[(&Buffer, BufferAddress)],
) -> Result<Vec<Vec<u8>>>
Reads multiple GPU buffers in a single command submission for efficiency.
All copy commands are batched into one encoder, submitted together, and then all staging buffers are mapped concurrently. This avoids the overhead of per-buffer submission and device polling.
impl<C: GpuCurve> GpuContext<C>
pub fn execute_h_pipeline(&self, bufs: &HPolyBuffers<'_>, n: u32)
impl<C: GpuCurve> GpuContext<C>
pub fn execute_msm( &self, is_g2: bool, bufs: &MsmBuffers<'_>, num_active_buckets: u32, num_dispatched: u32, has_chunks: bool, num_windows: u32, skip_montgomery: bool, )
pub fn convert_to_montgomery(&self, buf: &Buffer, is_g2: bool)
Convert a bases buffer to Montgomery form in-place (one-time, for persistent bases).
impl<C: GpuCurve> GpuContext<C>
pub fn execute_to_montgomery(&self, buffer: &Buffer, num_elements: u32)
pub fn execute_from_montgomery(&self, buffer: &Buffer, num_elements: u32)
pub fn execute_ntt( &self, data_buffer: &Buffer, twiddles_buffer: &Buffer, num_elements: u32, )
pub fn execute_ntt_global(
    &self,
    data_buffer: &Buffer,
    twiddles_buffer: &Buffer,
    num_elements: u32,
)
Multi-stage global NTT for sizes > NTT_TILE_SIZE (512).
Algorithm:
- Bit-reversal permutation (in-place)
- Iterative butterfly stages: for each `half_len` in 1, 2, 4, …, n/2, dispatches workgroups that combine pairs of elements using twiddle factors

Each stage updates a uniform buffer with [n, half_len, log_n, 0] so the shader knows the current butterfly geometry.
pub fn execute_coset_shift( &self, data_buffer: &Buffer, shifts_buffer: &Buffer, num_elements: u32, )
pub fn execute_pointwise_poly( &self, a_buf: &Buffer, b_buf: &Buffer, c_buf: &Buffer, h_buf: &Buffer, z_invs_buf: &Buffer, num_elements: u32, )
Auto Trait Implementations§
impl<C> Freeze for GpuContext<C>
impl<C> !RefUnwindSafe for GpuContext<C>
impl<C> Send for GpuContext<C> where C: Send
impl<C> Sync for GpuContext<C> where C: Sync
impl<C> Unpin for GpuContext<C> where C: Unpin
impl<C> UnsafeUnpin for GpuContext<C>
impl<C> !UnwindSafe for GpuContext<C>
Blanket Implementations§
impl<T> BorrowMut<T> for T where T: ?Sized

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value.
Source§impl<T> FmtForward for T
impl<T> FmtForward for T
Source§fn fmt_binary(self) -> FmtBinary<Self>where
Self: Binary,
fn fmt_binary(self) -> FmtBinary<Self>where
Self: Binary,
Causes
self to use its Binary implementation when Debug-formatted.Source§fn fmt_display(self) -> FmtDisplay<Self>where
Self: Display,
fn fmt_display(self) -> FmtDisplay<Self>where
Self: Display,
Causes
self to use its Display implementation when
Debug-formatted.Source§fn fmt_lower_exp(self) -> FmtLowerExp<Self>where
Self: LowerExp,
fn fmt_lower_exp(self) -> FmtLowerExp<Self>where
Self: LowerExp,
Causes
self to use its LowerExp implementation when
Debug-formatted.Source§fn fmt_lower_hex(self) -> FmtLowerHex<Self>where
Self: LowerHex,
fn fmt_lower_hex(self) -> FmtLowerHex<Self>where
Self: LowerHex,
Causes
self to use its LowerHex implementation when
Debug-formatted.Source§fn fmt_octal(self) -> FmtOctal<Self>where
Self: Octal,
fn fmt_octal(self) -> FmtOctal<Self>where
Self: Octal,
Causes
self to use its Octal implementation when Debug-formatted.Source§fn fmt_pointer(self) -> FmtPointer<Self>where
Self: Pointer,
fn fmt_pointer(self) -> FmtPointer<Self>where
Self: Pointer,
Causes
self to use its Pointer implementation when
Debug-formatted.Source§fn fmt_upper_exp(self) -> FmtUpperExp<Self>where
Self: UpperExp,
fn fmt_upper_exp(self) -> FmtUpperExp<Self>where
Self: UpperExp,
Causes
self to use its UpperExp implementation when
Debug-formatted.Source§fn fmt_upper_hex(self) -> FmtUpperHex<Self>where
Self: UpperHex,
fn fmt_upper_hex(self) -> FmtUpperHex<Self>where
Self: UpperHex,
Causes
self to use its UpperHex implementation when
Debug-formatted.Source§impl<T> IntoEither for T
impl<T> IntoEither for T
Source§fn into_either(self, into_left: bool) -> Either<Self, Self>
fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left is true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts
self into a Left variant of Either<Self, Self>
if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self>
otherwise. Read moreSource§impl<T> Pipe for Twhere
T: ?Sized,
impl<T> Pipe for Twhere
T: ?Sized,
Source§fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> Rwhere
Self: Sized,
fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> Rwhere
Self: Sized,
Pipes by value. This is generally the method you want to use. Read more
Source§fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> Rwhere
R: 'a,
fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> Rwhere
R: 'a,
Borrows
self and passes that borrow into the pipe function. Read moreSource§fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> Rwhere
R: 'a,
fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> Rwhere
R: 'a,
Mutably borrows
self and passes that borrow into the pipe function. Read moreSource§fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R
Source§fn pipe_borrow_mut<'a, B, R>(
&'a mut self,
func: impl FnOnce(&'a mut B) -> R,
) -> R
fn pipe_borrow_mut<'a, B, R>( &'a mut self, func: impl FnOnce(&'a mut B) -> R, ) -> R
Source§fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
Borrows
self, then passes self.as_ref() into the pipe function.Source§fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
Mutably borrows
self, then passes self.as_mut() into the pipe
function.Source§fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
Borrows
self, then passes self.deref() into the pipe function.Source§impl<T> Pointable for T
impl<T> Pointable for T
Source§impl<T> Tap for T
impl<T> Tap for T
Source§fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
Immutable access to the
Borrow<B> of a value. Read moreSource§fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
Mutable access to the
BorrowMut<B> of a value. Read moreSource§fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
Immutable access to the
AsRef<R> view of a value. Read moreSource§fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
Mutable access to the
AsMut<R> view of a value. Read moreSource§fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
Immutable access to the
Deref::Target of a value. Read moreSource§fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
Mutable access to the
Deref::Target of a value. Read moreSource§fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
Calls
.tap() only in debug builds, and is erased in release builds.Source§fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
Calls
.tap_mut() only in debug builds, and is erased in release
builds.Source§fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
Calls
.tap_borrow() only in debug builds, and is erased in release
builds.Source§fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
Calls
.tap_borrow_mut() only in debug builds, and is erased in release
builds.Source§fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
Calls
.tap_ref() only in debug builds, and is erased in release
builds.Source§fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
Calls
.tap_ref_mut() only in debug builds, and is erased in release
builds.Source§fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
Calls
.tap_deref() only in debug builds, and is erased in release
builds.