pub struct SharedGpuContext { /* private fields */ }

Expand description
Shared GPU context for efficient resource management across all crates Now supports both single-GPU (legacy) and multi-GPU operations
Implementations§
Source · pub async fn global() -> UnifiedGpuResult<&'static Self>
pub async fn global() -> UnifiedGpuResult<&'static Self>
Get the global shared GPU context (singleton pattern) Note: This creates a new context each time for now. In production, this would be a proper singleton with atomic initialization.
Source · pub async fn with_multi_gpu() -> UnifiedGpuResult<Self>
pub async fn with_multi_gpu() -> UnifiedGpuResult<Self>
Create a new shared GPU context with multi-GPU support
Source · pub fn adapter_info(&self) -> &AdapterInfo
pub fn adapter_info(&self) -> &AdapterInfo
Get adapter info
Source · pub fn get_buffer(
&self,
size: u64,
usage: BufferUsages,
label: Option<&str>,
) -> Buffer
pub fn get_buffer( &self, size: u64, usage: BufferUsages, label: Option<&str>, ) -> Buffer
Get or create a buffer from the pool
Source · pub fn return_buffer(&self, buffer: Buffer, size: u64, usage: BufferUsages)
pub fn return_buffer(&self, buffer: Buffer, size: u64, usage: BufferUsages)
Return a buffer to the pool for reuse
Source · pub fn get_compute_pipeline(
&self,
shader_key: &str,
shader_source: &str,
entry_point: &str,
) -> UnifiedGpuResult<Arc<ComputePipeline>>
pub fn get_compute_pipeline( &self, shader_key: &str, shader_source: &str, entry_point: &str, ) -> UnifiedGpuResult<Arc<ComputePipeline>>
Get or create a compute pipeline from cache
Source · pub fn buffer_pool_stats(&self) -> BufferPoolStats
pub fn buffer_pool_stats(&self) -> BufferPoolStats
Get buffer pool statistics
Source · pub fn get_optimal_workgroup(
&self,
operation: &str,
data_size: usize,
) -> (u32, u32, u32)
pub fn get_optimal_workgroup( &self, operation: &str, data_size: usize, ) -> (u32, u32, u32)
Get optimal workgroup configuration for given operation type and data size
Source · pub fn get_workgroup_declaration(
&self,
operation: &str,
data_size: usize,
) -> String
pub fn get_workgroup_declaration( &self, operation: &str, data_size: usize, ) -> String
Generate optimized WGSL workgroup declaration for operation
Source · pub fn is_multi_gpu_enabled(&self) -> bool
pub fn is_multi_gpu_enabled(&self) -> bool
Check if multi-GPU mode is enabled
Source · pub async fn device_count(&self) -> usize
pub async fn device_count(&self) -> usize
Get the number of available GPU devices
Source · pub async fn get_device_info(&self) -> Vec<(DeviceId, String, String)>
pub async fn get_device_info(&self) -> Vec<(DeviceId, String, String)>
Get information about all GPU devices
Source · pub async fn get_device(&self, device_id: DeviceId) -> Option<Arc<GpuDevice>>
pub async fn get_device(&self, device_id: DeviceId) -> Option<Arc<GpuDevice>>
Get a specific GPU device by ID
Source · pub async fn optimal_device_for_operation(
&self,
operation: &str,
_data_size: usize,
) -> DeviceId
pub async fn optimal_device_for_operation( &self, operation: &str, _data_size: usize, ) -> DeviceId
Get the optimal device for a specific operation
Source · pub async fn distribute_workload(
&self,
workload: Workload,
) -> UnifiedGpuResult<Vec<DeviceWorkload>>
pub async fn distribute_workload( &self, workload: Workload, ) -> UnifiedGpuResult<Vec<DeviceWorkload>>
Distribute a workload across multiple GPUs
Source · pub async fn execute_multi_gpu_workload(
&self,
workload_id: String,
workload: Workload,
) -> UnifiedGpuResult<Vec<Vec<u8>>>
pub async fn execute_multi_gpu_workload( &self, workload_id: String, workload: Workload, ) -> UnifiedGpuResult<Vec<Vec<u8>>>
Execute a workload on multiple GPUs and aggregate results
Source · pub async fn get_gpu_utilization(&self) -> HashMap<DeviceId, f32>
pub async fn get_gpu_utilization(&self) -> HashMap<DeviceId, f32>
Get real-time GPU utilization across all devices
Source · pub async fn get_multi_gpu_stats(&self) -> MultiGpuStats
pub async fn get_multi_gpu_stats(&self) -> MultiGpuStats
Get performance statistics for multi-GPU operations
Source · pub async fn set_load_balancing_strategy(
&self,
_strategy: LoadBalancingStrategy,
) -> UnifiedGpuResult<()>
pub async fn set_load_balancing_strategy( &self, _strategy: LoadBalancingStrategy, ) -> UnifiedGpuResult<()>
Set load balancing strategy for multi-GPU operations
Source · pub async fn add_gpu_device(
&self,
device: Arc<GpuDevice>,
) -> UnifiedGpuResult<()>
pub async fn add_gpu_device( &self, device: Arc<GpuDevice>, ) -> UnifiedGpuResult<()>
Add a new GPU device to the multi-GPU context (hot-plugging support)
Source · pub async fn remove_gpu_device(
&self,
device_id: DeviceId,
) -> UnifiedGpuResult<()>
pub async fn remove_gpu_device( &self, device_id: DeviceId, ) -> UnifiedGpuResult<()>
Remove a GPU device from the multi-GPU context
Trait Implementations§
Source § fn clone(&self) -> SharedGpuContext
fn clone(&self) -> SharedGpuContext
1.0.0 · Source § fn clone_from(&mut self, source: &Self)
fn clone_from(&mut self, source: &Self)
Performs copy-assignment from `source`. Read more

Auto Trait Implementations§
Blanket Implementations§
Source § impl<T> BorrowMut<T> for T
where
    T: ?Sized,
impl<T> BorrowMut<T> for T
where
    T: ?Sized,
Source § fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source § impl<T> CloneToUninit for T
where
    T: Clone,
impl<T> CloneToUninit for T
where
    T: Clone,
Source § impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
impl<SS, SP> SupersetOf<SS> for SP
where
    SS: SubsetOf<SP>,
Source § fn to_subset(&self) -> Option<SS>
fn to_subset(&self) -> Option<SS>
The inverse inclusion map: attempts to construct `self` from the equivalent element of its
superset. Read more

Source § fn is_in_subset(&self) -> bool
fn is_in_subset(&self) -> bool
Checks if `self` is actually part of its subset `T` (and can be converted to it).

Source § fn to_subset_unchecked(&self) -> SS
fn to_subset_unchecked(&self) -> SS
Use with care! Same as `self.to_subset` but without any property checks. Always succeeds.

Source § fn from_subset(element: &SS) -> SP
fn from_subset(element: &SS) -> SP
The inclusion map: converts `self` to the equivalent element of its superset.