#[cfg(feature = "webgpu")]
#[allow(unused_imports)]
use wgpu;
#[cfg(feature = "webgpu")]
pub mod backend;
#[cfg(feature = "webgpu")]
pub mod buffer;
#[cfg(feature = "webgpu")]
pub mod device;
#[cfg(feature = "webgpu")]
pub mod error;
#[cfg(feature = "webgpu")]
pub mod kernels;
#[cfg(feature = "webgpu")]
pub mod memory;
#[cfg(feature = "webgpu")]
pub mod multi_device;
#[cfg(feature = "webgpu")]
pub mod pipeline;
#[cfg(feature = "webgpu")]
pub mod shader;
#[cfg(feature = "webgpu")]
pub use backend::{WebGpuBackend, WebGpuBackendBuilder};
#[cfg(feature = "webgpu")]
pub use buffer::{WebGpuBuffer, WebGpuBufferPool};
#[cfg(feature = "webgpu")]
pub use device::{
DeviceMemoryInfo, FeatureCompatibilityReport, WebGpuDevice, WebGpuDeviceBuilder,
WebGpuDeviceCapabilities,
};
#[cfg(feature = "webgpu")]
pub use error::{WebGpuError, WebGpuResult};
#[cfg(feature = "webgpu")]
pub use kernels::{WebGpuComputePipeline, WebGpuKernel, WebGpuKernelCache, WebGpuKernelExecutor};
#[cfg(feature = "webgpu")]
pub use memory::{WebGpuMemoryManager, WebGpuMemoryPool};
#[cfg(feature = "webgpu")]
pub use multi_device::{
DeviceAssignment, DeviceFilter, DeviceMetrics, DeviceSelectionContext, LoadBalancingStrategy,
ManagerStats, MultiDeviceConfig, MultiDeviceWebGpuManager, PerformanceMonitor, SystemMetrics,
WorkDistributionPlan, WorkGranularity, WorkPartition, WorkPriority,
};
#[cfg(feature = "webgpu")]
pub use pipeline::{ComputePipeline, PipelineCache, PipelineFactory};
#[cfg(feature = "webgpu")]
pub use shader::{ShaderCompiler, ShaderModule, ShaderSource};
use parking_lot::RwLock;
use std::sync::Arc;
/// Owned, cloneable snapshot of a `wgpu` adapter's identity.
///
/// Unlike `wgpu::AdapterInfo`, this type is defined in this crate, so it can
/// be stored, serialized, or extended without tracking upstream changes.
#[derive(Debug, Clone)]
pub struct AdapterInfo {
    /// Human-readable adapter name as reported by the driver.
    pub name: String,
    /// PCI vendor identifier.
    pub vendor: u32,
    /// PCI device identifier.
    pub device: u32,
    /// Hardware class (discrete GPU, integrated GPU, CPU fallback, …).
    pub device_type: wgpu::DeviceType,
    /// Free-form driver description string from the backend.
    pub driver_info: String,
    /// Which wgpu backend (Vulkan, Metal, DX12, GL, …) exposes this adapter.
    pub backend: wgpu::Backend,
}
impl From<wgpu::AdapterInfo> for AdapterInfo {
    /// Copies the identifying fields out of a `wgpu::AdapterInfo`; any
    /// fields this crate does not mirror are dropped via the `..` pattern.
    fn from(info: wgpu::AdapterInfo) -> Self {
        let wgpu::AdapterInfo {
            name,
            vendor,
            device,
            device_type,
            driver_info,
            backend,
            ..
        } = info;
        Self {
            name,
            vendor,
            device,
            device_type,
            driver_info,
            backend,
        }
    }
}
/// Process-wide shared `wgpu::Instance`, created lazily by `init()` and
/// handed out (behind an `Arc`) by `instance()`. `None` until `init()` runs.
static WEBGPU_INSTANCE: RwLock<Option<Arc<wgpu::Instance>>> = RwLock::new(None);
/// Lazily creates the process-wide [`wgpu::Instance`].
///
/// Idempotent: the first call builds the instance; later calls are no-ops.
/// Currently infallible, but keeps a [`WebGpuResult`] return so future
/// failure modes can be reported without an API break.
pub async fn init() -> WebGpuResult<()> {
    let mut slot = WEBGPU_INSTANCE.write();
    // Only construct the instance when the slot is still empty.
    slot.get_or_insert_with(|| {
        Arc::new(wgpu::Instance::new(wgpu::InstanceDescriptor {
            backends: wgpu::Backends::all(),
            flags: wgpu::InstanceFlags::default(),
            memory_budget_thresholds: wgpu::MemoryBudgetThresholds::default(),
            backend_options: wgpu::BackendOptions::default(),
            display: None,
        }))
    });
    Ok(())
}
/// Returns a handle to the shared [`wgpu::Instance`].
///
/// # Panics
/// Panics if [`init`] has not completed successfully beforehand.
pub fn instance() -> Arc<wgpu::Instance> {
    let guard = WEBGPU_INSTANCE.read();
    let shared = guard
        .as_ref()
        .expect("WebGPU instance not initialized - call init() first");
    Arc::clone(shared)
}
/// Reports whether WebGPU is usable: the crate was built with the `webgpu`
/// feature AND at least one adapter can be enumerated on this machine.
///
/// The probe is expensive — it builds a throwaway `wgpu::Instance`, spawns a
/// dedicated OS thread, and constructs a Tokio runtime — and this function is
/// called from `enumerate_adapters`, `get_best_adapter`, and `device_count`.
/// The result is therefore computed once and cached for the process lifetime
/// (adapter availability is not expected to change while the process runs).
pub fn is_available() -> bool {
    static AVAILABLE: std::sync::OnceLock<bool> = std::sync::OnceLock::new();
    *AVAILABLE.get_or_init(|| {
        cfg!(feature = "webgpu") && {
            let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
                backends: wgpu::Backends::all(),
                flags: wgpu::InstanceFlags::default(),
                memory_budget_thresholds: wgpu::MemoryBudgetThresholds::default(),
                backend_options: wgpu::BackendOptions::default(),
                display: None,
            });
            // Probe on a dedicated thread so this is safe to call from inside
            // an existing async runtime (`Runtime::block_on` panics when
            // nested inside another runtime's context).
            std::thread::spawn(move || {
                let rt = match tokio::runtime::Runtime::new() {
                    Ok(rt) => rt,
                    Err(_) => return false,
                };
                let adapters = rt.block_on(instance.enumerate_adapters(wgpu::Backends::all()));
                !adapters.is_empty()
            })
            .join()
            // A panicked probe thread counts as "not available".
            .unwrap_or(false)
        }
    })
}
/// Enumerates every WebGPU adapter across all backends.
///
/// # Errors
/// Returns [`WebGpuError::NotAvailable`] when WebGPU is unusable on this
/// system, or propagates any error from [`init`].
pub async fn enumerate_adapters() -> WebGpuResult<Vec<wgpu::Adapter>> {
    if !is_available() {
        return Err(WebGpuError::NotAvailable);
    }
    init().await?;
    let adapters = instance().enumerate_adapters(wgpu::Backends::all()).await;
    Ok(adapters)
}
/// Requests the most capable adapter available, preferring high-performance
/// hardware and falling back to any adapter before giving up.
///
/// # Errors
/// [`WebGpuError::NotAvailable`] when WebGPU is unusable, or
/// [`WebGpuError::NoAdapterFound`] when no request succeeds.
pub async fn get_best_adapter() -> WebGpuResult<wgpu::Adapter> {
    if !is_available() {
        return Err(WebGpuError::NotAvailable);
    }
    init().await?;
    let instance = instance();
    // Try preferences from most to least demanding; the first success wins.
    for power_preference in [
        wgpu::PowerPreference::HighPerformance,
        wgpu::PowerPreference::None,
    ] {
        let request = instance.request_adapter(&wgpu::RequestAdapterOptions {
            power_preference,
            compatible_surface: None,
            force_fallback_adapter: false,
        });
        if let Ok(adapter) = request.await {
            return Ok(adapter);
        }
    }
    Err(WebGpuError::NoAdapterFound)
}
pub fn get_adapter_info(adapter: &wgpu::Adapter) -> AdapterInfo {
adapter.get_info().into()
}
/// Returns the number of WebGPU adapters visible to this process.
///
/// Always returns `Some`: `Some(0)` when WebGPU is unavailable or any step of
/// enumeration fails. (The `Option` return is kept for caller compatibility.)
///
/// The async enumeration runs on a dedicated thread, mirroring the pattern in
/// `is_available`: calling `Runtime::block_on` directly would panic when this
/// function is invoked from inside an existing Tokio runtime.
pub fn device_count() -> Option<usize> {
    if !is_available() {
        return Some(0);
    }
    let count = std::thread::spawn(|| {
        let rt = match tokio::runtime::Runtime::new() {
            Ok(rt) => rt,
            // Could not build a runtime: report zero rather than panic.
            Err(_) => return 0,
        };
        rt.block_on(async {
            enumerate_adapters()
                .await
                .map(|adapters| adapters.len())
                .unwrap_or(0)
        })
    })
    .join()
    // A panicked worker thread also counts as zero adapters.
    .unwrap_or(0);
    Some(count)
}
/// Configuration knobs for constructing a WebGPU backend.
#[derive(Debug, Clone)]
pub struct WebGpuBackendConfig {
    /// Explicit adapter index to use; `None` lets the backend choose.
    pub adapter_index: Option<usize>,
    /// Power/performance preference passed to adapter selection.
    pub power_preference: wgpu::PowerPreference,
    /// Enables extra validation/debug behavior in the backend.
    pub debug_mode: bool,
    /// Upper bound, in bytes, for a single buffer allocation.
    pub max_buffer_size: u64,
    /// Whether compiled compute pipelines are cached for reuse.
    pub enable_pipeline_cache: bool,
    /// Default (x, y, z) workgroup dimensions for dispatched kernels.
    pub preferred_workgroup_size: (u32, u32, u32),
}
impl Default for WebGpuBackendConfig {
    /// Sensible defaults: automatic adapter choice, high-performance power
    /// preference, debug off, 2 GiB buffer cap, pipeline caching on, and
    /// 64-wide one-dimensional workgroups.
    fn default() -> Self {
        // 2 GiB maximum single-buffer allocation.
        const DEFAULT_MAX_BUFFER_SIZE: u64 = 2 * 1024 * 1024 * 1024;
        Self {
            adapter_index: None,
            power_preference: wgpu::PowerPreference::HighPerformance,
            debug_mode: false,
            max_buffer_size: DEFAULT_MAX_BUFFER_SIZE,
            enable_pipeline_cache: true,
            preferred_workgroup_size: (64, 1, 1),
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Smoke test: probes availability, and on machines with WebGPU support
    // verifies that init() succeeds and lists the enumerated adapters.
    // Intentionally does not assert on adapter contents, since results are
    // hardware-dependent.
    #[tokio::test]
    async fn test_webgpu_availability() {
        println!("WebGPU available: {}", is_available());
        if is_available() {
            let result = init().await;
            assert!(result.is_ok());
            let adapters = enumerate_adapters().await;
            if let Ok(adapters) = adapters {
                println!("Found {} WebGPU adapters", adapters.len());
                for (i, adapter) in adapters.iter().enumerate() {
                    let info = get_adapter_info(adapter);
                    println!("Adapter {}: {} ({:?})", i, info.name, info.device_type);
                }
            }
        }
    }
    // Exercises the preference-ordered adapter selection path; only runs the
    // request when WebGPU is available, and only prints on success.
    #[tokio::test]
    async fn test_best_adapter() {
        if is_available() {
            let _ = init().await;
            let result = get_best_adapter().await;
            if let Ok(adapter) = result {
                let info = get_adapter_info(&adapter);
                println!("Best adapter: {} ({:?})", info.name, info.device_type);
            }
        }
    }
    // Pins every Default value of WebGpuBackendConfig so accidental changes
    // to the defaults are caught at test time.
    #[test]
    fn test_backend_config() {
        let config = WebGpuBackendConfig::default();
        assert_eq!(
            config.power_preference,
            wgpu::PowerPreference::HighPerformance
        );
        assert!(!config.debug_mode);
        assert_eq!(config.max_buffer_size, 2 * 1024 * 1024 * 1024);
        assert!(config.enable_pipeline_cache);
        assert_eq!(config.preferred_workgroup_size, (64, 1, 1));
    }
}