pub trait HybridWorkload: Send + Sync {
    type Result;

    // Required methods
    fn workload_size(&self) -> usize;
    fn execute_cpu(&self) -> Self::Result;
    fn execute_gpu(&self) -> HybridResult<Self::Result>;

    // Provided methods
    fn name(&self) -> &str { ... }
    fn supports_gpu(&self) -> bool { ... }
    fn memory_estimate(&self) -> usize { ... }
}
Trait for workloads that can be executed on CPU or GPU.
Implementors provide both CPU and GPU execution paths, allowing the
HybridDispatcher to choose the optimal backend based on workload size
and runtime measurements.
Example
use ringkernel_core::hybrid::{HybridWorkload, HybridResult};

struct VectorAdd {
    a: Vec<f32>,
    b: Vec<f32>,
}

impl HybridWorkload for VectorAdd {
    type Result = Vec<f32>;

    fn workload_size(&self) -> usize {
        self.a.len()
    }

    fn execute_cpu(&self) -> Self::Result {
        self.a.iter().zip(&self.b).map(|(a, b)| a + b).collect()
    }

    fn execute_gpu(&self) -> HybridResult<Self::Result> {
        // GPU implementation
        todo!("GPU kernel execution")
    }
}

Required Associated Types
type Result
The output type produced by executing the workload.
Required Methods
fn workload_size(&self) -> usize
Returns the size of the workload (number of elements to process).
This is used by the dispatcher to decide between CPU and GPU execution.
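For illustration, a minimal sketch of what a size-based dispatch decision could look like; the Backend enum, choose_backend function, and gpu_threshold parameter are hypothetical names, not part of the crate's API:

use ringkernel_core::hybrid::HybridWorkload;

// Hypothetical sketch of a size-based dispatch decision. `Backend`,
// `choose_backend`, and `gpu_threshold` are illustrative names only.
enum Backend {
    Cpu,
    Gpu,
}

fn choose_backend<W: HybridWorkload>(workload: &W, gpu_threshold: usize) -> Backend {
    // Small workloads stay on the CPU; large ones go to the GPU when supported.
    if workload.supports_gpu() && workload.workload_size() >= gpu_threshold {
        Backend::Gpu
    } else {
        Backend::Cpu
    }
}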
fn execute_cpu(&self) -> Self::Result
Executes the workload on CPU.
This should typically use Rayon or similar for parallel CPU execution.
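As a sketch, assuming the rayon crate is available as a dependency, a parallel CPU path for the VectorAdd example above could look like this (the vector_add_parallel helper is illustrative):

use rayon::prelude::*;

// Sketch (assuming the rayon crate): a parallel CPU path for the
// VectorAdd example above.
fn vector_add_parallel(a: &[f32], b: &[f32]) -> Vec<f32> {
    // par_iter() splits the element-wise addition across Rayon's thread pool.
    a.par_iter().zip(b.par_iter()).map(|(x, y)| x + y).collect()
}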
fn execute_gpu(&self) -> HybridResult<Self::Result>
Executes the workload on GPU.
Returns an error if GPU execution fails.
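One pattern the error return enables is falling back to the CPU path when GPU execution fails. A minimal sketch, assuming HybridResult<T> behaves like a standard Result; the run_with_fallback helper is hypothetical and not part of the crate:

use ringkernel_core::hybrid::HybridWorkload;

// Hypothetical helper: prefer the GPU path, fall back to the CPU path
// if the workload opts out or the GPU call returns an error.
fn run_with_fallback<W: HybridWorkload>(workload: &W) -> W::Result {
    if workload.supports_gpu() {
        if let Ok(result) = workload.execute_gpu() {
            return result;
        }
        // GPU execution failed; fall through to the CPU path.
    }
    workload.execute_cpu()
}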
Provided Methods
fn name(&self) -> &str
Returns the name of the workload.
fn supports_gpu(&self) -> bool
Returns whether GPU execution is supported.
Override to return false if this workload doesn’t have a GPU implementation.
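For example, a CPU-only workload can opt out by overriding this method; a sketch, where the Checksum type and its fields are made up for illustration:

use ringkernel_core::hybrid::{HybridWorkload, HybridResult};

// Hypothetical CPU-only workload; the type and its fields are illustrative.
struct Checksum {
    data: Vec<u8>,
}

impl HybridWorkload for Checksum {
    type Result = u64;

    fn workload_size(&self) -> usize {
        self.data.len()
    }

    fn execute_cpu(&self) -> Self::Result {
        self.data.iter().map(|&b| b as u64).sum()
    }

    fn execute_gpu(&self) -> HybridResult<Self::Result> {
        // The trait still requires this method even when supports_gpu()
        // returns false; the dispatcher is not expected to call it.
        unimplemented!("Checksum has no GPU path")
    }

    // Opt out of GPU dispatch entirely.
    fn supports_gpu(&self) -> bool {
        false
    }
}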
fn memory_estimate(&self) -> usize
Returns an estimate of the memory required for this workload, in bytes.
Used by the resource guard to prevent OOM situations.
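As a sketch, an override for the VectorAdd example above could count the two f32 input buffers plus the output buffer; this method would live inside the impl HybridWorkload for VectorAdd block shown in the Example:

fn memory_estimate(&self) -> usize {
    // Inputs `a` and `b` plus an output vector the same length as `a`,
    // all holding f32 elements.
    (self.a.len() + self.b.len() + self.a.len()) * std::mem::size_of::<f32>()
}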