use crate::{
compat::*,
cuda::*,
error::{CudaKernelError, Result},
kernel::Kernels,
kernels::macros::ops,
source::Source,
};
// Expands to one kernel identifier per reduction-op name via the `ops!` macro
// from `crate::kernels::macros` — presumably a `Kernel` value per name whose
// `.0` is the function name looked up in `Source::OpsReduce`; confirm against
// the macro definition.
ops!(sum, max, min, prod, mean, norm, argmax, argmin, all, any);
/// Launches a reduction kernel (`sum`, `max`, …) from the `OpsReduce` source
/// on the context's default stream, copying `metadata` to the device first.
///
/// `metadata` layout as consumed here: index `0` holds the input rank
/// (`num_dims`); index `2 + 2 * num_dims` holds the output-shape length; the
/// output shape itself starts at index `3 + 2 * num_dims`.
/// NOTE(review): the entries between index `1` and `2 + 2 * num_dims` are
/// presumably the input shape/strides read by the kernel itself — confirm
/// against the kernel source.
///
/// # Errors
/// Returns `CudaKernelError::MemoryError` if the host-to-device metadata copy
/// fails, and `CudaKernelError::LaunchError` if the kernel launch fails;
/// `load_function` errors are propagated unchanged.
pub fn call_ops_reduce<T, O>(
    kernel: crate::kernels::macros::Kernel,
    kernels: &Kernels,
    context: &Arc<CudaContext>,
    input: &CudaSlice<T>,
    output: &mut CudaSlice<O>,
    metadata: &[usize],
) -> Result<()>
where
    T: cudarc::driver::DeviceRepr,
    O: cudarc::driver::DeviceRepr,
{
    let function = kernels.load_function(context, Source::OpsReduce, kernel.0)?;

    // Decode the output element count from the trailing output-shape section
    // of the metadata buffer.
    let rank = metadata[0];
    let shape_start = 3 + 2 * rank;
    let shape_len = metadata[shape_start - 1];
    let total_outputs: usize = metadata[shape_start..shape_start + shape_len]
        .iter()
        .product();

    // One thread per output element: 256-thread blocks, and always at least
    // one block even when the output is empty.
    const BLOCK: u32 = 256;
    let blocks = (total_outputs as u32).div_ceil(BLOCK).max(1);
    let cfg = LaunchConfig {
        grid_dim: (blocks, 1, 1),
        block_dim: (BLOCK, 1, 1),
        shared_mem_bytes: 0,
    };

    let stream = context.default_stream();
    let metadata_dev = stream
        .memcpy_stod(metadata)
        .map_err(|e| CudaKernelError::MemoryError(format!("Failed to copy metadata: {:?}", e)))?;

    // SAFETY: the argument order (input, output, metadata) is assumed to
    // match the kernel's declared signature in `Source::OpsReduce` — TODO
    // confirm against the kernel source.
    unsafe {
        function
            .launch(&stream, cfg, |args| {
                args.arg(input).arg(output).arg(&metadata_dev);
            })
            .map_err(|e| {
                CudaKernelError::LaunchError(format!("Failed to launch kernel: {:?}", e))
            })?;
    }
    Ok(())
}