pub struct Kernel<'a> {
pub builder: ExecuteKernel<'a>,
/* private fields */
}
Expand description
A kernel that can be executed.
Fields§
§builder: ExecuteKernel<'a>
The underlying kernel builder.
Implementations§
Source§impl<'a> Kernel<'a>
impl<'a> Kernel<'a>
Sourcepub fn arg<T: KernelArgument>(self, t: &'a T) -> Self
pub fn arg<T: KernelArgument>(self, t: &'a T) -> Self
Set a kernel argument.
The arguments must live at least as long as the kernel. Hence make sure they are not dropped while the kernel is still in use.
Example where this behaviour is enforced and leads to a compile-time error:
ⓘ
use rust_gpu_tools::opencl::Program;
fn would_break(program: &Program) {
let data = vec![1, 2, 3, 4];
let buffer = program.create_buffer_from_slice(&data).unwrap();
let kernel = program.create_kernel("my_kernel", 4, 256).unwrap();
let kernel = kernel.arg(&buffer);
// This drop wouldn't cause an error if the arguments weren't bound to the kernel's lifetime.
drop(buffer);
kernel.run().unwrap();
}
Examples found in repository?
examples/add.rs (line 45)
21pub fn main() {
22 // Define some data that should be operated on.
23 let aa: Vec<u32> = vec![1, 2, 3, 4];
24 let bb: Vec<u32> = vec![5, 6, 7, 8];
25
26 // This is the core. Here we write the interaction with the GPU independent of whether it is
27 // CUDA or OpenCL.
28 let closures = program_closures!(|program, _args| -> Result<Vec<u32>, GPUError> {
29 // Make sure the input data has the same length.
30 assert_eq!(aa.len(), bb.len());
31 let length = aa.len();
32
33 // Copy the data to the GPU.
34 let aa_buffer = program.create_buffer_from_slice(&aa)?;
35 let bb_buffer = program.create_buffer_from_slice(&bb)?;
36
37 // The result buffer has the same length as the input buffers.
38 let result_buffer = unsafe { program.create_buffer::<u32>(length)? };
39
40 // Get the kernel.
41 let kernel = program.create_kernel("add", 1, 1)?;
42
43 // Execute the kernel.
44 kernel
45 .arg(&(length as u32))
46 .arg(&aa_buffer)
47 .arg(&bb_buffer)
48 .arg(&result_buffer)
49 .run()?;
50
51 // Get the resulting data.
52 let mut result = vec![0u32; length];
53 program.read_into_buffer(&result_buffer, &mut result)?;
54
55 Ok(result)
56 });
57
58 // Get the first available device.
59 let device = *Device::all().first().unwrap();
60
61 // First we run it on CUDA.
62 let cuda_program = cuda(device);
63 let cuda_result = cuda_program.run(closures, ()).unwrap();
64 assert_eq!(cuda_result, [6, 8, 10, 12]);
65 println!("CUDA result: {:?}", cuda_result);
66
67 // Then we run it on OpenCL.
68 let opencl_program = opencl(device);
69 let opencl_result = opencl_program.run(closures, ()).unwrap();
70 assert_eq!(opencl_result, [6, 8, 10, 12]);
71 println!("OpenCL result: {:?}", opencl_result);
72}
Sourcepub fn run(self) -> Result<(), GPUError>
pub fn run(self) -> Result<(), GPUError>
Actually run the kernel.
Examples found in repository?
examples/add.rs (line 49)
21pub fn main() {
22 // Define some data that should be operated on.
23 let aa: Vec<u32> = vec![1, 2, 3, 4];
24 let bb: Vec<u32> = vec![5, 6, 7, 8];
25
26 // This is the core. Here we write the interaction with the GPU independent of whether it is
27 // CUDA or OpenCL.
28 let closures = program_closures!(|program, _args| -> Result<Vec<u32>, GPUError> {
29 // Make sure the input data has the same length.
30 assert_eq!(aa.len(), bb.len());
31 let length = aa.len();
32
33 // Copy the data to the GPU.
34 let aa_buffer = program.create_buffer_from_slice(&aa)?;
35 let bb_buffer = program.create_buffer_from_slice(&bb)?;
36
37 // The result buffer has the same length as the input buffers.
38 let result_buffer = unsafe { program.create_buffer::<u32>(length)? };
39
40 // Get the kernel.
41 let kernel = program.create_kernel("add", 1, 1)?;
42
43 // Execute the kernel.
44 kernel
45 .arg(&(length as u32))
46 .arg(&aa_buffer)
47 .arg(&bb_buffer)
48 .arg(&result_buffer)
49 .run()?;
50
51 // Get the resulting data.
52 let mut result = vec![0u32; length];
53 program.read_into_buffer(&result_buffer, &mut result)?;
54
55 Ok(result)
56 });
57
58 // Get the first available device.
59 let device = *Device::all().first().unwrap();
60
61 // First we run it on CUDA.
62 let cuda_program = cuda(device);
63 let cuda_result = cuda_program.run(closures, ()).unwrap();
64 assert_eq!(cuda_result, [6, 8, 10, 12]);
65 println!("CUDA result: {:?}", cuda_result);
66
67 // Then we run it on OpenCL.
68 let opencl_program = opencl(device);
69 let opencl_result = opencl_program.run(closures, ()).unwrap();
70 assert_eq!(opencl_result, [6, 8, 10, 12]);
71 println!("OpenCL result: {:?}", opencl_result);
72}
Trait Implementations§
Auto Trait Implementations§
impl<'a> Freeze for Kernel<'a>
impl<'a> RefUnwindSafe for Kernel<'a>
impl<'a> !Send for Kernel<'a>
impl<'a> !Sync for Kernel<'a>
impl<'a> Unpin for Kernel<'a>
impl<'a> UnwindSafe for Kernel<'a>
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more