rust_gpu_tools/lib.rs
//! Abstraction layer for OpenCL and CUDA.
//!
//! Feature flags
//! -------------
//!
//! There are two [feature flags], `cuda` and `opencl`. By default `opencl` is enabled. You can
//! enable both at the same time, but at least one of them must be enabled.
//!
//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
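//!
//! For example, a `Cargo.toml` dependency entry that enables both backends might look like the
//! following sketch (the crate name and version shown here are illustrative):
//!
//! ```toml
//! [dependencies]
//! rust-gpu-tools = { version = "0.7", default-features = false, features = ["cuda", "opencl"] }
//! ```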

#![warn(missing_docs)]

mod device;
mod error;
#[cfg(any(feature = "cuda", feature = "opencl"))]
mod program;

#[cfg(feature = "cuda")]
pub mod cuda;
#[cfg(feature = "opencl")]
pub mod opencl;

pub use device::{Device, DeviceUuid, Framework, PciId, UniqueId, Vendor};
pub use error::GPUError;
#[cfg(any(feature = "cuda", feature = "opencl"))]
pub use program::Program;

#[cfg(not(any(feature = "cuda", feature = "opencl")))]
compile_error!("At least one of the features `cuda` or `opencl` must be enabled.");

/// A local buffer on the GPU.
///
/// The concept of a local buffer comes from OpenCL. In CUDA you don't allocate such a buffer
/// directly via an API call; instead you specify the amount of shared memory the kernel should
/// use.
///
/// There can be at most a single local buffer per kernel. On CUDA a null pointer is passed in
/// instead of an actual value, and the amount of memory to allocate is then passed into the
/// kernel call automatically.
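///
/// A minimal usage sketch (how the buffer is wired into a kernel call is only described in a
/// comment, since that part depends on the rest of the program/kernel API):
///
/// ```ignore
/// // Reserve local (OpenCL) / shared (CUDA) memory for 256 `u32` elements.
/// let scratch = LocalBuffer::<u32>::new(256);
/// // `scratch` is then passed to a kernel as a regular argument; the backend
/// // allocates the memory for the kernel run.
/// ```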
#[derive(Debug)]
pub struct LocalBuffer<T> {
    /// The number of `T`-sized elements.
    length: usize,
    _phantom: std::marker::PhantomData<T>,
}

impl<T> LocalBuffer<T> {
    /// Returns a new buffer of the specified `length`.
    pub fn new(length: usize) -> Self {
        LocalBuffer::<T> {
            length,
            _phantom: std::marker::PhantomData,
        }
    }
}
54}