#![cfg_attr(docsrs, feature(doc_cfg))]
mod enums;
pub use enums::*;
use lazy_static::lazy_static;
lazy_static! {
    /// Lazily-initialized flag: `true` if NVTT can use a CUDA device.
    ///
    /// Initialization first selects the current CUDA device, then queries
    /// support, so the answer reflects the device active at first access.
    pub static ref CUDA_SUPPORTED: bool = {
        unsafe {
            nvtt_sys::nvttUseCurrentDevice();
            nvtt_sys::nvttIsCudaSupported().into()
        }
    };
}
/// Returns the version number reported by the underlying NVTT library.
pub fn version() -> u32 {
    unsafe { nvtt_sys::nvttVersion() }
}
use nvtt_sys::NvttCompressionOptions;
/// Owned wrapper around a raw `NvttCompressionOptions` handle; freed on `Drop`.
pub struct CompressionOptions(*mut NvttCompressionOptions);
impl CompressionOptions {
    /// Allocates a fresh compression-options object with NVTT defaults.
    ///
    /// # Panics
    ///
    /// Panics if NVTT fails to allocate the object.
    pub fn new() -> Self {
        unsafe {
            let ptr = nvtt_sys::nvttCreateCompressionOptions();
            if ptr.is_null() {
                panic!("failed to allocate");
            } else {
                Self(ptr)
            }
        }
    }
    /// Restores all options to their NVTT defaults.
    pub fn reset(&mut self) {
        unsafe { nvtt_sys::nvttResetCompressionOptions(self.0) }
    }
    /// Sets the target compression format.
    pub fn set_format(&mut self, format: Format) {
        unsafe { nvtt_sys::nvttSetCompressionOptionsFormat(self.0, format.into()) }
    }
    /// Sets the compression quality / speed trade-off.
    pub fn set_quality(&mut self, quality: Quality) {
        unsafe { nvtt_sys::nvttSetCompressionOptionsQuality(self.0, quality.into()) }
    }
    /// Sets per-channel error weights; `alpha` defaults to `1.0` when `None`.
    pub fn set_color_weights(&mut self, red: f32, green: f32, blue: f32, alpha: Option<f32>) {
        let alpha = alpha.unwrap_or(1.0);
        unsafe { nvtt_sys::nvttSetCompressionOptionsColorWeights(self.0, red, green, blue, alpha) }
    }
    /// Describes the layout of uncompressed pixel output via bit masks.
    ///
    /// # Safety
    ///
    /// The caller must supply a bit count and channel masks that NVTT
    /// accepts; no validation is performed on this side of the FFI boundary.
    pub unsafe fn set_pixel_format(
        &mut self,
        bitcount: u32,
        rmask: u32,
        gmask: u32,
        bmask: u32,
        amask: u32,
    ) {
        unsafe {
            nvtt_sys::nvttSetCompressionOptionsPixelFormat(
                self.0, bitcount, rmask, gmask, bmask, amask,
            )
        }
    }
    /// Sets the pixel type used for uncompressed output.
    pub fn set_pixel_type(&mut self, pixel_type: PixelType) {
        unsafe { nvtt_sys::nvttSetCompressionOptionsPixelType(self.0, pixel_type.into()) }
    }
    /// Sets the row pitch alignment (in bytes) for uncompressed output.
    pub fn set_pitch_alignment(&mut self, pitch_alignment: i32) {
        unsafe { nvtt_sys::nvttSetCompressionOptionsPitchAlignment(self.0, pitch_alignment) }
    }
    /// Configures dithering and binary-alpha quantization.
    /// `alpha_threshold` defaults to `127` when `None`.
    pub fn set_quantization(
        &mut self,
        color_dithering: bool,
        alpha_dithering: bool,
        binary_alpha: bool,
        alpha_threshold: Option<i32>,
    ) {
        let alpha_threshold = alpha_threshold.unwrap_or(127);
        unsafe {
            nvtt_sys::nvttSetCompressionOptionsQuantization(
                self.0,
                color_dithering.into(),
                alpha_dithering.into(),
                binary_alpha.into(),
                alpha_threshold,
            )
        }
    }
    /// Returns the D3D9 format code corresponding to the current options.
    pub fn d3d9_format(&self) -> u32 {
        unsafe { nvtt_sys::nvttGetCompressionOptionsD3D9Format(self.0) }
    }
}
impl Default for CompressionOptions {
    /// Equivalent to [`CompressionOptions::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl Drop for CompressionOptions {
    fn drop(&mut self) {
        // SAFETY: `self.0` was allocated by `nvttCreateCompressionOptions`
        // and is never freed elsewhere.
        unsafe {
            nvtt_sys::nvttDestroyCompressionOptions(self.0);
        }
    }
}
/// Declares a thread-local byte buffer plus an `extern "C"` callback that
/// appends NVTT output chunks into it, then clears the buffer so each call
/// starts fresh. Expanded inside every compress/output method so each call
/// site gets its own buffer/callback pair.
macro_rules! make_thread_local {
    ($buffer:ident) => {
        std::thread_local! {
            static $buffer: core::cell::RefCell<Vec<u8>> = core::cell::RefCell::new(Vec::new());
        }
        /// Receives one chunk of compressed output from NVTT and buffers it.
        extern "C" fn output_callback(
            data_ptr: *const libc::c_void,
            len: libc::c_int,
        ) -> nvtt_sys::NvttBoolean {
            let len = len as usize;
            // SAFETY: NVTT passes a pointer to `len` valid bytes that stay
            // alive for the duration of this callback.
            let data = unsafe { std::slice::from_raw_parts(data_ptr as *const u8, len) };
            $buffer.with(|b| b.borrow_mut().extend_from_slice(data));
            true.into()
        }
        $buffer.with(|b| b.borrow_mut().clear());
    };
}
/// Installs `output_callback` (declared by `make_thread_local!`) as the
/// output handler, invokes the given NVTT function, and — on success —
/// returns the accumulated bytes, draining the thread-local buffer.
macro_rules! write_output {
    ($buffer:ident, $func:ident, $output_options:expr, $($arg:expr),* $(,)?) => {
        unsafe {
            nvtt_sys::nvttSetOutputOptionsOutputHandler($output_options.0, None, Some(output_callback), None);
            let res: bool = $func($($arg,)*).into();
            // `replace` hands back the filled buffer and leaves an empty one.
            res.then_some($buffer.with(|b| b.replace(Vec::new())))
        }
    };
}
use nvtt_sys::NvttContext;
/// Owned wrapper around a raw `NvttContext` handle; freed on `Drop`.
pub struct Context(*mut NvttContext);
impl Context {
    /// Creates a new compression context with CUDA acceleration disabled.
    ///
    /// # Panics
    ///
    /// Panics if NVTT fails to allocate the context.
    pub fn new() -> Self {
        unsafe {
            nvtt_sys::nvttUseCurrentDevice();
            let ptr = nvtt_sys::nvttCreateContext();
            if ptr.is_null() {
                panic!("failed to allocate");
            } else {
                // Start with CUDA off; opt in via `set_cuda_acceleration`.
                nvtt_sys::nvttSetContextCudaAcceleration(ptr, false.into());
                Self(ptr)
            }
        }
    }
    /// Enables or disables CUDA acceleration for this context.
    ///
    /// # Panics
    ///
    /// Panics if CUDA is not supported on this machine.
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn set_cuda_acceleration(&mut self, enable: bool) {
        if *CUDA_SUPPORTED {
            unsafe { nvtt_sys::nvttSetContextCudaAcceleration(self.0, enable.into()) }
        } else {
            panic!("cuda is not supported");
        }
    }
    /// Returns whether CUDA acceleration is currently enabled
    /// (always `false` when CUDA is unsupported).
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn is_cuda_acceleration_enabled(&self) -> bool {
        if !*CUDA_SUPPORTED {
            false
        } else {
            unsafe { nvtt_sys::nvttContextIsCudaAccelerationEnabled(self.0).into() }
        }
    }
    /// Writes the container header for `img` with `mipmap_count` mipmaps and
    /// returns the header bytes, or `None` if NVTT reports a failure.
    #[must_use]
    pub fn output_header(
        &self,
        img: &Surface,
        mipmap_count: u32,
        compression_options: &CompressionOptions,
        output_options: &OutputOptions,
    ) -> Option<Vec<u8>> {
        let func = nvtt_sys::nvttContextOutputHeader;
        make_thread_local!(BUFFER);
        write_output!(
            BUFFER,
            func,
            output_options,
            self.0,
            img.0,
            mipmap_count as i32,
            compression_options.0,
            output_options.0,
        )
    }
    /// Writes the container header for a cube surface and returns the header
    /// bytes, or `None` if NVTT reports a failure.
    #[must_use]
    pub fn output_header_cube(
        &self,
        cube: &CubeSurface,
        mipmap_count: u32,
        compression_options: &CompressionOptions,
        output_options: &OutputOptions,
    ) -> Option<Vec<u8>> {
        let func = nvtt_sys::nvttContextOutputHeaderCube;
        make_thread_local!(CUBE_HEADER_BUFFER);
        write_output!(
            CUBE_HEADER_BUFFER,
            func,
            output_options,
            self.0,
            cube.0,
            mipmap_count as i32,
            compression_options.0,
            output_options.0,
        )
    }
    /// Writes a container header described only by raw texture metadata
    /// (no surface needed) and returns the header bytes, or `None` on failure.
    #[must_use]
    #[allow(clippy::too_many_arguments)]
    pub fn output_header_data(
        &self,
        tex_type: TextureType,
        w: u32,
        h: u32,
        d: u32,
        mipmap_count: u32,
        is_normal_map: bool,
        compression_options: &CompressionOptions,
        output_options: &OutputOptions,
    ) -> Option<Vec<u8>> {
        let func = nvtt_sys::nvttContextOutputHeaderData;
        make_thread_local!(DATA_HEADER_BUFFER);
        write_output!(
            DATA_HEADER_BUFFER,
            func,
            output_options,
            self.0,
            tex_type.into(),
            w as i32,
            h as i32,
            d as i32,
            mipmap_count as i32,
            is_normal_map.into(),
            compression_options.0,
            output_options.0,
        )
    }
    /// Compresses `img` (face 0, mipmap 0) and returns the compressed bytes,
    /// or `None` if NVTT reports a failure.
    pub fn compress(
        &self,
        img: &Surface,
        compression_options: &CompressionOptions,
        output_options: &OutputOptions,
    ) -> Option<Vec<u8>> {
        let face = 0;
        let mipmap = 0;
        let func = nvtt_sys::nvttContextCompress;
        make_thread_local!(BUFFER);
        write_output!(
            BUFFER,
            func,
            output_options,
            self.0,
            img.0,
            face,
            mipmap,
            compression_options.0,
            output_options.0,
        )
    }
    /// Compresses a cube surface (mipmap 0) and returns the compressed bytes,
    /// or `None` if NVTT reports a failure.
    pub fn compress_cube(
        &self,
        cube: &CubeSurface,
        compression_options: &CompressionOptions,
        output_options: &OutputOptions,
    ) -> Option<Vec<u8>> {
        let mipmap = 0;
        let func = nvtt_sys::nvttContextCompressCube;
        make_thread_local!(CUBE_BUFFER);
        write_output!(
            CUBE_BUFFER,
            func,
            output_options,
            self.0,
            cube.0,
            mipmap,
            compression_options.0,
            output_options.0,
        )
    }
pub fn compress_data(
&self,
w: u32,
h: u32,
d: u32,
rgba: &[f32],
compression_options: &CompressionOptions,
output_options: &OutputOptions,
) -> Option<Vec<u8>> {
if w * h * d < rgba.len() as u32 {
panic!("rgba does match dimensions");
}
let face = 0;
let mipmap = 0;
let func = nvtt_sys::nvttContextCompressData;
make_thread_local!(DATA_BUFFER);
write_output!(
DATA_BUFFER,
func,
output_options,
self.0,
w as i32,
h as i32,
d as i32,
face,
mipmap,
rgba.as_ptr(),
compression_options.0,
output_options.0,
)
}
    /// Estimates the compressed size in bytes of `img` with `mipmap_count`
    /// mipmaps under the given options.
    pub fn estimate_size(
        &self,
        img: &Surface,
        mipmap_count: u32,
        compression_options: &CompressionOptions,
    ) -> u32 {
        unsafe {
            nvtt_sys::nvttContextEstimateSize(
                self.0,
                img.0,
                mipmap_count as i32,
                compression_options.0,
            ) as u32
        }
    }
    /// Estimates the compressed size in bytes of a cube surface.
    pub fn estimate_size_cube(
        &self,
        cube: &CubeSurface,
        mipmap_count: u32,
        compression_options: &CompressionOptions,
    ) -> u32 {
        unsafe {
            nvtt_sys::nvttContextEstimateSizeCube(
                self.0,
                cube.0,
                mipmap_count as i32,
                compression_options.0,
            ) as u32
        }
    }
    /// Estimates the compressed size in bytes for raw data of the given
    /// dimensions without constructing a surface.
    pub fn estimate_size_data(
        &self,
        w: u32,
        h: u32,
        d: u32,
        mipmap_count: u32,
        compression_options: &CompressionOptions,
    ) -> u32 {
        unsafe {
            nvtt_sys::nvttContextEstimateSizeData(
                self.0,
                w as i32,
                h as i32,
                d as i32,
                mipmap_count as i32,
                compression_options.0,
            ) as u32
        }
    }
}
impl Default for Context {
    /// Equivalent to [`Context::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl Drop for Context {
    fn drop(&mut self) {
        // SAFETY: `self.0` was allocated by `nvttCreateContext` and is never
        // freed elsewhere.
        unsafe {
            nvtt_sys::nvttDestroyContext(self.0);
        }
    }
}
use nvtt_sys::NvttCubeSurface;
/// Owned wrapper around a raw `NvttCubeSurface` handle; freed on `Drop`.
pub struct CubeSurface(*mut NvttCubeSurface);
impl CubeSurface {
    /// Builds a cube surface from a 2D `img` laid out as `layout`.
    ///
    /// # Panics
    ///
    /// Panics if `img` is 3D, if `layout` does not support `img`'s
    /// dimensions, or if NVTT fails to allocate.
    pub fn fold(img: &Surface, layout: CubeLayout) -> Self {
        if img.depth() > 1 {
            panic!("3D surface was provided");
        }
        if !layout.dim_supported(img.width(), img.height()) {
            panic!("layout does not support dimensions of img");
        }
        unsafe {
            let ptr = nvtt_sys::nvttCreateCubeSurface();
            if ptr.is_null() {
                panic!("failed to allocate");
            }
            nvtt_sys::nvttCubeSurfaceFold(ptr, img.0, layout.into());
            Self(ptr)
        }
    }
    /// Unfolds the cube into a single 2D surface using `layout`.
    ///
    /// # Panics
    ///
    /// Panics if NVTT fails to allocate.
    pub fn unfold(&self, layout: CubeLayout) -> Surface {
        unsafe {
            let ptr = nvtt_sys::nvttCubeSurfaceUnfold(self.0, layout.into());
            if ptr.is_null() {
                panic!("failed to allocate");
            }
            Surface(ptr)
        }
    }
    /// Borrows a single face of the cube.
    pub fn face(&self, face: CubeFace) -> &Surface {
        // SAFETY: `Surface` is #[repr(transparent)] over `*mut NvttSurface`,
        // so a pointer to the face handle can be reinterpreted as `&Surface`.
        unsafe { &*(nvtt_sys::nvttCubeSurfaceFace(self.0, face as i32) as *const Surface) }
    }
    /// Returns the edge length (in texels) of each square face.
    pub fn edge_length(&self) -> u32 {
        unsafe { nvtt_sys::nvttCubeSurfaceEdgeLength(self.0) as u32 }
    }
    /// Returns the number of mipmaps in a full chain for this cube.
    pub fn count_mipmaps(&self) -> u32 {
        unsafe { nvtt_sys::nvttCubeSurfaceCountMipmaps(self.0) as u32 }
    }
    /// Returns the average value of `channel` over all faces.
    pub fn average(&self, channel: Channel) -> f32 {
        unsafe { nvtt_sys::nvttCubeSurfaceAverage(self.0, channel as i32) }
    }
    /// Returns `(min, max)` of `channel` over all faces.
    pub fn range(&self, channel: Channel) -> (f32, f32) {
        let mut min: f32 = 0.0;
        let mut max: f32 = 0.0;
        unsafe {
            nvtt_sys::nvttCubeSurfaceRange(
                self.0,
                channel as i32,
                &mut min as *mut _,
                &mut max as *mut _,
            );
            (min, max)
        }
    }
    /// Clamps `channel` to `[low, high]` on all faces.
    pub fn clamp(&mut self, channel: Channel, low: f32, high: f32) {
        unsafe {
            nvtt_sys::nvttCubeSurfaceClamp(self.0, channel as i32, low, high);
        }
    }
    /// Converts linear values to gamma space with exponent `gamma`.
    pub fn to_gamma(&mut self, gamma: f32) {
        unsafe { nvtt_sys::nvttCubeSurfaceToGamma(self.0, gamma) }
    }
    /// Converts gamma-space values (exponent `gamma`) back to linear.
    pub fn from_gamma(&mut self, gamma: f32) {
        unsafe { nvtt_sys::nvttCubeSurfaceToLinear(self.0, gamma) }
    }
    /// Creates a `size`×`size` cube filtered with a cosine-power kernel
    /// (useful for irradiance/specular prefiltering).
    ///
    /// # Panics
    ///
    /// Panics if `size` is zero or NVTT fails to allocate.
    pub fn cosine_power_filter(
        &self,
        size: u32,
        cosine_power: f32,
        fixup_method: EdgeFixup,
    ) -> Self {
        if size == 0 {
            panic!("size cannot be zero");
        }
        unsafe {
            let ptr = nvtt_sys::nvttCubeSurfaceCosinePowerFilter(
                self.0,
                size as i32,
                cosine_power,
                fixup_method.into(),
            );
            if ptr.is_null() {
                panic!("failed to allocate");
            }
            Self(ptr)
        }
    }
pub fn fast_resample(&self, size: u32, fixup_method: EdgeFixup) -> Self {
unsafe {
let ptr =
nvtt_sys::nvttCubeSurfaceFastResample(self.0, size as i32, fixup_method.into());
if ptr.is_null() {
panic!("failed to allocate");
}
Self(ptr)
}
}
}
impl Drop for CubeSurface {
    fn drop(&mut self) {
        // SAFETY: `self.0` was allocated by NVTT and is never freed elsewhere.
        unsafe {
            nvtt_sys::nvttDestroyCubeSurface(self.0);
        }
    }
}
use nvtt_sys::NvttOutputOptions;
/// Owned wrapper around a raw `NvttOutputOptions` handle; freed on `Drop`.
pub struct OutputOptions(*mut NvttOutputOptions);
impl OutputOptions {
    /// Allocates a fresh output-options object with NVTT defaults.
    ///
    /// # Panics
    ///
    /// Panics if NVTT fails to allocate the object.
    pub fn new() -> Self {
        unsafe {
            let ptr = nvtt_sys::nvttCreateOutputOptions();
            if ptr.is_null() {
                panic!("failed to allocate");
            }
            Self(ptr)
        }
    }
    /// Controls whether a container header is emitted before the data.
    pub fn set_output_header(&mut self, output_header: bool) {
        unsafe { nvtt_sys::nvttSetOutputOptionsOutputHeader(self.0, output_header.into()) }
    }
    /// Selects the container format (e.g. DDS).
    pub fn set_container(&mut self, container: Container) {
        unsafe { nvtt_sys::nvttSetOutputOptionsContainer(self.0, container.into()) }
    }
    /// Sets a user-defined version number stored in the container.
    pub fn set_user_version(&mut self, version: i32) {
        unsafe { nvtt_sys::nvttSetOutputOptionsUserVersion(self.0, version) }
    }
    /// Sets the sRGB flag in the emitted container.
    pub fn set_srgb_flag(&mut self, b: bool) {
        unsafe { nvtt_sys::nvttSetOutputOptionsSrgbFlag(self.0, b.into()) }
    }
}
impl Default for OutputOptions {
    /// Equivalent to [`OutputOptions::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl Drop for OutputOptions {
    fn drop(&mut self) {
        // SAFETY: `self.0` was allocated by NVTT and is never freed elsewhere.
        unsafe {
            nvtt_sys::nvttDestroyOutputOptions(self.0);
        }
    }
}
/// An axis-aligned, half-open box of texel coordinates (`min` inclusive,
/// `max` exclusive) used to describe regions of a [`Surface`].
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)]
pub struct BoundingBox {
    pub min_x: u32,
    pub max_x: u32,
    pub min_y: u32,
    pub max_y: u32,
    pub min_z: u32,
    pub max_z: u32,
}
impl BoundingBox {
    /// Returns `true` if `other` lies entirely within `self` on every axis.
    fn contains(self, other: Self) -> bool {
        let x_ok = self.min_x <= other.min_x && other.max_x <= self.max_x;
        let y_ok = self.min_y <= other.min_y && other.max_y <= self.max_y;
        let z_ok = self.min_z <= other.min_z && other.max_z <= self.max_z;
        x_ok && y_ok && z_ok
    }
    /// Returns `true` if both boxes have identical extents along each axis.
    fn same_shape(self, other: Self) -> bool {
        let dx = |b: Self| b.max_x - b.min_x;
        let dy = |b: Self| b.max_y - b.min_y;
        let dz = |b: Self| b.max_z - b.min_z;
        dx(self) == dx(other) && dy(self) == dy(other) && dz(self) == dz(other)
    }
}
use nvtt_sys::NvttSurface;
/// Owned wrapper around a raw `NvttSurface` handle; freed on `Drop`.
///
/// `#[repr(transparent)]` guarantees the same layout as the raw pointer,
/// which `CubeSurface::face` relies on when casting a borrowed face pointer.
#[repr(transparent)]
pub struct Surface(*mut NvttSurface);
use thiserror::Error;
#[derive(Error, Clone, Copy, Debug)]
pub enum SurfaceError {
#[error(
"invalid dimenions (expected slice of length at least {expected}, found length {found})"
)]
InvalidDimensions { expected: u32, found: u32 },
#[error("unknown error has occured")]
UnknownError,
}
impl Surface {
    /// Creates a surface from interleaved pixel data.
    ///
    /// # Errors
    ///
    /// Returns [`SurfaceError::InvalidDimensions`] if `input`'s data is too
    /// short for the requested dimensions, or [`SurfaceError::UnknownError`]
    /// if NVTT rejects the data.
    ///
    /// # Panics
    ///
    /// Panics if NVTT fails to allocate the surface.
    pub fn image(input: InputFormat, w: u32, h: u32, d: u32) -> Result<Self, SurfaceError> {
        if !input.fit_dim(w, h, d) {
            return Err(SurfaceError::InvalidDimensions {
                expected: input.min_bytes(w, h, d),
                found: input.data().len() as u32,
            });
        }
        unsafe {
            // Only the Bgra8Ub variant carries an unsigned-to-signed flag.
            let unsigned_to_signed = if let InputFormat::Bgra8Ub {
                unsigned_to_signed, ..
            } = input
            {
                unsigned_to_signed
            } else {
                false
            };
            let surface_ptr = nvtt_sys::nvttCreateSurface();
            if surface_ptr.is_null() {
                panic!("failed to allocate");
            }
            let ret: bool = nvtt_sys::nvttSurfaceSetImageData(
                surface_ptr,
                input.into_nvtt(),
                w as i32,
                h as i32,
                d as i32,
                input.data().as_ptr().cast(),
                unsigned_to_signed.into(),
                std::ptr::null_mut(),
            )
            .into();
            if !ret {
                // Don't leak the surface on failure.
                nvtt_sys::nvttDestroySurface(surface_ptr);
                Err(SurfaceError::UnknownError)
            } else {
                Ok(Self(surface_ptr))
            }
        }
    }
    /// Creates a surface from per-channel (planar) pixel data.
    ///
    /// # Errors
    ///
    /// Returns [`SurfaceError::InvalidDimensions`] if any channel slice is
    /// too short, or [`SurfaceError::UnknownError`] if NVTT rejects the data.
    ///
    /// # Panics
    ///
    /// Panics if NVTT fails to allocate the surface.
    pub fn image_split(
        input: SplitInputFormat,
        w: u32,
        h: u32,
        d: u32,
    ) -> Result<Self, SurfaceError> {
        if !input.fit_dim(w, h, d) {
            return Err(SurfaceError::InvalidDimensions {
                expected: input.min_bytes(w, h, d),
                found: input.shortest_slice_len(),
            });
        }
        unsafe {
            let surface_ptr = nvtt_sys::nvttCreateSurface();
            if surface_ptr.is_null() {
                panic!("failed to allocate");
            }
            // NVTT expects pointers in R, G, B, A order regardless of the
            // variant's field order; R32f passes null for the other planes.
            let (r, g, b, a) = match input {
                SplitInputFormat::Bgra8Ub { b, g, r, a } => (
                    r.as_ptr().cast(),
                    g.as_ptr().cast(),
                    b.as_ptr().cast(),
                    a.as_ptr().cast(),
                ),
                SplitInputFormat::Bgra8Sb { b, g, r, a } => (
                    r.as_ptr().cast(),
                    g.as_ptr().cast(),
                    b.as_ptr().cast(),
                    a.as_ptr().cast(),
                ),
                SplitInputFormat::Rgba32f { r, g, b, a } => (
                    r.as_ptr().cast(),
                    g.as_ptr().cast(),
                    b.as_ptr().cast(),
                    a.as_ptr().cast(),
                ),
                SplitInputFormat::Rgba16f { r, g, b, a } => (
                    r.as_ptr().cast(),
                    g.as_ptr().cast(),
                    b.as_ptr().cast(),
                    a.as_ptr().cast(),
                ),
                SplitInputFormat::R32f(r) => (
                    r.as_ptr().cast(),
                    std::ptr::null(),
                    std::ptr::null(),
                    std::ptr::null(),
                ),
            };
            let ret: bool = nvtt_sys::nvttSurfaceSetImageRGBA(
                surface_ptr,
                input.into_nvtt(),
                w as i32,
                h as i32,
                d as i32,
                r,
                g,
                b,
                a,
                std::ptr::null_mut(),
            )
            .into();
            if !ret {
                // Don't leak the surface on failure.
                nvtt_sys::nvttDestroySurface(surface_ptr);
                Err(SurfaceError::UnknownError)
            } else {
                Ok(Self(surface_ptr))
            }
        }
    }
    /// Width of the surface in texels.
    pub fn width(&self) -> u32 {
        unsafe { nvtt_sys::nvttSurfaceWidth(self.0) as u32 }
    }
    /// Height of the surface in texels.
    pub fn height(&self) -> u32 {
        unsafe { nvtt_sys::nvttSurfaceHeight(self.0) as u32 }
    }
    /// Depth of the surface in texels (1 for 2D surfaces).
    pub fn depth(&self) -> u32 {
        unsafe { nvtt_sys::nvttSurfaceDepth(self.0) as u32 }
    }
    /// Returns `true` if the surface data currently resides in CPU memory.
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn on_cpu(&self) -> bool {
        !self.on_gpu()
    }
    /// Returns `true` if the surface data currently resides in GPU memory.
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn on_gpu(&self) -> bool {
        !self.gpu_data_ptr().is_null()
    }
    /// Moves the surface data to the GPU (no-op if already there).
    ///
    /// # Panics
    ///
    /// Panics if CUDA is not supported on this machine.
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn to_gpu(&mut self) {
        if !*CUDA_SUPPORTED {
            panic!("cuda is not supported");
        }
        if !self.on_gpu() {
            unsafe {
                nvtt_sys::nvttSurfaceToGPU(self.0, true.into(), std::ptr::null_mut());
            }
        }
    }
    /// Moves the surface data back to the CPU (no-op if already there).
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn to_cpu(&mut self) {
        if self.on_gpu() {
            unsafe {
                nvtt_sys::nvttSurfaceToCPU(self.0, std::ptr::null_mut());
            }
        }
    }
    /// Raw pointer to the GPU copy of the data (null when not on the GPU).
    #[cfg_attr(docsrs, doc(cfg(feature = "cuda")))]
    #[cfg(feature = "cuda")]
    pub fn gpu_data_ptr(&self) -> *const f32 {
        unsafe { nvtt_sys::nvttSurfaceGPUData(self.0) }
    }
    /// Reads one texel value from `channel` at `(x, y, z)`.
    ///
    /// # Panics
    ///
    /// Panics if the coordinates are out of bounds.
    pub fn texel(&self, channel: Channel, x: u32, y: u32, z: u32) -> f32 {
        if x >= self.width() || y >= self.height() || z >= self.depth() {
            panic!("texel out of bounds");
        }
        let channel = channel as i32 as u32;
        // Data is stored planar: one full w*h*d plane per channel.
        let index = ((channel * self.depth() + z) * self.height() + y) * self.width() + x;
        self.data()[index as usize]
    }
    /// Mutable access to one texel value of `channel` at `(x, y, z)`.
    ///
    /// # Panics
    ///
    /// Panics if the coordinates are out of bounds.
    pub fn texel_mut(&mut self, channel: Channel, x: u32, y: u32, z: u32) -> &mut f32 {
        if x >= self.width() || y >= self.height() || z >= self.depth() {
            panic!("texel out of bounds");
        }
        let channel = channel as i32 as u32;
        let index = ((channel * self.depth() + z) * self.height() + y) * self.width() + x;
        &mut self.data_mut()[index as usize]
    }
    /// All texel data as a flat slice: 4 channel planes of `w * h * d` floats.
    pub fn data(&self) -> &[f32] {
        unsafe {
            let len = self.width() * self.height() * self.depth() * 4;
            let ptr = nvtt_sys::nvttSurfaceData(self.0).cast_const();
            std::slice::from_raw_parts(ptr, len as usize)
        }
    }
    /// Mutable access to all texel data; pulls the data back to the CPU
    /// first when the `cuda` feature is enabled.
    pub fn data_mut(&mut self) -> &mut [f32] {
        cfg_if::cfg_if! {
            if #[cfg(feature = "cuda")] {
                self.to_cpu();
            }
        }
        unsafe {
            let len = self.width() * self.height() * self.depth() * 4;
            let ptr = nvtt_sys::nvttSurfaceData(self.0);
            std::slice::from_raw_parts_mut(ptr, len as usize)
        }
    }
    /// One channel's plane (`w * h * d` floats) as a slice.
    pub fn channel(&self, channel: Channel) -> &[f32] {
        let channel = channel as i32;
        unsafe {
            let len = self.width() * self.height() * self.depth();
            let ptr = nvtt_sys::nvttSurfaceChannel(self.0, channel).cast_const();
            std::slice::from_raw_parts(ptr, len as usize)
        }
    }
    /// Mutable access to one channel's plane; pulls the data back to the
    /// CPU first when the `cuda` feature is enabled.
    pub fn channel_mut(&mut self, channel: Channel) -> &mut [f32] {
        cfg_if::cfg_if! {
            if #[cfg(feature = "cuda")] {
                self.to_cpu();
            }
        }
        let channel = channel as i32;
        unsafe {
            let len = self.width() * self.height() * self.depth();
            let ptr = nvtt_sys::nvttSurfaceChannel(self.0, channel);
            std::slice::from_raw_parts_mut(ptr, len as usize)
        }
    }
    /// The texture type (2D, 3D, …) of this surface.
    pub fn tex_type(&self) -> TextureType {
        unsafe { nvtt_sys::nvttSurfaceType(self.0).into() }
    }
    /// The wrap mode used by filtering operations.
    pub fn wrap_mode(&self) -> WrapMode {
        unsafe { nvtt_sys::nvttSurfaceWrapMode(self.0).into() }
    }
    /// The alpha mode associated with this surface.
    pub fn alpha_mode(&self) -> AlphaMode {
        unsafe { nvtt_sys::nvttSurfaceAlphaMode(self.0).into() }
    }
    /// Whether this surface is flagged as a normal map.
    pub fn is_normal_map(&self) -> bool {
        unsafe { nvtt_sys::nvttSurfaceIsNormalMap(self.0).into() }
    }
    /// Sets the wrap mode used by filtering operations.
    pub fn set_wrap_mode(&mut self, wrap_mode: WrapMode) {
        unsafe {
            nvtt_sys::nvttSetSurfaceWrapMode(self.0, wrap_mode.into());
        }
    }
    /// Sets the alpha mode associated with this surface.
    pub fn set_alpha_mode(&mut self, alpha_mode: AlphaMode) {
        unsafe {
            nvtt_sys::nvttSetSurfaceAlphaMode(self.0, alpha_mode.into());
        }
    }
    /// Flags or unflags this surface as a normal map.
    pub fn set_normal_map(&mut self, is_normal_map: bool) {
        unsafe {
            nvtt_sys::nvttSetSurfaceNormalMap(self.0, is_normal_map.into());
        }
    }
    /// Mirrors the surface along the X axis.
    pub fn flip_x(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceFlipX(self.0, std::ptr::null_mut());
        }
    }
    /// Mirrors the surface along the Y axis.
    pub fn flip_y(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceFlipY(self.0, std::ptr::null_mut());
        }
    }
    /// Mirrors the surface along the Z axis.
    pub fn flip_z(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceFlipZ(self.0, std::ptr::null_mut());
        }
    }
    /// Copies `src_channel` of `other` into `dst_channel` of `self`.
    ///
    /// # Errors
    ///
    /// Returns [`SurfaceError::InvalidDimensions`] if NVTT rejects the copy
    /// (the channel plane lengths are reported for diagnosis).
    pub fn copy_channel(
        &mut self,
        other: &Self,
        src_channel: Channel,
        dst_channel: Channel,
    ) -> Result<(), SurfaceError> {
        unsafe {
            if nvtt_sys::nvttSurfaceCopyChannel(
                self.0,
                other.0,
                src_channel as i32,
                dst_channel as i32,
                std::ptr::null_mut(),
            )
            .into()
            {
                Ok(())
            } else {
                Err(SurfaceError::InvalidDimensions {
                    expected: self.channel(dst_channel).len() as u32,
                    found: other.channel(src_channel).len() as u32,
                })
            }
        }
    }
    /// Adds `scale * other[src_channel]` into `dst_channel` of `self`.
    ///
    /// # Errors
    ///
    /// Returns [`SurfaceError::InvalidDimensions`] if NVTT rejects the
    /// operation (the channel plane lengths are reported for diagnosis).
    pub fn add_channel(
        &mut self,
        other: &Self,
        src_channel: Channel,
        dst_channel: Channel,
        scale: f32,
    ) -> Result<(), SurfaceError> {
        unsafe {
            if nvtt_sys::nvttSurfaceAddChannel(
                self.0,
                other.0,
                src_channel as i32,
                dst_channel as i32,
                scale,
                std::ptr::null_mut(),
            )
            .into()
            {
                Ok(())
            } else {
                Err(SurfaceError::InvalidDimensions {
                    expected: self.channel(dst_channel).len() as u32,
                    found: other.channel(src_channel).len() as u32,
                })
            }
        }
    }
    /// The full extent of this surface as a [`BoundingBox`].
    pub fn bounds(&self) -> BoundingBox {
        BoundingBox {
            min_x: 0,
            max_x: self.width(),
            min_y: 0,
            max_y: self.height(),
            min_z: 0,
            max_z: self.depth(),
        }
    }
pub fn copy(&mut self, other: &Self, src: BoundingBox, dst: BoundingBox) {
if !other.bounds().contains(dst) || !self.bounds().contains(dst) || !src.same_shape(dst) {
panic!("invalid bounding boxes supplied");
} else {
let xsrc = src.min_x as i32;
let ysrc = src.min_y as i32;
let zsrc = src.min_z as i32;
let xsize = (dst.max_x - dst.min_x) as i32;
let ysize = (dst.max_y - dst.min_y) as i32;
let zsize = (dst.max_z - dst.min_z) as i32;
let xdst = dst.min_x as i32;
let ydst = dst.min_y as i32;
let zdst = dst.min_z as i32;
unsafe {
nvtt_sys::nvttSurfaceCopy(
self.0,
other.0,
xsrc,
ysrc,
zsrc,
xsize,
ysize,
zsize,
xdst,
ydst,
zdst,
std::ptr::null_mut(),
);
}
}
}
    /// Extracts the region `bounds` as a new surface.
    ///
    /// # Panics
    ///
    /// Panics if `bounds` is not contained in this surface, or if NVTT
    /// fails to allocate.
    pub fn create_sub_image(&self, bounds: BoundingBox) -> Self {
        if !self.bounds().contains(bounds) {
            panic!("invalid bounds supplied");
        } else {
            let x0 = bounds.min_x as i32;
            let x1 = bounds.max_x as i32;
            let y0 = bounds.min_y as i32;
            let y1 = bounds.max_y as i32;
            let z0 = bounds.min_z as i32;
            let z1 = bounds.max_z as i32;
            unsafe {
                let ptr = nvtt_sys::nvttSurfaceCreateSubImage(
                    self.0,
                    x0,
                    x1,
                    y0,
                    y1,
                    z0,
                    z1,
                    std::ptr::null_mut(),
                );
                if ptr.is_null() {
                    panic!("failed to allocate");
                } else {
                    Self(ptr)
                }
            }
        }
    }
    /// Number of mipmaps in a full chain down to 1×1.
    pub fn count_mipmaps(&self) -> u32 {
        unsafe { nvtt_sys::nvttSurfaceCountMipmaps(self.0, 1) as u32 }
    }
    /// Number of mipmaps in a chain that stops at `min_size`.
    pub fn count_mipmaps_until(&self, min_size: u32) -> u32 {
        unsafe { nvtt_sys::nvttSurfaceCountMipmaps(self.0, min_size as i32) as u32 }
    }
    /// Whether another mipmap (no smaller than `min_size`) can be built.
    pub fn can_make_next_mipmap(&self, min_size: u32) -> bool {
        unsafe { nvtt_sys::nvttSurfaceCanMakeNextMipmap(self.0, min_size as i32).into() }
    }
    /// Replaces this surface with its next mipmap level using `filter`;
    /// returns `false` when no further level can be built.
    pub fn build_next_mipmap(&mut self, filter: Filter<Mipmap>, min_size: u32) -> bool {
        let filter_width = filter.width;
        let params = filter.params();
        let params_ptr = filter.params_ptr(&params);
        unsafe {
            nvtt_sys::nvttSurfaceBuildNextMipmap(
                self.0,
                filter.algorithm.into(),
                filter_width,
                params_ptr,
                min_size as i32,
                std::ptr::null_mut(),
            )
            .into()
        }
    }
    /// Replaces this surface with a next mipmap level filled with a solid
    /// color; returns `false` when no further level can be built.
    pub fn build_next_mipmap_color(&mut self, rgba: [f32; 4]) -> bool {
        unsafe {
            nvtt_sys::nvttSurfaceBuildNextMipmapSolidColor(
                self.0,
                rgba.as_ptr(),
                std::ptr::null_mut(),
            )
            .into()
        }
    }
    /// Fills the entire surface with a constant color.
    pub fn fill(&mut self, rgba: [f32; 4]) {
        let r = rgba[0];
        let g = rgba[1];
        let b = rgba[2];
        let a = rgba[3];
        unsafe { nvtt_sys::nvttSurfaceFill(self.0, r, g, b, a, std::ptr::null_mut()) }
    }
    /// Sets the one-texel border of the surface to a constant color.
    pub fn set_border(&mut self, rgba: [f32; 4]) {
        let r = rgba[0];
        let g = rgba[1];
        let b = rgba[2];
        let a = rgba[3];
        unsafe { nvtt_sys::nvttSurfaceSetBorder(self.0, r, g, b, a, std::ptr::null_mut()) }
    }
    /// Draws borders around each `w`×`h` atlas tile with a constant color.
    ///
    /// # Panics
    ///
    /// Panics if `w` or `h` is zero.
    pub fn set_atlas_border(&mut self, w: u32, h: u32, rgba: [f32; 4]) {
        if w == 0 || h == 0 {
            panic!("invalid atlas dimensions");
        }
        let r = rgba[0];
        let g = rgba[1];
        let b = rgba[2];
        let a = rgba[3];
        unsafe {
            nvtt_sys::nvttSurfaceSetAtlasBorder(
                self.0,
                w as i32,
                h as i32,
                r,
                g,
                b,
                a,
                std::ptr::null_mut(),
            );
        }
    }
    /// Resizes to exactly `w`×`h`×`d` using the given filter.
    ///
    /// # Panics
    ///
    /// Panics if any dimension is zero.
    pub fn resize_filtered(&mut self, w: u32, h: u32, d: u32, filter: Filter<Resize>) {
        if w == 0 || h == 0 || d == 0 {
            panic!("invalid resize dimensions");
        }
        let filter_width = filter.width;
        let params = filter.params();
        let params_ptr = filter.params_ptr(&params);
        unsafe {
            nvtt_sys::nvttSurfaceResize(
                self.0,
                w as i32,
                h as i32,
                d as i32,
                filter.algorithm.into(),
                filter_width,
                params_ptr,
                std::ptr::null_mut(),
            )
        }
    }
    /// Resizes so the largest extent is at most `max_extent`, rounding
    /// dimensions per `mode`.
    ///
    /// # Panics
    ///
    /// Panics if `max_extent` is zero.
    pub fn resize_rounded(&mut self, max_extent: u32, mode: RoundMode, filter: Filter<Resize>) {
        if max_extent == 0 {
            panic!("invalid max extent");
        }
        let filter_width = filter.width;
        let params = filter.params();
        let params_ptr = filter.params_ptr(&params);
        unsafe {
            nvtt_sys::nvttSurfaceResizeMaxParams(
                self.0,
                max_extent as i32,
                mode.into(),
                filter.algorithm.into(),
                filter_width,
                params_ptr,
                std::ptr::null_mut(),
            )
        }
    }
    /// Resizes to a square no larger than `max_extent`, rounding per `mode`.
    ///
    /// # Panics
    ///
    /// Panics if `max_extent` is zero.
    pub fn resize_make_square(&mut self, max_extent: u32, mode: RoundMode, filter: Filter<Resize>) {
        if max_extent == 0 {
            panic!("invalid max extent");
        }
        unsafe {
            nvtt_sys::nvttSurfaceResizeMakeSquare(
                self.0,
                max_extent as i32,
                mode.into(),
                filter.algorithm.into(),
                std::ptr::null_mut(),
            );
        }
    }
    /// Changes the canvas size to `w`×`h`×`d` without filtering.
    ///
    /// # Panics
    ///
    /// Panics if any dimension is zero.
    pub fn canvas_resize(&mut self, w: u32, h: u32, d: u32) {
        if w == 0 || h == 0 || d == 0 {
            panic!("invalid canvas dimensions");
        }
        unsafe {
            nvtt_sys::nvttSurfaceCanvasSize(
                self.0,
                w as i32,
                h as i32,
                d as i32,
                std::ptr::null_mut(),
            );
        }
    }
    /// Multiplies the color channels by alpha.
    pub fn premultiply_alpha(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfacePremultiplyAlpha(self.0, std::ptr::null_mut());
        }
    }
    /// Divides the color channels by alpha; `epsilon` (default `1e-12`)
    /// guards the division.
    ///
    /// # Panics
    ///
    /// Panics if `epsilon` is exactly zero.
    pub fn demultiply_alpha(&mut self, epsilon: Option<f32>) {
        let epsilon = epsilon.unwrap_or(1e-12);
        if epsilon == 0.0 {
            panic!("epsilon must be nonzero");
        }
        unsafe {
            nvtt_sys::nvttSurfaceDemultiplyAlpha(self.0, epsilon, std::ptr::null_mut());
        }
    }
    /// Converts to grey scale using the given per-channel weights.
    pub fn to_grey_scale(&mut self, rgba_scale: [f32; 4]) {
        let r_scale = rgba_scale[0];
        let g_scale = rgba_scale[1];
        let b_scale = rgba_scale[2];
        let a_scale = rgba_scale[3];
        unsafe {
            nvtt_sys::nvttSurfaceToGreyScale(
                self.0,
                r_scale,
                g_scale,
                b_scale,
                a_scale,
                std::ptr::null_mut(),
            )
        }
    }
    /// Converts gamma-space values (exponent `gamma`) to linear.
    pub fn from_gamma(&mut self, gamma: f32) {
        unsafe { nvtt_sys::nvttSurfaceToLinear(self.0, gamma, std::ptr::null_mut()) }
    }
    /// Converts a single channel from gamma space to linear.
    pub fn channel_from_gamma(&mut self, channel: Channel, gamma: f32) {
        let channel = channel as i32;
        unsafe {
            nvtt_sys::nvttSurfaceToLinearChannel(self.0, channel, gamma, std::ptr::null_mut())
        }
    }
    /// Converts linear values to gamma space with exponent `gamma`.
    pub fn to_gamma(&mut self, gamma: f32) {
        unsafe { nvtt_sys::nvttSurfaceToGamma(self.0, gamma, std::ptr::null_mut()) }
    }
    /// Converts a single channel from linear to gamma space.
    pub fn channel_to_gamma(&mut self, channel: Channel, gamma: f32) {
        let channel = channel as i32;
        unsafe { nvtt_sys::nvttSurfaceToGammaChannel(self.0, channel, gamma, std::ptr::null_mut()) }
    }
    /// Converts linear values to the sRGB transfer curve.
    pub fn to_srgb(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceToSrgb(self.0, std::ptr::null_mut());
        }
    }
    /// Converts sRGB values back to linear.
    pub fn from_srgb(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceToLinearFromSrgb(self.0, std::ptr::null_mut());
        }
    }
    /// Converts linear values to the Xenon (Xbox 360) sRGB approximation.
    pub fn to_xenon_srgb(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceToXenonSrgb(self.0, std::ptr::null_mut());
        }
    }
    /// Encodes HDR color as RGBM; `range` defaults to `1.0`,
    /// `threshold` to `0.25`.
    pub fn to_rgbm(&mut self, range: Option<f32>, threshold: Option<f32>) {
        let range = range.unwrap_or(1.0);
        let threshold = threshold.unwrap_or(0.25);
        unsafe {
            nvtt_sys::nvttSurfaceToRGBM(self.0, range, threshold, std::ptr::null_mut());
        }
    }
    /// Decodes RGBM back to HDR color; defaults match [`Self::to_rgbm`].
    pub fn from_rgbm(&mut self, range: Option<f32>, threshold: Option<f32>) {
        let range = range.unwrap_or(1.0);
        let threshold = threshold.unwrap_or(0.25);
        unsafe {
            nvtt_sys::nvttSurfaceFromRGBM(self.0, range, threshold, std::ptr::null_mut());
        }
    }
    /// Applies the given tone-mapping operator (with NVTT's default
    /// parameters; the parameter pointer is left null).
    pub fn tonemap(&mut self, tm: ToneMapper) {
        let parameters = std::ptr::null_mut();
        unsafe {
            nvtt_sys::nvttSurfaceToneMap(self.0, tm.into(), parameters, std::ptr::null_mut());
        }
    }
    /// Encodes HDR color as shared-exponent RGBE.
    ///
    /// # Safety
    ///
    /// The caller must pass mantissa/exponent bit counts that NVTT accepts;
    /// they are forwarded to the C library unchecked.
    pub unsafe fn to_rgbe(&mut self, mantissa_bits: u32, exponent_bits: u32) {
        unsafe {
            nvtt_sys::nvttSurfaceToRGBE(
                self.0,
                mantissa_bits as i32,
                exponent_bits as i32,
                std::ptr::null_mut(),
            );
        }
    }
    /// Decodes shared-exponent RGBE back to HDR color.
    ///
    /// # Safety
    ///
    /// The caller must pass mantissa/exponent bit counts that NVTT accepts;
    /// they are forwarded to the C library unchecked.
    pub unsafe fn from_rgbe(&mut self, mantissa_bits: u32, exponent_bits: u32) {
        unsafe {
            nvtt_sys::nvttSurfaceFromRGBE(
                self.0,
                mantissa_bits as i32,
                exponent_bits as i32,
                std::ptr::null_mut(),
            );
        }
    }
    /// Converts RGB to the YCoCg color space.
    pub fn to_ycocg(&mut self) {
        unsafe {
            nvtt_sys::nvttSurfaceToYCoCg(self.0, std::ptr::null_mut());
        }
    }
    /// Applies per-block scaling to the Co/Cg channels; `bits` defaults
    /// to `5`.
    ///
    /// # Safety
    ///
    /// The caller must pass a bit count that NVTT accepts; it is forwarded
    /// to the C library unchecked.
    pub unsafe fn block_scale_cocg(&mut self, bits: Option<u32>) {
        let bits = bits.unwrap_or(5);
        let threshold = 0.0;
        unsafe {
            nvtt_sys::nvttSurfaceBlockScaleCoCg(
                self.0,
                bits as i32,
                threshold,
                std::ptr::null_mut(),
            )
        }
    }
pub fn from_ycocg(&mut self) {
unsafe {
nvtt_sys::nvttSurfaceToYCoCg(self.0, std::ptr::null_mut());
}
}
pub fn to_lm(&mut self, range: Option<f32>, threshold: Option<f32>) {
let range = range.unwrap_or(1.0);
let threshold = threshold.unwrap_or(0.0);
unsafe {
nvtt_sys::nvttSurfaceToLM(self.0, range, threshold, std::ptr::null_mut());
}
}
pub fn to_luvw(&mut self, range: Option<f32>) {
let range = range.unwrap_or(1.0);
unsafe {
nvtt_sys::nvttSurfaceToLUVW(self.0, range, std::ptr::null_mut());
}
}
pub fn from_luvw(&mut self, range: Option<f32>) {
let range = range.unwrap_or(1.0);
unsafe {
nvtt_sys::nvttSurfaceFromLUVW(self.0, range, std::ptr::null_mut());
}
}
pub fn to_log_scale(&mut self, channel: Channel, base: f32) {
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceToLogScale(self.0, channel, base, std::ptr::null_mut());
}
}
pub fn from_log_scale(&mut self, channel: Channel, base: f32) {
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceFromLogScale(self.0, channel, base, std::ptr::null_mut());
}
}
pub fn alpha_test_coverage(&self, alpha_ref: f32, alpha_channel: Channel) -> f32 {
let alpha_channel = alpha_channel as i32;
unsafe { nvtt_sys::nvttSurfaceAlphaTestCoverage(self.0, alpha_ref, alpha_channel) }
}
pub fn scale_alpha_to_coverage(
&mut self,
coverage: f32,
alpha_ref: f32,
alpha_channel: Channel,
) {
let alpha_channel = alpha_channel as i32;
unsafe {
nvtt_sys::nvttSurfaceScaleAlphaToCoverage(
self.0,
coverage,
alpha_ref,
alpha_channel,
std::ptr::null_mut(),
)
}
}
pub fn average(
&self,
channel: Channel,
alpha_channel: Option<Channel>,
gamma: Option<f32>,
) -> f32 {
let gamma = gamma.unwrap_or(2.2);
let channel = channel as i32;
let alpha_channel = alpha_channel.map(|x| x as i32).unwrap_or(-1);
unsafe { nvtt_sys::nvttSurfaceAverage(self.0, channel, alpha_channel, gamma) }
}
pub fn histogram(&self, channel: Channel, range_min: f32, range_max: f32, bins: &mut [i32]) {
if bins.is_empty() {
panic!("bins must be non empty");
}
let channel = channel as i32;
let bin_count = bins.len() as i32;
let bin_ptr = bins.as_mut_ptr();
unsafe {
nvtt_sys::nvttSurfaceHistogram(
self.0,
channel,
range_min,
range_max,
bin_count,
bin_ptr,
std::ptr::null_mut(),
)
}
}
pub fn range(
&self,
channel: Channel,
alpha_channel: Option<Channel>,
alpha_ref: f32,
) -> (f32, f32) {
let channel = channel as i32;
let alpha_channel = alpha_channel.map(|x| x as i32).unwrap_or(-1);
let mut range_min: f32 = 0.0;
let mut range_max: f32 = 0.0;
let range_min_ptr: *mut f32 = &mut range_min;
let range_max_ptr: *mut f32 = &mut range_max;
unsafe {
nvtt_sys::nvttSurfaceRange(
self.0,
channel,
range_min_ptr,
range_max_ptr,
alpha_channel,
alpha_ref,
std::ptr::null_mut(),
)
}
(range_min, range_max)
}
pub fn transform(
&mut self,
w0: [f32; 4],
w1: [f32; 4],
w2: [f32; 4],
w3: [f32; 4],
offset: [f32; 4],
) {
unsafe {
nvtt_sys::nvttSurfaceTransform(
self.0,
w0.as_ptr(),
w1.as_ptr(),
w2.as_ptr(),
w3.as_ptr(),
offset.as_ptr(),
std::ptr::null_mut(),
);
}
}
pub fn swizzle(&mut self, r: Swizzle, g: Swizzle, b: Swizzle, a: Swizzle) {
unsafe {
nvtt_sys::nvttSurfaceSwizzle(
self.0,
r as i32,
g as i32,
b as i32,
a as i32,
std::ptr::null_mut(),
)
}
}
pub fn scale_bias(&mut self, channel: Channel, scale: f32, bias: f32) {
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceScaleBias(self.0, channel, scale, bias, std::ptr::null_mut());
}
}
pub fn abs(&mut self, channel: Channel) {
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceAbs(self.0, channel, std::ptr::null_mut());
}
}
pub fn clamp(&mut self, channel: Channel, low: f32, high: f32) {
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceClamp(self.0, channel, low, high, std::ptr::null_mut());
}
}
pub fn blend(&mut self, rgba: [f32; 4], t: f32) {
let r = rgba[0];
let g = rgba[1];
let b = rgba[2];
let a = rgba[3];
let t = t.clamp(0.0, 1.0);
unsafe { nvtt_sys::nvttSurfaceBlend(self.0, r, g, b, a, t, std::ptr::null_mut()) }
}
pub fn convolve_slice(&mut self, channel: Channel, dim: u32, kernel: &mut [f32]) {
if dim * dim > kernel.len() as u32 {
panic!("kernel does not hold enough values");
}
if dim == 0 {
panic!("kernel must not be empty");
}
let channel = channel as i32;
let kernel_ptr = kernel.as_mut_ptr();
unsafe {
nvtt_sys::nvttSurfaceConvolve(
self.0,
channel,
dim as i32,
kernel_ptr,
std::ptr::null_mut(),
);
}
}
pub fn convolve<const N: usize>(&mut self, channel: Channel, mut kernel: [[f32; N]; N]) {
if N == 0 {
panic!("kernel must not be empty");
}
let channel = channel as i32;
let kernel_ptr = kernel[0].as_mut_ptr();
unsafe {
nvtt_sys::nvttSurfaceConvolve(
self.0,
channel,
N as i32,
kernel_ptr,
std::ptr::null_mut(),
);
}
}
pub fn binarize(&mut self, channel: Channel, threshold: f32, dither: bool) {
if dither && self.depth() > 1 {
panic!("binarize dithering not supported for 3D surfaces");
}
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceBinarize(
self.0,
channel,
threshold,
dither.into(),
std::ptr::null_mut(),
);
}
}
pub unsafe fn quantize(
&mut self,
channel: Channel,
bits: u32,
exact_endpoints: bool,
dither: bool,
) {
if dither && self.depth() > 1 {
panic!("quantize dithering not supported for 3D surfaces");
}
let channel = channel as i32;
unsafe {
nvtt_sys::nvttSurfaceQuantize(
self.0,
channel,
bits as i32,
exact_endpoints.into(),
dither.into(),
std::ptr::null_mut(),
);
}
}
pub fn to_normal_map(&mut self, sm: f32, medium: f32, big: f32, large: f32) {
unsafe {
nvtt_sys::nvttSurfaceToNormalMap(self.0, sm, medium, big, large, std::ptr::null_mut());
}
}
pub fn transform_normals(&mut self, transform: NormalTransform) {
unsafe {
nvtt_sys::nvttSurfaceTransformNormals(self.0, transform.into(), std::ptr::null_mut());
}
}
pub fn reconstruct_normals(&mut self, transform: NormalTransform) {
unsafe {
nvtt_sys::nvttSurfaceReconstructNormals(self.0, transform.into(), std::ptr::null_mut());
}
}
pub fn to_clean_normal_map(&mut self) {
unsafe {
nvtt_sys::nvttSurfaceToCleanNormalMap(self.0, std::ptr::null_mut());
}
}
/// Packs normals with scale 0.5 and bias 0.5 — the conventional
/// `x * 0.5 + 0.5` remapping of [-1, 1] components into [0, 1].
pub fn pack_normals(&mut self) {
    // SAFETY: `self.0` is a valid surface pointer by construction.
    unsafe { nvtt_sys::nvttSurfacePackNormals(self.0, 0.5, 0.5, std::ptr::null_mut()) }
}
/// Unpacks normals with scale 2.0 and bias -1.0 — the inverse of
/// [`Self::pack_normals`], remapping [0, 1] components back to [-1, 1].
pub fn unpack_normals(&mut self) {
    // SAFETY: `self.0` is a valid surface pointer by construction.
    unsafe { nvtt_sys::nvttSurfaceExpandNormals(self.0, 2.0, -1.0, std::ptr::null_mut()) }
}
}
impl Drop for Surface {
// Frees the nvtt-owned surface when the wrapper goes out of scope.
fn drop(&mut self) {
// SAFETY: `self.0` was returned by an nvtt allocation call and has not
// been destroyed yet; ownership is handed back to nvtt here.
unsafe {
nvtt_sys::nvttDestroySurface(self.0);
}
}
}
impl Clone for Surface {
    /// Deep-copies the underlying nvtt surface.
    ///
    /// # Panics
    /// Panics if nvtt fails to allocate the clone.
    fn clone(&self) -> Self {
        // SAFETY: `self.0` is a valid surface pointer by construction.
        let ptr = unsafe { nvtt_sys::nvttSurfaceClone(self.0) };
        assert!(!ptr.is_null(), "failed to allocate");
        Self(ptr)
    }
}
#[cfg(test)]
mod tests {
#[cfg(feature = "cuda")]
use crate::CUDA_SUPPORTED;
use crate::{Channel, InputFormat, Surface, TextureType};
use approx::assert_relative_eq;
// A 1x1 BGRA image (blue = 255) loads as a 2D surface with four planar
// float channels; only the blue channel (and data()[2]) reads back 1.0.
#[test]
fn pixel() {
let input_format = InputFormat::Bgra8Ub {
data: &[255, 0, 0, 0],
unsigned_to_signed: false,
};
let surface = Surface::image(input_format, 1, 1, 1).unwrap();
assert_eq!(1, surface.width());
assert_eq!(1, surface.height());
assert_eq!(1, surface.depth());
assert_eq!(TextureType::D2, surface.tex_type());
// data() is planar: one value per channel for a 1x1 image, in
// R, G, B, A order (index 2 = blue).
assert_eq!(4, surface.data().len());
assert_eq!(1, surface.channel(Channel::R).len());
assert_eq!(1, surface.channel(Channel::G).len());
assert_eq!(1, surface.channel(Channel::B).len());
assert_eq!(1, surface.channel(Channel::A).len());
assert_relative_eq!(0.0, surface.data()[0]);
assert_relative_eq!(0.0, surface.data()[1]);
assert_relative_eq!(1.0, surface.data()[2]);
assert_relative_eq!(0.0, surface.data()[3]);
assert_relative_eq!(0.0, surface.channel(Channel::R)[0]);
assert_relative_eq!(0.0, surface.channel(Channel::G)[0]);
assert_relative_eq!(1.0, surface.channel(Channel::B)[0]);
assert_relative_eq!(0.0, surface.channel(Channel::A)[0]);
}
// Reference model of the unsigned-to-signed mapping: 0 maps to -1.0,
// and 1..=255 map linearly onto [-1, 1] via (x - 1) / 254. Note that
// both 0 and 1 therefore map to -1.0 (checked below via g_conv/b_conv).
fn unsigned_conv(x: u8) -> f32 {
match x {
0 => -1.0,
x => {
let x = (x - 1) as f32;
let t = x / 254.0;
-1.0 + t * (2.0)
}
}
}
#[test]
fn unsigned_to_signed() {
// Byte order in InputFormat::Bgra8Ub is b, g, r, a.
let b = 1;
let g = 0;
let r = 128;
let a = 255;
let input_format = InputFormat::Bgra8Ub {
data: &[b, g, r, a],
unsigned_to_signed: true,
};
let surface = Surface::image(input_format, 1, 1, 1).unwrap();
// Each channel must match the reference mapping above.
assert_relative_eq!(unsigned_conv(r), surface.channel(Channel::R)[0]);
assert_relative_eq!(unsigned_conv(g), surface.channel(Channel::G)[0]);
assert_relative_eq!(unsigned_conv(b), surface.channel(Channel::B)[0]);
assert_relative_eq!(unsigned_conv(a), surface.channel(Channel::A)[0]);
// Inputs 0 and 1 collapse to the same signed value (-1.0).
let g_conv = surface.channel(Channel::G)[0];
let b_conv = surface.channel(Channel::B)[0];
assert_relative_eq!(g_conv, b_conv);
// Concrete endpoints: 128 -> 0.0, 0 -> -1.0, 1 -> -1.0, 255 -> 1.0.
assert_relative_eq!(0.0, surface.channel(Channel::R)[0]);
assert_relative_eq!(-1.0, surface.channel(Channel::G)[0]);
assert_relative_eq!(-1.0, surface.channel(Channel::B)[0]);
assert_relative_eq!(1.0, surface.channel(Channel::A)[0]);
}
// A single opaque white pixel, shared by the CUDA round-trip tests.
#[cfg(feature = "cuda")]
const BASIC_INPUT: InputFormat = InputFormat::Bgra8Ub {
data: &[255, 255, 255, 255],
unsigned_to_signed: false,
};
// Read-only accessors (channel(), data()) must leave the surface on the
// GPU; mutable accessors (channel_mut(), data_mut()) must sync it back
// to the CPU and null the GPU pointer.
#[test]
#[cfg(feature = "cuda")]
fn channel_mut_cpu_gpu() {
if *CUDA_SUPPORTED {
let mut surface = Surface::image(BASIC_INPUT, 1, 1, 1).unwrap();
// Fresh surfaces start on the CPU with no GPU allocation.
assert!(!surface.on_gpu());
assert!(surface.gpu_data_ptr().is_null());
surface.to_gpu();
assert!(surface.on_gpu());
assert!(!surface.gpu_data_ptr().is_null());
// Read access does not move the surface off the GPU.
assert_relative_eq!(1.0, surface.channel(Channel::R)[0]);
assert!(surface.on_gpu());
assert!(!surface.gpu_data_ptr().is_null());
// Mutable access forces a sync back to the CPU.
surface.channel_mut(Channel::R)[0] = 0.0;
assert!(!surface.on_gpu());
assert_relative_eq!(0.0, surface.channel(Channel::R)[0]);
surface.to_gpu();
assert!(surface.on_gpu());
assert!(!surface.gpu_data_ptr().is_null());
// Same contract for data()/data_mut().
assert_relative_eq!(0.0, surface.data()[0]);
assert!(surface.on_gpu());
assert!(!surface.gpu_data_ptr().is_null());
surface.data_mut()[0] = 1.0;
assert!(!surface.on_gpu());
assert_relative_eq!(1.0, surface.channel(Channel::R)[0]);
surface.to_gpu();
assert!(surface.on_gpu());
assert!(!surface.gpu_data_ptr().is_null());
}
}
// Surface-mutating operations (here: an affine channel transform) must
// run while the surface stays resident on the GPU, and produce the same
// values the CPU path would.
#[test]
#[cfg(feature = "cuda")]
fn function_mut_cpu_gpu() {
if *CUDA_SUPPORTED {
let mut surface = Surface::image(BASIC_INPUT, 1, 1, 1).unwrap();
surface.to_gpu();
assert!(surface.on_gpu());
let old_r = surface.channel(Channel::R)[0];
let old_g = surface.channel(Channel::G)[0];
let old_b = surface.channel(Channel::B)[0];
let old_a = surface.channel(Channel::A)[0];
assert!(surface.on_gpu());
// Diagonal weight matrix (2, 4, 6, 8) plus an offset of 1 per channel.
let w0 = [2., 0., 0., 0.];
let w1 = [0., 4., 0., 0.];
let w2 = [0., 0., 6., 0.];
let w3 = [0., 0., 0., 8.];
let offset = [1., 1., 1., 1.];
surface.transform(w0, w1, w2, w3, offset);
assert!(surface.on_gpu());
let new_r = surface.channel(Channel::R)[0];
assert!(surface.on_gpu());
// Each channel i becomes weight_i * old_i + 1.
assert_relative_eq!(2.0 * old_r + 1.0, new_r);
assert_relative_eq!(4.0 * old_g + 1.0, surface.channel(Channel::G)[0]);
assert_relative_eq!(6.0 * old_b + 1.0, surface.channel(Channel::B)[0]);
assert_relative_eq!(8.0 * old_a + 1.0, surface.channel(Channel::A)[0]);
}
}
// Smoke test: to_rgbe/from_rgbe round-trip on a 2x1 image must not
// crash (no value assertions; the conversion is lossy).
#[test]
fn rgbe() {
let input_format = InputFormat::Bgra8Ub {
data: &[32, 64, 128, 234, 255, 32, 64, 85],
unsigned_to_signed: false,
};
let mut surface = Surface::image(input_format, 2, 1, 1).unwrap();
unsafe {
surface.to_rgbe(25, 25);
surface.from_rgbe(25, 25);
}
}
// Compresses a 16x16 image with each supported format and checks the
// output size: BC1/BC4 use 0.5 bytes per pixel, BC2/BC3/BC5/BC6/BC7 use
// 1 byte per pixel, and uncompressed RGBA uses 4 bytes per pixel.
#[test]
fn compression() {
use crate::{
CompressionOptions, Context, Format, InputFormat, OutputOptions, Quality, Surface,
};
let input = InputFormat::Bgra8Ub {
data: &[0u8; 16 * 16 * 4],
unsigned_to_signed: false,
};
let image = Surface::image(input, 16, 16, 1).unwrap();
let context = Context::new();
// With the cuda feature, use GPU acceleration when available.
#[cfg(feature = "cuda")]
let context = {
if *crate::CUDA_SUPPORTED {
let mut context = context;
context.set_cuda_acceleration(true);
context
} else {
context
}
};
let mut compression_options = CompressionOptions::new();
compression_options.set_quality(Quality::Fastest);
let output_options = OutputOptions::new();
compression_options.set_format(Format::Bc1);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16 / 2, bytes.len());
compression_options.set_format(Format::Bc2);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16, bytes.len());
compression_options.set_format(Format::Bc3);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16, bytes.len());
compression_options.set_format(Format::Bc4S);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16 / 2, bytes.len());
compression_options.set_format(Format::Bc5S);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16, bytes.len());
compression_options.set_format(Format::Bc6S);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16, bytes.len());
compression_options.set_format(Format::Bc7);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16, bytes.len());
compression_options.set_format(Format::Rgba);
let bytes = context
.compress(&image, &compression_options, &output_options)
.unwrap();
assert_eq!(16 * 16 * 4, bytes.len());
}
}