pub mod backend;
pub mod opengl;
pub mod vulkan;
#[cfg(target_os = "macos")]
pub mod metal;
#[cfg(target_os = "windows")]
pub mod directx;
mod types;
pub use backend::*;
pub use types::*;
use super::{Handle, HandleStore};
use crate::fitz::geometry::Matrix;
use std::sync::LazyLock;
/// Global registry of live GPU devices, keyed by the FFI `Handle` values
/// returned to C callers. Lazily initialized on first use.
pub static GPU_DEVICES: LazyLock<HandleStore<Box<dyn GpuDevice + Send + Sync>>> =
LazyLock::new(HandleStore::new);
/// Global registry of live GPU textures, keyed by FFI handle.
pub static GPU_TEXTURES: LazyLock<HandleStore<GpuTexture>> = LazyLock::new(HandleStore::new);
/// Global registry of live GPU shaders, keyed by FFI handle.
pub static GPU_SHADERS: LazyLock<HandleStore<GpuShader>> = LazyLock::new(HandleStore::new);
/// Global registry of live GPU buffers, keyed by FFI handle.
pub static GPU_BUFFERS: LazyLock<HandleStore<GpuBuffer>> = LazyLock::new(HandleStore::new);
/// FFI: create a GPU rendering device.
///
/// `backend` selects the API: 0 = Auto, 1 = OpenGL, 2 = Vulkan, 3 = Metal,
/// 4 = DirectX11, 5 = DirectX12. Returns a handle into `GPU_DEVICES` on
/// success, or 0 on an unrecognized code or device-creation failure.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_create_device(backend: i32) -> Handle {
    let requested = match backend {
        0 => GpuBackendType::Auto,
        1 => GpuBackendType::OpenGL,
        2 => GpuBackendType::Vulkan,
        3 => GpuBackendType::Metal,
        4 => GpuBackendType::DirectX11,
        5 => GpuBackendType::DirectX12,
        _ => return 0,
    };
    create_device(requested).map_or(0, |device| GPU_DEVICES.insert(device))
}
/// FFI: remove the device identified by `device` from the global store,
/// dropping it.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_drop_device(device: Handle) {
    let _ = GPU_DEVICES.remove(device);
}
/// FFI: report the backend code of an existing device.
///
/// Returns the same codes accepted by `fz_gpu_create_device`, or -1 when
/// the handle is unknown or the device cannot be locked.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_device_backend(device: Handle) -> i32 {
    let Some(dev) = GPU_DEVICES.get(device) else {
        return -1;
    };
    let Ok(guard) = dev.lock() else {
        return -1;
    };
    match guard.backend() {
        GpuBackendType::Auto => 0,
        GpuBackendType::OpenGL => 1,
        GpuBackendType::Vulkan => 2,
        GpuBackendType::Metal => 3,
        GpuBackendType::DirectX11 => 4,
        GpuBackendType::DirectX12 => 5,
    }
}
/// FFI: query whether a specific backend is available.
///
/// Returns 1 if available, 0 otherwise — including for code 0 (`Auto`)
/// and any unrecognized code, which cannot be probed directly.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_backend_available(backend: i32) -> i32 {
    let probe = match backend {
        1 => GpuBackendType::OpenGL,
        2 => GpuBackendType::Vulkan,
        3 => GpuBackendType::Metal,
        4 => GpuBackendType::DirectX11,
        5 => GpuBackendType::DirectX12,
        _ => return 0,
    };
    if is_backend_available(probe) { 1 } else { 0 }
}
/// FFI: allocate a texture of `width` x `height` pixels on `device`.
///
/// Format codes: 0 = Rgba8, 1 = Bgra8, 2 = Rgb8, 3 = R8, 4 = Rgba16f,
/// 5 = Rgba32f. Returns a texture handle, or 0 on any failure (bad
/// format, non-positive dimensions, unknown device, lock failure, or
/// creation error).
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_create_texture(
    device: Handle,
    width: i32,
    height: i32,
    format: i32,
) -> Handle {
    let fmt = match format {
        0 => GpuFormat::Rgba8,
        1 => GpuFormat::Bgra8,
        2 => GpuFormat::Rgb8,
        3 => GpuFormat::R8,
        4 => GpuFormat::Rgba16f,
        5 => GpuFormat::Rgba32f,
        _ => return 0,
    };
    // Reject non-positive dimensions: `i32 as u32` would sign-extend a
    // negative value into a ~4-billion-pixel request.
    if width <= 0 || height <= 0 {
        return 0;
    }
    let Some(dev) = GPU_DEVICES.get(device) else {
        return 0;
    };
    let Ok(guard) = dev.lock() else {
        return 0;
    };
    match guard.create_texture(width as u32, height as u32, fmt) {
        Ok(texture) => GPU_TEXTURES.insert(texture),
        Err(_) => 0,
    }
}
/// FFI: remove the texture identified by `texture` from the global store,
/// dropping it.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_drop_texture(texture: Handle) {
    let _ = GPU_TEXTURES.remove(texture);
}
/// FFI: width of `texture` in pixels, or 0 if the handle is unknown or
/// the texture cannot be locked.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_texture_width(texture: Handle) -> i32 {
    GPU_TEXTURES
        .get(texture)
        .and_then(|tex| tex.lock().ok().map(|guard| guard.width as i32))
        .unwrap_or(0)
}
/// FFI: height of `texture` in pixels, or 0 if the handle is unknown or
/// the texture cannot be locked.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_texture_height(texture: Handle) -> i32 {
    GPU_TEXTURES
        .get(texture)
        .and_then(|tex| tex.lock().ok().map(|guard| guard.height as i32))
        .unwrap_or(0)
}
/// FFI: upload pixel data from host memory into `texture` on `device`.
///
/// `stride` is the source row pitch in bytes; pass 0 for tightly packed
/// rows (`width * bytes_per_pixel`). Negative strides are rejected.
/// Returns 0 on success, -1 on failure.
///
/// # Safety
///
/// If non-null, `data` must point to at least `stride * height` readable
/// bytes (or `width * bytes_per_pixel * height` when `stride` is 0).
#[unsafe(no_mangle)]
pub unsafe extern "C" fn fz_gpu_texture_upload(
    device: Handle,
    texture: Handle,
    data: *const u8,
    stride: i32,
) -> i32 {
    // Reject NULL data and negative strides up front: a negative stride
    // would otherwise be sign-extended by `as u32` into a huge value.
    if data.is_null() || stride < 0 {
        return -1;
    }
    let Some(dev) = GPU_DEVICES.get(device) else {
        return -1;
    };
    let Ok(dev_guard) = dev.lock() else {
        return -1;
    };
    let Some(tex) = GPU_TEXTURES.get(texture) else {
        return -1;
    };
    let Ok(mut tex_guard) = tex.lock() else {
        return -1;
    };
    // Compute the source buffer size while holding the texture lock, so the
    // size and the upload observe the same texture state (previously the
    // dimensions were read under a separate, earlier lock).
    let row_bytes = if stride > 0 {
        stride as usize
    } else {
        (tex_guard.width as usize) * tex_guard.format.bytes_per_pixel()
    };
    let data_size = row_bytes * (tex_guard.height as usize);
    // SAFETY: `data` is non-null and the caller guarantees it points to at
    // least `data_size` readable bytes (see `# Safety`).
    let slice = unsafe { std::slice::from_raw_parts(data, data_size) };
    if dev_guard
        .upload_texture(&mut tex_guard, slice, stride as u32)
        .is_ok()
    {
        0
    } else {
        -1
    }
}
/// FFI: download pixel data from `texture` on `device` into host memory.
///
/// `stride` is the destination row pitch in bytes; pass 0 for tightly
/// packed rows (`width * bytes_per_pixel`). Negative strides are rejected.
/// Returns 0 on success, -1 on failure.
///
/// # Safety
///
/// If non-null, `data` must point to at least `stride * height` writable
/// bytes (or `width * bytes_per_pixel * height` when `stride` is 0), and
/// must not alias memory read elsewhere during the call.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn fz_gpu_texture_download(
    device: Handle,
    texture: Handle,
    data: *mut u8,
    stride: i32,
) -> i32 {
    // Reject NULL data and negative strides up front: a negative stride
    // would otherwise be sign-extended by `as u32` into a huge value.
    if data.is_null() || stride < 0 {
        return -1;
    }
    let Some(dev) = GPU_DEVICES.get(device) else {
        return -1;
    };
    let Ok(dev_guard) = dev.lock() else {
        return -1;
    };
    let Some(tex) = GPU_TEXTURES.get(texture) else {
        return -1;
    };
    let Ok(tex_guard) = tex.lock() else {
        return -1;
    };
    // Compute the destination buffer size while holding the texture lock,
    // so the size and the download observe the same texture state
    // (previously the dimensions were read under a separate, earlier lock).
    let row_bytes = if stride > 0 {
        stride as usize
    } else {
        (tex_guard.width as usize) * tex_guard.format.bytes_per_pixel()
    };
    let data_size = row_bytes * (tex_guard.height as usize);
    // SAFETY: `data` is non-null and the caller guarantees it points to at
    // least `data_size` writable, non-aliased bytes (see `# Safety`).
    let slice = unsafe { std::slice::from_raw_parts_mut(data, data_size) };
    if dev_guard
        .download_texture(&tex_guard, slice, stride as u32)
        .is_ok()
    {
        0
    } else {
        -1
    }
}
/// FFI: render page `page` into `texture` on `device` using the transform
/// pointed to by `ctm`. Returns 0 on success, -1 on failure.
///
/// # Safety
///
/// If non-null, `ctm` must point to at least 6 readable, properly aligned
/// `f32` values (the matrix coefficients in the order `Matrix::new`
/// expects). The function is now `unsafe` — the original was a *safe* fn
/// dereferencing this raw pointer, which let safe Rust callers trigger UB;
/// this also matches the other pointer-taking exports in this module.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn fz_gpu_render_page(
    device: Handle,
    page: Handle,
    texture: Handle,
    ctm: *const f32,
) -> i32 {
    if ctm.is_null() {
        return -1;
    }
    // SAFETY: `ctm` is non-null and the caller guarantees 6 valid f32s.
    let m = unsafe { std::slice::from_raw_parts(ctm, 6) };
    let matrix = Matrix::new(m[0], m[1], m[2], m[3], m[4], m[5]);
    let Some(dev) = GPU_DEVICES.get(device) else {
        return -1;
    };
    let Ok(guard) = dev.lock() else {
        return -1;
    };
    let Some(tex) = GPU_TEXTURES.get(texture) else {
        return -1;
    };
    let Ok(mut tex_guard) = tex.lock() else {
        return -1;
    };
    if guard.render_page(page, &mut tex_guard, &matrix).is_ok() {
        0
    } else {
        -1
    }
}
/// FFI: fill `texture` on `device` with the constant color `(r, g, b, a)`.
/// Returns 0 on success, -1 on failure (unknown handle or lock failure).
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_clear_texture(
    device: Handle,
    texture: Handle,
    r: f32,
    g: f32,
    b: f32,
    a: f32,
) -> i32 {
    let Some(dev) = GPU_DEVICES.get(device) else {
        return -1;
    };
    let Ok(guard) = dev.lock() else {
        return -1;
    };
    let Some(tex) = GPU_TEXTURES.get(texture) else {
        return -1;
    };
    let Ok(mut tex_guard) = tex.lock() else {
        return -1;
    };
    match guard.clear_texture(&mut tex_guard, [r, g, b, a]) {
        Ok(_) => 0,
        Err(_) => -1,
    }
}
/// FFI: blend texture `src` onto texture `dst` at offset `(x, y)`.
///
/// Blend codes: 0 = Normal, 1 = Multiply, 2 = Screen, 3 = Overlay,
/// 4 = Darken, 5 = Lighten; any other code falls back to Normal.
/// Compositing a texture onto itself is rejected. Returns 0 on success,
/// -1 on failure.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_composite(
    device: Handle,
    src: Handle,
    dst: Handle,
    x: i32,
    y: i32,
    blend_mode: i32,
) -> i32 {
    // When src == dst both lookups resolve to the same entry, so locking
    // src then dst would lock the same mutex twice and deadlock the caller.
    // Reject this case explicitly.
    if src == dst {
        return -1;
    }
    let mode = match blend_mode {
        1 => GpuBlendMode::Multiply,
        2 => GpuBlendMode::Screen,
        3 => GpuBlendMode::Overlay,
        4 => GpuBlendMode::Darken,
        5 => GpuBlendMode::Lighten,
        _ => GpuBlendMode::Normal,
    };
    let Some(dev) = GPU_DEVICES.get(device) else {
        return -1;
    };
    let Ok(guard) = dev.lock() else {
        return -1;
    };
    let Some(src_tex) = GPU_TEXTURES.get(src) else {
        return -1;
    };
    let Some(dst_tex) = GPU_TEXTURES.get(dst) else {
        return -1;
    };
    let Ok(src_guard) = src_tex.lock() else {
        return -1;
    };
    let Ok(mut dst_guard) = dst_tex.lock() else {
        return -1;
    };
    if guard
        .composite(&src_guard, &mut dst_guard, x, y, mode)
        .is_ok()
    {
        0
    } else {
        -1
    }
}
/// FFI: compile a shader on `device` from NUL-terminated vertex and
/// fragment source strings. Returns a shader handle, or 0 on failure
/// (null or non-UTF-8 input, unknown device, lock failure, or a
/// shader-creation error).
///
/// # Safety
///
/// `vertex_src` and `fragment_src`, if non-null, must each point to a
/// valid NUL-terminated C string that stays alive for the duration of
/// the call.
#[unsafe(no_mangle)]
pub unsafe extern "C" fn fz_gpu_create_shader(
    device: Handle,
    vertex_src: *const std::ffi::c_char,
    fragment_src: *const std::ffi::c_char,
) -> Handle {
    if vertex_src.is_null() || fragment_src.is_null() {
        return 0;
    }
    // SAFETY: both pointers are non-null; the caller guarantees they are
    // valid NUL-terminated C strings (see `# Safety`).
    let (vertex, fragment) = unsafe {
        (
            std::ffi::CStr::from_ptr(vertex_src).to_str(),
            std::ffi::CStr::from_ptr(fragment_src).to_str(),
        )
    };
    let (Ok(vertex), Ok(fragment)) = (vertex, fragment) else {
        return 0;
    };
    let Some(dev) = GPU_DEVICES.get(device) else {
        return 0;
    };
    let Ok(guard) = dev.lock() else {
        return 0;
    };
    guard
        .create_shader(vertex, fragment)
        .map_or(0, |shader| GPU_SHADERS.insert(shader))
}
/// FFI: remove the shader identified by `shader` from the global store,
/// dropping it.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_drop_shader(shader: Handle) {
    let _ = GPU_SHADERS.remove(shader);
}
/// FFI: allocate a GPU buffer of `size` bytes on `device`.
///
/// Usage codes: 0 = Vertex, 1 = Index, 2 = Uniform, 3 = Storage.
/// Returns a buffer handle, or 0 on failure. Negative sizes are
/// rejected rather than cast.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_create_buffer(device: Handle, size: i32, usage: i32) -> Handle {
    let buffer_usage = match usage {
        0 => GpuBufferUsage::Vertex,
        1 => GpuBufferUsage::Index,
        2 => GpuBufferUsage::Uniform,
        3 => GpuBufferUsage::Storage,
        _ => return 0,
    };
    // `i32 as usize` sign-extends a negative size into an enormous
    // allocation request; use a checked conversion instead.
    let Ok(size) = usize::try_from(size) else {
        return 0;
    };
    let Some(dev) = GPU_DEVICES.get(device) else {
        return 0;
    };
    let Ok(guard) = dev.lock() else {
        return 0;
    };
    match guard.create_buffer(size, buffer_usage) {
        Ok(buffer) => GPU_BUFFERS.insert(buffer),
        Err(_) => 0,
    }
}
/// FFI: remove the buffer identified by `buffer` from the global store,
/// dropping it.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_drop_buffer(buffer: Handle) {
    let _ = GPU_BUFFERS.remove(buffer);
}
/// FFI: flush the device's pending work. Returns 0 if the device's
/// `flush` succeeded, -1 on an unknown handle, lock failure, or error.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_flush(device: Handle) -> i32 {
    let flushed = GPU_DEVICES
        .get(device)
        .and_then(|dev| dev.lock().ok().map(|guard| guard.flush().is_ok()));
    match flushed {
        Some(true) => 0,
        _ => -1,
    }
}
/// FFI: run the device's `finish` operation. Returns 0 if it succeeded,
/// -1 on an unknown handle, lock failure, or error.
#[unsafe(no_mangle)]
pub extern "C" fn fz_gpu_finish(device: Handle) -> i32 {
    let finished = GPU_DEVICES
        .get(device)
        .and_then(|dev| dev.lock().ok().map(|guard| guard.finish().is_ok()));
    match finished {
        Some(true) => 0,
        _ => -1,
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    /// Smoke test: probing real backend codes must not panic; the actual
    /// availability result is platform-dependent, so it is only bound, not
    /// asserted.
    #[test]
    fn test_backend_availability() {
        let _opengl = fz_gpu_backend_available(1);
        let _vulkan = fz_gpu_backend_available(2);
    }
    /// An out-of-range backend code must be reported as unavailable (0).
    #[test]
    fn test_invalid_backend() {
        assert_eq!(fz_gpu_backend_available(99), 0);
    }
}