//! Buffer objects: creation, mapping, host copies and bindings.

use std::ops::Range;
use std::{mem, ptr, slice};

use crate::__gl;
use crate::__gl::types::{GLbitfield, GLuint};
use crate::debug::{Object, ObjectType};
use crate::device::Device;
use crate::error::Result;
/// Buffer object handle.
///
/// Stores the raw GL buffer name together with the storage flags the
/// buffer was created with; `map_buffer` reuses those flags to derive
/// the mapping access bits.
#[derive(Clone, Copy)]
pub struct Buffer(pub(crate) GLuint, GLbitfield);

impl Object for Buffer {
    const TYPE: ObjectType = ObjectType::Buffer;

    fn handle(&self) -> GLuint {
        self.0
    }
}
/// A sub-range of a buffer, used for range-based bindings.
pub struct BufferRange {
    /// Buffer to bind.
    pub buffer: Buffer,
    /// Offset (in bytes) at which the range starts.
    pub offset: usize,
    /// Size (in bytes) of the range.
    pub size: usize,
}
impl Device {
    unsafe fn create_buffer_impl(
        &self,
        size: isize,
        data_ptr: *const (),
        memory: MemoryFlags,
    ) -> Result<Buffer> {
        // Translate the portable memory flags into GL storage flags.
        let flags = {
            let mut flags = 0;
            if !memory.contains(MemoryFlags::DEVICE_LOCAL) {
                flags |= __gl::CLIENT_STORAGE_BIT;
            }
            if memory.contains(MemoryFlags::COHERENT) {
                flags |= __gl::MAP_COHERENT_BIT | __gl::MAP_PERSISTENT_BIT;
            }
            if memory.contains(MemoryFlags::CPU_MAP_READ) {
                flags |= __gl::MAP_READ_BIT | __gl::MAP_PERSISTENT_BIT;
            }
            if memory.contains(MemoryFlags::CPU_MAP_WRITE) {
                flags |= __gl::MAP_WRITE_BIT | __gl::MAP_PERSISTENT_BIT;
            }
            if memory.contains(MemoryFlags::DYNAMIC) {
                flags |= __gl::DYNAMIC_STORAGE_BIT;
            }
            flags
        };

        let mut buffer = 0;
        self.0.CreateBuffers(1, &mut buffer);
        self.get_error()?;

        // Allocate immutable storage; `data_ptr` may be null to leave the
        // contents uninitialized.
        self.0
            .NamedBufferStorage(buffer, size, data_ptr as *const _, flags);
        self.get_error()?;

        Ok(Buffer(buffer, flags))
    }
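
    /// Create a new buffer with uninitialized storage.
    ///
    /// The size of the data store is fixed for the lifetime of the buffer.
    ///
    /// A minimal usage sketch; the crate-root re-exports (`grr::Device`,
    /// `grr::MemoryFlags`, `grr::Error`) and the `device` binding are
    /// assumptions for illustration:
    ///
    /// ```no_run
    /// # unsafe fn sketch(device: &grr::Device) -> Result<(), grr::Error> {
    /// let buffer = device.create_buffer(
    ///     1024,
    ///     grr::MemoryFlags::CPU_MAP_WRITE | grr::MemoryFlags::COHERENT,
    /// )?;
    /// # Ok(())
    /// # }
    /// ```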
    pub unsafe fn create_buffer(&self, size: u64, memory: MemoryFlags) -> Result<Buffer> {
        self.create_buffer_impl(size as _, ptr::null(), memory)
    }
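
    /// Create a new buffer whose storage is initialized with `data`.
    ///
    /// A sketch under the same assumptions as [`create_buffer`](Device::create_buffer):
    ///
    /// ```no_run
    /// # unsafe fn sketch(device: &grr::Device) -> Result<(), grr::Error> {
    /// let quad_indices: [u8; 6] = [0, 1, 2, 2, 3, 0];
    /// let buffer = device.create_buffer_from_host(
    ///     &quad_indices,
    ///     grr::MemoryFlags::DEVICE_LOCAL,
    /// )?;
    /// # Ok(())
    /// # }
    /// ```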
    pub unsafe fn create_buffer_from_host(
        &self,
        data: &[u8],
        memory: MemoryFlags,
    ) -> Result<Buffer> {
        self.create_buffer_impl(data.len() as _, data.as_ptr() as *const _, memory)
    }
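
    /// Map a byte range of the buffer into host memory, viewed as a slice of `T`.
    ///
    /// The byte length of `range` must be a multiple of `size_of::<T>()`.
    /// Read/write access is derived from the `MemoryFlags` the buffer was
    /// created with. A sketch (crate-root re-exports assumed):
    ///
    /// ```no_run
    /// # unsafe fn sketch(device: &grr::Device) -> Result<(), grr::Error> {
    /// let buffer = device.create_buffer(
    ///     64,
    ///     grr::MemoryFlags::CPU_MAP_WRITE | grr::MemoryFlags::COHERENT,
    /// )?;
    /// let data: &mut [f32] = device.map_buffer(buffer, 0..64, grr::MappingFlags::empty());
    /// data[0] = 1.0;
    /// device.unmap_buffer(buffer);
    /// # Ok(())
    /// # }
    /// ```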
    #[allow(clippy::mut_from_ref)]
    pub unsafe fn map_buffer<T>(
        &self,
        buffer: Buffer,
        range: Range<u64>,
        mapping: MappingFlags,
    ) -> &mut [T] {
        let len = range.end - range.start;
        let stride = mem::size_of::<T>();
        // The mapped byte range must cover a whole number of `T` elements.
        assert_eq!(len % stride as u64, 0);

        let mut flags = 0;
        if mapping.contains(MappingFlags::UNSYNCHRONIZED) {
            flags |= __gl::MAP_UNSYNCHRONIZED_BIT;
        }
        // Inherit the access bits the buffer storage was created with.
        flags |= buffer.1
            & (__gl::MAP_COHERENT_BIT
                | __gl::MAP_PERSISTENT_BIT
                | __gl::MAP_READ_BIT
                | __gl::MAP_WRITE_BIT);

        let ptr = self
            .0
            .MapNamedBufferRange(buffer.0, range.start as _, len as _, flags)
            as *mut _;
        slice::from_raw_parts_mut(ptr, len as usize / stride)
    }
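
    /// Unmap a buffer previously mapped with [`map_buffer`](Device::map_buffer).
    ///
    /// Returns `false` if the data store became corrupted while it was
    /// mapped and its contents must be re-initialized.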
    pub unsafe fn unmap_buffer(&self, buffer: Buffer) -> bool {
        self.0.UnmapNamedBuffer(buffer.0) != 0
    }
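
    /// Delete a single buffer.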
    pub unsafe fn delete_buffer(&self, buffer: Buffer) {
        self.delete_buffers(&[buffer]);
    }
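
    /// Delete multiple buffers in one call.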
    pub unsafe fn delete_buffers(&self, buffers: &[Buffer]) {
        let buffers = buffers.iter().map(|buffer| buffer.0).collect::<Vec<_>>();
        self.0.DeleteBuffers(buffers.len() as _, buffers.as_ptr());
    }
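
    /// Copy host memory into a sub-range of the buffer.
    ///
    /// The buffer must have been created with `MemoryFlags::DYNAMIC`.
    /// A sketch (crate-root re-exports assumed):
    ///
    /// ```no_run
    /// # unsafe fn sketch(device: &grr::Device) -> Result<(), grr::Error> {
    /// let buffer = device.create_buffer(16, grr::MemoryFlags::DYNAMIC)?;
    /// device.copy_host_to_buffer(buffer, 0, &[0u8; 16]);
    /// # Ok(())
    /// # }
    /// ```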
    pub unsafe fn copy_host_to_buffer(&self, buffer: Buffer, offset: isize, data: &[u8]) {
        self.0
            .NamedBufferSubData(buffer.0, offset, data.len() as _, data.as_ptr() as *const _);
    }
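
    /// Bind buffer ranges as uniform buffers, starting at binding slot `first`.
    ///
    /// Each offset must respect the device's uniform buffer offset alignment.
    /// A sketch (crate-root re-exports assumed):
    ///
    /// ```no_run
    /// # unsafe fn sketch(device: &grr::Device, buffer: grr::Buffer) {
    /// device.bind_uniform_buffers(
    ///     0,
    ///     &[grr::BufferRange {
    ///         buffer,
    ///         offset: 0,
    ///         size: 256,
    ///     }],
    /// );
    /// # }
    /// ```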
    pub unsafe fn bind_uniform_buffers(&self, first: u32, ranges: &[BufferRange]) {
        // Split the ranges into the parallel arrays expected by GL.
        let buffers = ranges.iter().map(|view| view.buffer.0).collect::<Vec<_>>();
        let offsets = ranges
            .iter()
            .map(|view| view.offset as _)
            .collect::<Vec<_>>();
        let sizes = ranges.iter().map(|view| view.size as _).collect::<Vec<_>>();

        self.0.BindBuffersRange(
            __gl::UNIFORM_BUFFER,
            first,
            ranges.len() as _,
            buffers.as_ptr(),
            offsets.as_ptr(),
            sizes.as_ptr(),
        );
    }
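
    /// Bind buffer ranges as shader storage buffers, starting at slot `first`.
    ///
    /// Usage mirrors [`bind_uniform_buffers`](Device::bind_uniform_buffers),
    /// with the storage buffer offset alignment applying instead.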
    pub unsafe fn bind_storage_buffers(&self, first: u32, ranges: &[BufferRange]) {
        let buffers = ranges.iter().map(|view| view.buffer.0).collect::<Vec<_>>();
        let offsets = ranges
            .iter()
            .map(|view| view.offset as _)
            .collect::<Vec<_>>();
        let sizes = ranges.iter().map(|view| view.size as _).collect::<Vec<_>>();

        self.0.BindBuffersRange(
            __gl::SHADER_STORAGE_BUFFER,
            first,
            ranges.len() as _,
            buffers.as_ptr(),
            offsets.as_ptr(),
            sizes.as_ptr(),
        );
    }
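
    /// Bind a buffer as the source of indirect draw commands.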
    pub unsafe fn bind_draw_indirect_buffer(&self, buffer: Buffer) {
        self.0.BindBuffer(__gl::DRAW_INDIRECT_BUFFER, buffer.0);
    }

    /// Reset the indirect draw buffer binding.
    pub unsafe fn unbind_draw_indirect_buffer(&self) {
        self.0.BindBuffer(__gl::DRAW_INDIRECT_BUFFER, 0);
    }
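
    /// Bind a buffer as the source of indirect compute dispatch parameters.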
    pub unsafe fn bind_dispatch_indirect_buffer(&self, buffer: Buffer) {
        self.0.BindBuffer(__gl::DISPATCH_INDIRECT_BUFFER, buffer.0);
    }

    /// Reset the indirect dispatch buffer binding.
    pub unsafe fn unbind_dispatch_indirect_buffer(&self) {
        self.0.BindBuffer(__gl::DISPATCH_INDIRECT_BUFFER, 0);
    }

    /// Bind a buffer as the target of pixel pack (device to host) transfers.
    pub(crate) unsafe fn bind_pixel_pack_buffer(&self, buffer: Buffer) {
        self.0.BindBuffer(__gl::PIXEL_PACK_BUFFER, buffer.0);
    }

    /// Reset the pixel pack buffer binding.
    pub(crate) unsafe fn unbind_pixel_pack_buffer(&self) {
        self.0.BindBuffer(__gl::PIXEL_PACK_BUFFER, 0);
    }

    /// Bind a buffer as the source of pixel unpack (host to device) transfers.
    pub(crate) unsafe fn bind_pixel_unpack_buffer(&self, buffer: Buffer) {
        self.0.BindBuffer(__gl::PIXEL_UNPACK_BUFFER, buffer.0);
    }

    /// Reset the pixel unpack buffer binding.
    pub(crate) unsafe fn unbind_pixel_unpack_buffer(&self) {
        self.0.BindBuffer(__gl::PIXEL_UNPACK_BUFFER, 0);
    }
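
    /// Bind a buffer as the draw-count parameter source for indirect
    /// multi-draw commands (`GL_PARAMETER_BUFFER`, OpenGL 4.6 /
    /// `ARB_indirect_parameters`).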
    pub unsafe fn bind_parameter_buffer(&self, buffer: Buffer) {
        self.0.BindBuffer(__gl::PARAMETER_BUFFER, buffer.0);
    }
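
    /// Query the size (in bytes) of the buffer's data store.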
    pub(crate) unsafe fn get_buffer_size(&self, buffer: Buffer) -> u64 {
        let mut size = 0;
        self.0
            .GetNamedBufferParameteri64v(buffer.0, __gl::BUFFER_SIZE, &mut size);
        size as _
    }
}
bitflags!(
    /// Memory property flags for buffer creation.
    pub struct MemoryFlags: u8 {
        /// Device-local memory; not directly visible to the host.
        const DEVICE_LOCAL = 0x1;
        /// Host writes become visible to the device without explicit flushes.
        const COHERENT = 0x2;
        /// Buffer can be mapped for reading.
        const CPU_MAP_READ = 0x4;
        /// Buffer can be mapped for writing.
        const CPU_MAP_WRITE = 0x8;
        /// Buffer contents can be updated with `copy_host_to_buffer`.
        const DYNAMIC = 0x10;
    }
);

bitflags!(
    /// Flags controlling buffer mapping behavior.
    pub struct MappingFlags: u8 {
        /// Map without waiting for pending GPU operations on the buffer.
        const UNSYNCHRONIZED = 0x1;
    }
);