use crate::{
BufferLayout, DrawOrder, GpuBuffer, GpuDevice, GpuRenderer, Index,
parallel::*,
};
use std::{cmp::Ordering, ops::Range};
/// Start/end instance indices for one draw layer inside an instance buffer.
#[derive(Debug, Copy, Clone)]
pub struct InstanceDetails {
    /// First instance of the layer's contiguous range.
    pub start: u32,
    /// One past the last instance of the layer's range (exclusive end).
    pub end: u32,
}
/// A draw order paired with the index of its instance store in the renderer;
/// used to sort queued stores before they are uploaded to the GPU.
#[derive(Clone, Copy, Debug, Default)]
pub struct OrderedIndex {
    // Sort key: comparisons of `OrderedIndex` consider only this field.
    pub(crate) order: DrawOrder,
    // Handle used to look the store up via `get_ibo_store(..)`.
    pub(crate) index: Index,
}
impl PartialOrd for OrderedIndex {
    /// Delegates to the total ordering provided by [`Ord`].
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        let ordering = self.cmp(other);
        Some(ordering)
    }
}
impl PartialEq for OrderedIndex {
    /// Equality considers only `order`, matching the `Ord` implementation;
    /// the store `index` is deliberately ignored.
    fn eq(&self, other: &Self) -> bool {
        self.order.eq(&other.order)
    }
}

impl Eq for OrderedIndex {}
impl Ord for OrderedIndex {
    /// Total ordering by draw order alone.
    fn cmp(&self, other: &Self) -> Ordering {
        Ord::cmp(&self.order, &other.order)
    }
}
impl OrderedIndex {
pub fn new(order: DrawOrder, index: Index) -> Self {
Self { order, index }
}
}
/// CPU/GPU pair holding per-instance vertex data grouped by draw layer.
#[derive(Debug)]
pub struct InstanceBuffer<K: BufferLayout> {
    // Stores queued via `add_buffer_store` as (layer, index) pairs; drained
    // by `finalize`.
    pub unprocessed: Vec<(usize, OrderedIndex)>,
    // Per-layer instance ranges, indexed by layer number; `None` pads layers
    // with no content so indices stay aligned.
    pub buffers: Vec<Option<InstanceDetails>>,
    // Backing GPU buffer the instance data is uploaded into.
    pub buffer: GpuBuffer<K>,
    // Total bytes queued since the last `finalize`.
    needed_size: usize,
}
impl<K: BufferLayout> InstanceBuffer<K> {
    /// Creates an instance buffer whose GPU storage is initialized from
    /// `data`. The buffer uses `VERTEX | COPY_DST` usage so it can be bound
    /// as per-instance vertex data and updated from the CPU.
    pub fn create_buffer(gpu_device: &GpuDevice, data: &[u8]) -> Self {
        InstanceBuffer {
            unprocessed: Vec::new(),
            buffers: Vec::new(),
            buffer: GpuBuffer::new(
                gpu_device,
                data,
                wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
                Some("Instance Buffer"),
            ),
            needed_size: 0,
        }
    }

    /// Like [`Self::create_buffer`], but pre-allocates the CPU-side
    /// bookkeeping vectors for `capacity` entries (minimum 1). `capacity`
    /// does not affect the GPU buffer size, which is determined by `data`.
    pub fn create_buffer_with(
        gpu_device: &GpuDevice,
        data: &[u8],
        capacity: usize,
    ) -> Self {
        let size = capacity.max(1);
        let unprocessed = Vec::with_capacity(size);

        InstanceBuffer {
            unprocessed,
            buffers: Vec::with_capacity(size),
            buffer: GpuBuffer::new(
                gpu_device,
                data,
                wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
                Some("Instance Buffer"),
            ),
            needed_size: 0,
        }
    }

    /// Queues `index` for inclusion on `buffer_layer` during the next
    /// [`Self::finalize`] pass and accumulates the bytes it will need.
    /// Indices whose store cannot be found are silently skipped.
    pub fn add_buffer_store(
        &mut self,
        renderer: &GpuRenderer,
        index: OrderedIndex,
        buffer_layer: usize,
    ) {
        if let Some(store) = renderer.get_ibo_store(index.index) {
            self.needed_size += store.store.len();
            self.unprocessed.push((buffer_layer, index));
        }
    }

    /// Writes a single store into the GPU buffer at byte offset `*pos`,
    /// advancing `pos` (bytes) and `count` (instances). The upload is
    /// skipped when the store's position and contents are unchanged and
    /// `changed` is false.
    fn buffer_write(
        &self,
        renderer: &mut GpuRenderer,
        buf: &OrderedIndex,
        pos: &mut usize,
        count: &mut u32,
        changed: bool,
    ) {
        let mut write_buffer = false;
        let old_pos = *pos as u64;

        if let Some(store) = renderer.get_ibo_store_mut(buf.index) {
            let range = *pos..*pos + store.store.len();

            // Re-upload when the store moved, its data changed, or the whole
            // GPU buffer was reallocated (`changed`).
            if store.store_pos != range || changed || store.changed {
                store.store_pos = range;
                store.changed = false;
                write_buffer = true;
            }

            *pos += store.store.len();
            *count += (store.store.len() / K::stride()) as u32;
        }

        if write_buffer && let Some(store) = renderer.get_ibo_store(buf.index) {
            self.buffer.write(renderer.queue(), &store.store, old_pos);
        }
    }

    /// Sorts the queued stores by (layer, draw order), uploads them into the
    /// GPU buffer, and rebuilds the per-layer `buffers` ranges. Layers with
    /// no content are padded with `None` so `buffers[layer]` stays
    /// addressable by layer number. Clears the queue when done.
    pub fn finalize(&mut self, renderer: &mut GpuRenderer) {
        let (mut changed, mut pos, mut count, mut last_layer, mut start_pos) =
            (false, 0, 0, 0, 0);

        // Grow the GPU buffer first so everything fits; a reallocation
        // discards previous contents, so force a full re-upload below.
        if self.needed_size > self.buffer.max {
            self.resize(renderer.gpu_device(), self.needed_size / K::stride());
            changed = true;
        }

        self.buffer.count = self.needed_size / K::stride();
        self.buffer.len = self.needed_size;
        // Sorting groups each layer into one contiguous instance range.
        self.unprocessed.par_sort();
        self.buffers.clear();

        for processing in self.unprocessed.iter() {
            if last_layer != processing.0 {
                // Close out the previous layer's instance range.
                if count != 0 {
                    self.buffers.push(Some(InstanceDetails {
                        start: start_pos,
                        end: count,
                    }));
                }

                start_pos = count;

                // Pad skipped layers with `None` so the vector index still
                // equals the layer number. (Input is sorted ascending by
                // layer, so `processing.0 >= self.buffers.len()` holds here.)
                if processing.0 - self.buffers.len() > 0
                    || self.buffers.is_empty()
                {
                    let count = processing.0 - self.buffers.len();

                    for _ in 0..count {
                        self.buffers.push(None);
                    }
                }

                last_layer = processing.0;
            }

            self.buffer_write(
                renderer,
                &processing.1,
                &mut pos,
                &mut count,
                changed,
            );
        }

        // Close out the final layer, if it produced any instances.
        if start_pos != count {
            self.buffers.push(Some(InstanceDetails {
                start: start_pos,
                end: count,
            }));
        }

        self.needed_size = 0;
        self.unprocessed.clear();
    }

    /// Replaces the GPU buffer with a fresh one sized for `capacity`
    /// instances of `K`. Existing GPU contents are discarded.
    fn resize(&mut self, gpu_device: &GpuDevice, capacity: usize) {
        let data = K::with_capacity(capacity, 0);

        self.buffer = GpuBuffer::new(
            gpu_device,
            &data.vertexs,
            wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
            // Fixed: this was mislabeled "Vertex Buffer", inconsistent with
            // every other creation site in this impl, which confused GPU
            // debugging captures after a resize.
            Some("Instance Buffer"),
        );
    }

    /// Creates an instance buffer backed by `K`'s default contents.
    pub fn new(gpu_device: &GpuDevice) -> Self {
        Self::create_buffer(gpu_device, &K::default_buffer().vertexs)
    }

    /// Number of finalized instances in the buffer.
    // NOTE(review): `as u32` truncates if the count ever exceeds u32::MAX;
    // assumed unreachable in practice.
    pub fn count(&self) -> u32 {
        self.buffer.count as u32
    }

    /// Number of finalized bytes in the buffer.
    pub fn len(&self) -> u64 {
        self.buffer.len as u64
    }

    /// Returns true when the buffer holds no finalized data.
    pub fn is_empty(&self) -> bool {
        self.buffer.is_empty()
    }

    /// Maximum capacity of the GPU buffer (compared against byte sizes in
    /// [`Self::finalize`]).
    pub fn max(&self) -> usize {
        self.buffer.max
    }

    /// Size in bytes of a single instance of `K`.
    pub fn stride(&self) -> usize {
        K::stride()
    }

    /// Returns a slice of the buffer for binding; `bounds` defaults to the
    /// full finalized range.
    pub fn instances(
        &self,
        bounds: Option<Range<u64>>,
    ) -> wgpu::BufferSlice<'_> {
        let range = bounds.unwrap_or_else(|| 0..self.len());
        self.buffer.buffer_slice(range)
    }

    /// Creates an instance buffer pre-sized on the GPU for `capacity`
    /// instances of `K`.
    pub fn with_capacity(gpu_device: &GpuDevice, capacity: usize) -> Self {
        Self::create_buffer(gpu_device, &K::with_capacity(capacity, 0).vertexs)
    }
}