use std::{
    hash::Hash,
    num::NonZeroU64,
    ops::{Bound, Range, RangeBounds},
};

use uuid::Uuid;
use wgpu::util::DeviceExt;

use crate::context::Context;

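/// GPU buffer wrapper pairing a [`wgpu::Buffer`] with a unique id.
///
/// Equality and hashing are based on the id, so clones of a `Buffer` compare
/// equal while independently created buffers do not.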
#[derive(Clone, Debug)]
pub struct Buffer {
    id: Uuid,
    buffer: wgpu::Buffer,
}

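/// Describes how a [`Buffer`] is exposed to a shader: the binding type
/// (uniform or storage), whether a dynamic offset is used, and an optional
/// minimum binding size.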
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct BufferBinding {
    pub(crate) buffer: Buffer,
    pub(crate) binding_type: wgpu::BufferBindingType,
    pub(crate) has_dynamic_offset: bool,
    pub(crate) min_binding_size: Option<NonZeroU64>,
}

impl Buffer {
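    /// Creates an uninitialized buffer of `size` bytes with the given label and usage flags.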
    pub fn new(
        label: wgpu::Label,
        usage: wgpu::BufferUsages,
        size: usize,
        context: &Context,
    ) -> Self {
        let buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
            label,
            usage,
            size: size as u64,
            mapped_at_creation: false,
        });

        Self {
            id: Uuid::new_v4(),
            buffer,
        }
    }

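    /// Creates a buffer whose contents are initialized from `data`.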
    pub fn with_data(
        label: wgpu::Label,
        usage: wgpu::BufferUsages,
        data: &[u8],
        context: &Context,
    ) -> Self {
        let buffer = context
            .device()
            .create_buffer_init(&wgpu::util::BufferInitDescriptor {
                label,
                usage,
                contents: data,
            });

        Self {
            id: Uuid::new_v4(),
            buffer,
        }
    }

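    /// Reallocates the underlying buffer if `new_size` exceeds its current size.
    ///
    /// The existing contents are not copied into the new allocation, and the
    /// new buffer is created without a label.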
    pub fn ensure_capacity(&mut self, new_size: usize, context: &Context) {
        if new_size > self.buffer.size() as usize {
            self.buffer = context.device().create_buffer(&wgpu::BufferDescriptor {
                label: None,
                usage: self.buffer.usage(),
                size: new_size as u64,
                mapped_at_creation: false,
            });
        }
    }

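    /// Writes `data` into the buffer at offset 0 via the queue.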
    pub fn write(&self, data: &[u8], context: &Context) {
        context.queue().write_buffer(&self.buffer, 0, data);
    }

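    /// Returns the underlying [`wgpu::Buffer`].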
    pub(crate) fn buffer(&self) -> &wgpu::Buffer {
        &self.buffer
    }

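    /// Returns a [`BufferSlice`] over `bounds`; unbounded ends resolve to the
    /// start and full size of the buffer.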
    pub fn slice<R>(&self, bounds: R) -> BufferSlice
    where
        R: RangeBounds<wgpu::BufferAddress>,
    {
        BufferSlice {
            buffer: self.buffer.clone(),
            bounds: constrain_range_to_container_len(bounds, self.buffer.size()),
        }
    }

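    /// Returns a binding that exposes this buffer as a uniform buffer.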
    #[must_use]
    pub fn uniform_binding(&self) -> BufferBinding {
        BufferBinding {
            buffer: self.clone(),
            binding_type: wgpu::BufferBindingType::Uniform,
            has_dynamic_offset: false,
            min_binding_size: None,
        }
    }

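    /// Returns a binding that exposes this buffer as a storage buffer,
    /// optionally read-only.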
    #[must_use]
    pub fn storage_binding(&self, read_only: bool) -> BufferBinding {
        BufferBinding {
            buffer: self.clone(),
            binding_type: wgpu::BufferBindingType::Storage { read_only },
            has_dynamic_offset: false,
            min_binding_size: None,
        }
    }

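    /// Unmaps the underlying [`wgpu::Buffer`].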
    pub fn unmap(&self) {
        self.buffer.unmap();
    }
}

impl BufferBinding {
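    /// Enables dynamic offsets for this binding and sets the minimum binding
    /// size (a value of zero clears it).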
    pub fn dynamic_offset(mut self, min_binding_size: u64) -> Self {
        self.has_dynamic_offset = true;
        self.min_binding_size = NonZeroU64::new(min_binding_size);
        self
    }
}

impl Hash for Buffer {
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.id.hash(state);
    }
}

impl PartialEq for Buffer {
    fn eq(&self, other: &Self) -> bool {
        self.id == other.id
    }
}

impl Eq for Buffer {}

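/// A byte range over a [`wgpu::Buffer`], resolved to concrete start and end offsets.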
#[derive(Debug)]
pub struct BufferSlice {
    buffer: wgpu::Buffer,
    bounds: Range<wgpu::BufferAddress>,
}

impl BufferSlice {
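    /// Returns the [`wgpu::BufferSlice`] for the stored bounds.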
    pub fn get(&self) -> wgpu::BufferSlice {
        self.buffer.slice(self.bounds.clone())
    }
}

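/// Resolves a [`RangeBounds`] into a concrete `Range`, substituting 0 for an
/// unbounded start and `container_len` for an unbounded end.
///
/// An explicitly given end larger than `container_len` is not clamped.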
fn constrain_range_to_container_len<R>(range: R, container_len: u64) -> Range<u64>
where
    R: RangeBounds<u64>,
{
    let start = match range.start_bound() {
        Bound::Included(t) => *t,
        Bound::Excluded(t) => *t + 1,
        Bound::Unbounded => 0,
    };

    let end = match range.end_bound() {
        Bound::Included(t) => *t + 1,
        Bound::Excluded(t) => *t,
        Bound::Unbounded => container_len,
    };

    start..end
}