use crate::{
    allocator::{Allocator, Kind},
    block::Block,
    mapping::MappedRange,
    memory::Memory,
    AtomSize, Size,
};
use hal::{device::Device as _, Backend};
use std::{collections::VecDeque, ops::Range, ptr::NonNull, sync::Arc};

type LineCount = u32;

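/// Memory block sub-allocated from a [`LinearAllocator`].
///
/// Holds a shared reference to the backing [`Memory`] and the byte range it
/// covers within its line.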
#[derive(Debug)]
pub struct LinearBlock<B: Backend> {
    memory: Arc<Memory<B>>,
    line_index: LineCount,
    ptr: Option<NonNull<u8>>,
    range: Range<Size>,
}

unsafe impl<B: Backend> Send for LinearBlock<B> {}
unsafe impl<B: Backend> Sync for LinearBlock<B> {}

impl<B: Backend> LinearBlock<B> {
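    /// Returns the size of this block in bytes.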
    pub fn size(&self) -> Size {
        self.range.end - self.range.start
    }
}

impl<B: Backend> Block<B> for LinearBlock<B> {
    fn properties(&self) -> hal::memory::Properties {
        self.memory.properties()
    }

    fn memory(&self) -> &B::Memory {
        self.memory.raw()
    }

    fn segment(&self) -> hal::memory::Segment {
        hal::memory::Segment {
            offset: self.range.start,
            size: Some(self.range.end - self.range.start),
        }
    }

    fn map<'a>(
        &'a mut self,
        _device: &B::Device,
        segment: hal::memory::Segment,
    ) -> Result<MappedRange<'a, B>, hal::device::MapError> {
        let requested_range = crate::segment_to_sub_range(segment, &self.range)?;

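        // Ranges mapped on non-coherent memory must be aligned to the
        // flush/invalidate atom size, so widen the requested range if needed.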
        let mapping_range = match self.memory.non_coherent_atom_size {
            Some(atom) => crate::align_range(&requested_range, atom),
            None => requested_range.clone(),
        };

        Ok(unsafe {
            MappedRange::from_raw(
                &self.memory,
                self.ptr
                    .ok_or(hal::device::MapError::MappingFailed)?
                    .as_ptr()
                    .offset((mapping_range.start - self.range.start) as isize),
                mapping_range,
                requested_range,
            )
        })
    }
}

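/// Configuration for a [`LinearAllocator`].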
#[derive(Clone, Copy, Debug)]
pub struct LinearConfig {
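    /// Size in bytes of each [`Line`]. Larger lines mean fewer device
    /// allocations, at the cost of more memory held per line.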
    pub line_size: Size,
}

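/// Allocator that sub-allocates memory sequentially from a queue of [`Line`]s.
///
/// Blocks are carved out of the most recent line, and a line's memory is only
/// reclaimed once every block in it has been freed, so this allocator is best
/// suited to short-lived allocations.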
#[derive(Debug)]
pub struct LinearAllocator<B: Backend> {
    memory_type: hal::MemoryTypeId,
    memory_properties: hal::memory::Properties,
    line_size: Size,
    finished_lines_count: LineCount,
    lines: VecDeque<Line<B>>,
    non_coherent_atom_size: Option<AtomSize>,
    unused_lines: Vec<Line<B>>,
}

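/// A single chunk of device memory that blocks are carved out of sequentially.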
#[derive(Debug)]
struct Line<B: Backend> {
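    /// High-water mark: offset just past the most recent allocation,
    /// including any alignment padding.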
    allocated: Size,
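    /// Bytes released back, counting both freed blocks and alignment padding.
    /// The line can be retired once `freed` catches up with `allocated`.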
    freed: Size,
    memory: Arc<Memory<B>>,
    ptr: Option<NonNull<u8>>,
}

impl<B: Backend> Line<B> {
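    /// Unmaps and frees the backing memory, returning the number of bytes
    /// released (0 if the memory is still shared and cannot be freed).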
    unsafe fn free_memory(self, device: &B::Device) -> Size {
        match Arc::try_unwrap(self.memory) {
            Ok(memory) => {
                log::trace!("Freed `Line` of size {}", memory.size());
                if memory.is_mappable() {
                    device.unmap_memory(memory.raw());
                }

                let freed = memory.size();
                device.free_memory(memory.into_raw());
                freed
            }
            Err(_) => {
                log::error!("Allocated `Line` was freed, but memory is still shared.");
                0
            }
        }
    }
}

unsafe impl<B: Backend> Send for Line<B> {}
unsafe impl<B: Backend> Sync for Line<B> {}

impl<B: Backend> LinearAllocator<B> {
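    /// Creates a new `LinearAllocator` for the given memory type and
    /// properties. `non_coherent_atom_size` is only used when the memory is
    /// host-visible but not coherent.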
    pub fn new(
        memory_type: hal::MemoryTypeId,
        memory_properties: hal::memory::Properties,
        config: LinearConfig,
        non_coherent_atom_size: Size,
    ) -> Self {
        log::trace!(
            "Create new 'linear' allocator: type: '{:?}', properties: '{:#?}', config: '{:#?}'",
            memory_type,
            memory_properties,
            config
        );
        let (line_size, non_coherent_atom_size) =
            if crate::is_non_coherent_visible(memory_properties) {
                let atom = AtomSize::new(non_coherent_atom_size);
                (crate::align_size(config.line_size, atom.unwrap()), atom)
            } else {
                (config.line_size, None)
            };

        LinearAllocator {
            memory_type,
            memory_properties,
            line_size,
            finished_lines_count: 0,
            lines: VecDeque::new(),
            unused_lines: Vec::new(),
            non_coherent_atom_size,
        }
    }

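    /// Returns the largest allocation size recommended for this allocator:
    /// half of the line size.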
    pub fn max_allocation(&self) -> Size {
        self.line_size / 2
    }

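    /// Retires lines from the front of the queue once all of their
    /// allocations have been freed, either releasing their memory
    /// (`free_memory == true`) or stashing them in `unused_lines` for reuse.
    /// Returns the number of bytes freed.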
    fn cleanup(&mut self, device: &B::Device, free_memory: bool) -> Size {
        let mut freed = 0;
        while !self.lines.is_empty() {
            if self.lines[0].allocated > self.lines[0].freed {
                break;
            }

            let line = self.lines.pop_front().unwrap();
            self.finished_lines_count += 1;

            if free_memory {
                unsafe {
                    freed += line.free_memory(device);
                }
            } else if Arc::strong_count(&line.memory) == 1 {
                self.unused_lines.push(line);
            } else {
                log::error!("Allocated `Line` was freed, but memory is still shared.");
            }
        }
        freed
    }

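    /// Frees all lines that are no longer in use and returns the total number
    /// of bytes released back to the device.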
    pub fn clear(&mut self, device: &B::Device) -> Size {
        let mut freed = self.cleanup(device, true);

        for line in self.unused_lines.drain(..) {
            freed += self.line_size;
            unsafe {
                line.free_memory(device);
            }
        }

        freed
    }
}

impl<B: Backend> Allocator<B> for LinearAllocator<B> {
    type Block = LinearBlock<B>;

    const KIND: Kind = Kind::Linear;

    fn alloc(
        &mut self,
        device: &B::Device,
        size: Size,
        align: Size,
    ) -> Result<(LinearBlock<B>, Size), hal::device::AllocationError> {
        let (size, align) = match self.non_coherent_atom_size {
            Some(atom) => (
                crate::align_size(size, atom),
                crate::align_size(align, atom),
            ),
            None => (size, align),
        };

        if size > self.line_size || align > self.line_size {
            return Err(hal::device::AllocationError::TooManyObjects);
        }

        let lines_count = self.lines.len() as LineCount;
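        // Try to sub-allocate from the current (most recent) line first.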
        if let Some(line) = self.lines.back_mut() {
            let aligned_offset =
                crate::align_offset(line.allocated, unsafe { AtomSize::new_unchecked(align) });
            if aligned_offset + size <= self.line_size {
                line.freed += aligned_offset - line.allocated;
                line.allocated = aligned_offset + size;

                let block = LinearBlock {
                    line_index: self.finished_lines_count + lines_count - 1,
                    memory: Arc::clone(&line.memory),
                    ptr: line.ptr.map(|ptr| unsafe {
                        NonNull::new_unchecked(ptr.as_ptr().offset(aligned_offset as isize))
                    }),
                    range: aligned_offset..aligned_offset + size,
                };

                return Ok((block, 0));
            }
        }

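        // The current line is full (or none exists): reuse a retired line if
        // one is available, otherwise allocate a fresh one from the device.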
        let (line, new_allocation_size) = match self.unused_lines.pop() {
            Some(mut line) => {
                line.allocated = size;
                line.freed = 0;
                (line, 0)
            }
            None => {
                log::trace!("Allocated `Line` of size {}", self.line_size);
                let (memory, ptr) = unsafe {
                    super::allocate_memory_helper(
                        device,
                        self.memory_type,
                        self.line_size,
                        self.memory_properties,
                        self.non_coherent_atom_size,
                    )?
                };

                (
                    Line {
                        allocated: size,
                        freed: 0,
                        ptr,
                        memory: Arc::new(memory),
                    },
                    self.line_size,
                )
            }
        };

        let block = LinearBlock {
            line_index: self.finished_lines_count + lines_count,
            memory: Arc::clone(&line.memory),
            ptr: line.ptr,
            range: 0..size,
        };

        self.lines.push_back(line);
        Ok((block, new_allocation_size))
    }

    fn free(&mut self, device: &B::Device, block: Self::Block) -> Size {
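        // `line_index` is global; subtract the number of already-retired lines
        // to get an index into the live `lines` queue.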
        let index = (block.line_index - self.finished_lines_count) as usize;
        self.lines[index].freed += block.size();
        drop(block);
        self.cleanup(device, false)
    }
}

impl<B: Backend> Drop for LinearAllocator<B> {
    fn drop(&mut self) {
        if !self.lines.is_empty() {
            log::error!("Not all allocations from LinearAllocator were freed");
        }
    }
}