//! Byte-budget tracking for large allocations (`jxl_grid/alloc_tracker.rs`).

use std::sync::{
    Arc,
    atomic::{AtomicUsize, Ordering},
};
/// Shared byte-budget tracker for large allocations.
///
/// Cloning is cheap: clones share the same underlying budget via an [`Arc`],
/// so allocations recorded through any clone draw from the same pool.
#[derive(Debug, Clone)]
pub struct AllocTracker {
    // Shared counter of remaining bytes; also kept alive by outstanding
    // `AllocHandle`s so releases on drop always have somewhere to return to.
    inner: Arc<AllocTrackerInner>,
}
/// Shared state behind [`AllocTracker`] and [`AllocHandle`].
#[derive(Debug)]
struct AllocTrackerInner {
    // Remaining budget in bytes. Updated lock-free with `Relaxed` ordering:
    // only the counter value matters, no other memory is synchronized by it.
    bytes_left: AtomicUsize,
}
17impl AllocTracker {
18 pub fn with_limit(bytes_left: usize) -> Self {
20 Self {
21 inner: Arc::new(AllocTrackerInner {
22 bytes_left: AtomicUsize::new(bytes_left),
23 }),
24 }
25 }
26
27 pub fn alloc<T>(&self, count: usize) -> Result<AllocHandle, crate::OutOfMemory> {
31 let bytes = count * std::mem::size_of::<T>();
32 let result = self.inner.bytes_left.fetch_update(
33 Ordering::Relaxed,
34 Ordering::Relaxed,
35 |bytes_left| bytes_left.checked_sub(bytes),
36 );
37
38 match result {
39 Ok(prev) => {
40 tracing::trace!(bytes, left = prev - bytes, "Created allocation handle");
41 Ok(AllocHandle {
42 bytes,
43 inner: Arc::clone(&self.inner),
44 })
45 }
46 Err(left) => {
47 tracing::trace!(bytes, left, "Allocation failed");
48 Err(crate::OutOfMemory::new(bytes))
49 }
50 }
51 }
52
53 pub fn expand_limit(&self, by_bytes: usize) {
55 self.inner.bytes_left.fetch_add(by_bytes, Ordering::Relaxed);
56 }
57
58 pub fn shrink_limit(&self, by_bytes: usize) -> Result<(), crate::OutOfMemory> {
63 let result = self.inner.bytes_left.fetch_update(
64 Ordering::Relaxed,
65 Ordering::Relaxed,
66 |bytes_left| bytes_left.checked_sub(by_bytes),
67 );
68
69 if result.is_ok() {
70 Ok(())
71 } else {
72 Err(crate::OutOfMemory::new(by_bytes))
73 }
74 }
75}
/// RAII guard for a tracked reservation.
///
/// Holds `bytes` of budget from its originating [`AllocTracker`]; the bytes
/// are returned to the shared pool when the handle is dropped.
#[derive(Debug)]
pub struct AllocHandle {
    // Number of bytes this handle accounts for; reset to 0 on drop.
    bytes: usize,
    // Keeps the shared budget alive even if the tracker itself is gone.
    inner: Arc<AllocTrackerInner>,
}
84impl Drop for AllocHandle {
85 fn drop(&mut self) {
86 let bytes = self.bytes;
87 let prev = self.inner.bytes_left.fetch_add(bytes, Ordering::Relaxed);
88 tracing::trace!(bytes, left = prev + bytes, "Released allocation handle");
89 self.bytes = 0;
90 }
91}
93impl AllocHandle {
94 pub fn tracker(&self) -> AllocTracker {
96 AllocTracker {
97 inner: Arc::clone(&self.inner),
98 }
99 }
100}