scirs2_core/memory/metrics/gpu.rs

use crate::gpu::{GpuBackend, GpuBuffer, GpuContext, GpuDataType, GpuKernelHandle};
use crate::memory::metrics::{track_allocation, track_deallocation};

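/// A GPU buffer whose allocation and deallocation are reported to the
/// memory metrics system via `track_allocation` / `track_deallocation`.
///
/// A minimal usage sketch, not compiled as a doctest; the component name
/// `"MyComponent"` is illustrative:
///
/// ```ignore
/// let ctx = GpuContext::new(GpuBackend::Cpu)?;
/// let buffer = ctx.create_buffer::<f32>(1024);
/// let tracked = TrackedGpuBuffer::new(buffer, "MyComponent");
/// assert_eq!(tracked.size_bytes(), 1024 * std::mem::size_of::<f32>());
/// // The allocation was recorded by `new`; a matching deallocation is
/// // recorded when `tracked` is dropped.
/// ```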
pub struct TrackedGpuBuffer<T: GpuDataType> {
    /// The wrapped GPU buffer.
    inner: GpuBuffer<T>,
    /// Component name the buffer is tracked under.
    component_name: String,
    /// Size of the buffer in bytes.
    size_bytes: usize,
    /// Whether the buffer is currently reported to the metrics system.
    is_tracked: bool,
}

impl<T: GpuDataType> TrackedGpuBuffer<T> {
    /// Wrap `buffer` and record its allocation under `component_name`.
    pub fn new(buffer: GpuBuffer<T>, component_name: impl Into<String>) -> Self {
        let size_bytes = buffer.len() * std::mem::size_of::<T>();
        let tracked = Self {
            inner: buffer,
            component_name: component_name.into(),
            size_bytes,
            is_tracked: true,
        };

        // Report the allocation against the stored buffer, the same field
        // that `stop_tracking` and `Drop` report against.
        track_allocation(
            &tracked.component_name,
            tracked.size_bytes,
            &tracked.inner as *const _ as usize,
        );

        tracked
    }

    /// Number of elements in the buffer.
    pub fn len(&self) -> usize {
        self.inner.len()
    }

    /// Whether the buffer contains no elements.
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Size of the buffer in bytes.
    pub fn size_bytes(&self) -> usize {
        self.size_bytes
    }

    /// Component name the buffer is tracked under.
    pub fn component_name(&self) -> &str {
        &self.component_name
    }

    /// Copy data from host memory into the buffer.
    ///
    /// Errors from the underlying buffer are currently ignored.
    pub fn copy_from_host(&self, data: &[T]) {
        let _ = self.inner.copy_from_host(data);
    }

    /// Copy the buffer's contents into host memory.
    ///
    /// Errors from the underlying buffer are currently ignored.
    pub fn copy_to_host(&self, data: &mut [T]) {
        let _ = self.inner.copy_to_host(data);
    }

    /// Copy the buffer's contents into a new `Vec`.
    pub fn to_vec(&self) -> Vec<T> {
        self.inner.to_vec()
    }

    /// Access the underlying, untracked buffer.
    pub const fn inner(&self) -> &GpuBuffer<T> {
        &self.inner
    }

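    /// Stop reporting this buffer to the memory metrics system.
    ///
    /// This records a deallocation for the buffer, so it no longer counts
    /// toward current usage; [`resume_tracking`](Self::resume_tracking)
    /// records the allocation again. Illustrative sketch (not a doctest):
    ///
    /// ```ignore
    /// tracked.stop_tracking();   // recorded as a deallocation
    /// // ... buffer temporarily excluded from memory reports ...
    /// tracked.resume_tracking(); // recorded as an allocation again
    /// ```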
    pub fn stop_tracking(&mut self) {
        if self.is_tracked {
            track_deallocation(
                &self.component_name,
                self.size_bytes,
                &self.inner as *const _ as usize,
            );
            self.is_tracked = false;
        }
    }

    /// Resume reporting this buffer to the memory metrics system,
    /// recording its allocation again.
    pub fn resume_tracking(&mut self) {
        if !self.is_tracked {
            track_allocation(
                &self.component_name,
                self.size_bytes,
                &self.inner as *const _ as usize,
            );
            self.is_tracked = true;
        }
    }
}

impl<T: GpuDataType> Drop for TrackedGpuBuffer<T> {
    fn drop(&mut self) {
        if self.is_tracked {
            track_deallocation(
                &self.component_name,
                self.size_bytes,
                &self.inner as *const _ as usize,
            );
        }
    }
}

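/// A GPU context wrapper whose buffer factory methods return
/// [`TrackedGpuBuffer`]s, so every buffer created through it is reported to
/// the memory metrics system under this context's component name.
///
/// A minimal usage sketch, not compiled as a doctest; the backend and
/// component name are illustrative:
///
/// ```ignore
/// use crate::memory::metrics::generate_memory_report;
///
/// let ctx = TrackedGpuContext::with_backend(GpuBackend::Cpu, "MyComponent")?;
/// let buffer = ctx.create_buffer_from_slice(&[1.0f32, 2.0, 3.0]);
/// let report = generate_memory_report();
/// assert!(report.total_current_usage > 0);
/// ```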
pub struct TrackedGpuContext {
    /// The wrapped GPU context.
    inner: GpuContext,
    /// Component name prefix used for buffers created through this context.
    component_name: String,
}

impl TrackedGpuContext {
    /// Wrap an existing context under the given component name.
    pub fn new(context: GpuContext, component_name: impl Into<String>) -> Self {
        Self {
            inner: context,
            component_name: component_name.into(),
        }
    }

    /// Create a tracked context for the given backend.
    pub fn with_backend(
        backend: GpuBackend,
        component_name: impl Into<String>,
    ) -> Result<Self, crate::gpu::GpuError> {
        let context = GpuContext::new(backend)?;
        Ok(Self::new(context, component_name))
    }

    /// The backend this context runs on.
    pub fn backend(&self) -> GpuBackend {
        self.inner.backend()
    }

    /// Human-readable name of the backend.
    pub fn backend_name(&self) -> &str {
        self.inner.backend_name()
    }

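    /// Create a tracked buffer of `size` elements.
    ///
    /// Each buffer is tracked under its own component name of the form
    /// `"<component_name>:<buffer address>"`, so individual buffers show up
    /// as separate entries in memory reports.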
    pub fn create_buffer<T: GpuDataType>(&self, size: usize) -> TrackedGpuBuffer<T> {
        let buffer = self.inner.create_buffer::<T>(size);
        let buffer_name = format!("{}:{:p}", self.component_name, &buffer);
        TrackedGpuBuffer::new(buffer, buffer_name)
    }

    /// Create a tracked buffer initialized from `data`, named per buffer in
    /// the same way as [`create_buffer`](Self::create_buffer).
    pub fn create_buffer_from_slice<T: GpuDataType>(&self, data: &[T]) -> TrackedGpuBuffer<T> {
        let buffer = self.inner.create_buffer_from_slice(data);
        let buffer_name = format!("{}:{:p}", self.component_name, &buffer);
        TrackedGpuBuffer::new(buffer, buffer_name)
    }

    pub fn execute<F, R>(&self, f: F) -> R
    where
        F: FnOnce(&crate::gpu::GpuCompiler) -> R,
    {
        self.inner.execute(f)
    }

    pub fn get_kernel(&self, name: &str) -> Result<GpuKernelHandle, crate::gpu::GpuError> {
        self.inner.get_kernel(name)
    }

    pub fn get_specialized_kernel(
        &self,
        name: &str,
        params: &crate::gpu::kernels::KernelParams,
    ) -> Result<GpuKernelHandle, crate::gpu::GpuError> {
        self.inner.get_specialized_kernel(name, params)
    }

    pub const fn inner(&self) -> &GpuContext {
        &self.inner
    }
}

/// Placeholder hook for setting up global GPU memory tracking.
///
/// The body is currently empty; buffers are tracked individually when
/// created through [`TrackedGpuContext`] or wrapped in [`TrackedGpuBuffer`].
#[allow(dead_code)]
pub fn setup_gpu_memory_tracking() {}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::memory::metrics::{generate_memory_report, reset_memory_metrics};

    #[test]
    fn test_tracked_gpu_buffer() {
        // Skip the test if no GPU context can be created.
        let context = match GpuContext::new(GpuBackend::Cpu) {
            Ok(ctx) => ctx,
            Err(_) => return,
        };

        reset_memory_metrics();

        let tracked_ctx = TrackedGpuContext::new(context, "GpuTests");

        let buffer_size = 1000;
        let element_size = std::mem::size_of::<f32>();
        let buffer = tracked_ctx.create_buffer::<f32>(buffer_size);

        // Creating the buffer should register exactly one allocation.
        let report = generate_memory_report();
        assert!(report.total_current_usage > 0);
        assert_eq!(report.total_allocation_count, 1);

        // The buffer is tracked under a per-buffer component name prefixed
        // with the context's component name.
        let buffer_component = report
            .component_stats
            .keys()
            .find(|name| name.starts_with("GpuTests:"))
            .expect("Should have a buffer component");

        let component_stats = &report.component_stats[buffer_component];
        assert_eq!(component_stats.current_usage, buffer_size * element_size);

        // Dropping the tracked buffer should release the tracked memory.
        drop(buffer);

        let report = generate_memory_report();
        assert_eq!(report.total_current_usage, 0);
    }
}