1use std::collections::HashMap;
5use std::sync::atomic::{AtomicU64, Ordering};
6use std::fmt;
7
/// Opaque handle identifying a GPU buffer owned by a [`BackendContext`].
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BufferHandle(pub u64);

/// Opaque handle identifying a GPU texture.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct TextureHandle(pub u64);

/// Opaque handle identifying a compiled shader module.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct ShaderHandle(pub u64);

/// Opaque handle identifying a render (vertex + fragment) pipeline.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct PipelineHandle(pub u64);

/// Opaque handle identifying a compute pipeline.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct ComputePipelineHandle(pub u64);
31
/// The graphics API family a context is implemented on top of.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum GpuBackend {
    OpenGL,
    Vulkan,
    Metal,
    WebGPU,
    Software,
}

impl fmt::Display for GpuBackend {
    /// Renders the backend's canonical human-readable name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the name first, then emit it with a single write.
        let name = match self {
            Self::OpenGL => "OpenGL",
            Self::Vulkan => "Vulkan",
            Self::Metal => "Metal",
            Self::WebGPU => "WebGPU",
            Self::Software => "Software",
        };
        f.write_str(name)
    }
}
57
/// Bitflag set describing the ways a buffer may be used.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct BufferUsage(pub u32);

impl BufferUsage {
    /// Usable as a vertex buffer.
    pub const VERTEX: BufferUsage = BufferUsage(1 << 0);
    /// Usable as an index buffer.
    pub const INDEX: BufferUsage = BufferUsage(1 << 1);
    /// Usable as a uniform buffer.
    pub const UNIFORM: BufferUsage = BufferUsage(1 << 2);
    /// Usable as a shader storage buffer.
    pub const STORAGE: BufferUsage = BufferUsage(1 << 3);
    /// Usable as an indirect-draw argument buffer.
    pub const INDIRECT: BufferUsage = BufferUsage(1 << 4);
    /// Usable as the source of a copy command.
    pub const COPY_SRC: BufferUsage = BufferUsage(1 << 5);
    /// Usable as the destination of a copy command.
    pub const COPY_DST: BufferUsage = BufferUsage(1 << 6);

    /// Returns `true` when every flag set in `other` is also set in `self`.
    pub fn contains(self, other: BufferUsage) -> bool {
        self.0 & other.0 == other.0
    }
}

impl std::ops::BitOr for BufferUsage {
    type Output = Self;

    /// Union of two flag sets.
    fn bitor(self, rhs: Self) -> Self {
        Self(self.0 | rhs.0)
    }
}

impl std::ops::BitAnd for BufferUsage {
    type Output = Self;

    /// Intersection of two flag sets.
    fn bitand(self, rhs: Self) -> Self {
        Self(self.0 & rhs.0)
    }
}
85
/// Pixel formats understood by every backend.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum TextureFormat {
    R8,
    RGBA8,
    RGBA16F,
    RGBA32F,
    Depth24,
    Depth32F,
}

impl TextureFormat {
    /// Storage size of one pixel in this format, in bytes.
    pub fn bytes_per_pixel(&self) -> usize {
        // Grouped by size rather than by declaration order.
        match self {
            TextureFormat::R8 => 1,
            TextureFormat::Depth24 => 3,
            TextureFormat::RGBA8 | TextureFormat::Depth32F => 4,
            TextureFormat::RGBA16F => 8,
            TextureFormat::RGBA32F => 16,
        }
    }

    /// Whether this is a depth format (as opposed to a color format).
    pub fn is_depth(&self) -> bool {
        match self {
            TextureFormat::Depth24 | TextureFormat::Depth32F => true,
            _ => false,
        }
    }
}
115
/// The pipeline stage a shader module targets.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum ShaderStage {
    Vertex,
    Fragment,
    Compute,
}
123
/// Feature and limit report for a backend.
#[derive(Debug, Clone)]
pub struct BackendCapabilities {
    /// Whether compute pipelines can be created at all.
    pub compute_shaders: bool,
    /// Maximum texture dimension (width or height) in pixels.
    pub max_texture_size: u32,
    /// Maximum shader storage buffer size in bytes.
    pub max_ssbo_size: u64,
    /// Per-axis compute workgroup size limits (x, y, z).
    pub max_workgroup_size: [u32; 3],
    /// Whether indirect draw commands are supported.
    pub indirect_draw: bool,
    /// Whether multiple indirect draws can be issued in one command.
    pub multi_draw_indirect: bool,
}

impl Default for BackendCapabilities {
    /// Conservative baseline: no compute, no indirect draw, modest limits.
    fn default() -> Self {
        BackendCapabilities {
            compute_shaders: false,
            indirect_draw: false,
            multi_draw_indirect: false,
            max_texture_size: 4096,
            max_ssbo_size: 128 * 1024 * 1024,
            max_workgroup_size: [256, 256, 64],
        }
    }
}
151
impl BackendCapabilities {
    /// Capability table keyed by backend.
    ///
    /// NOTE(review): these are hard-coded, conservative assumptions rather
    /// than values queried from a live device/driver — confirm against
    /// real device limits before relying on them for allocation decisions.
    pub fn for_backend(backend: GpuBackend) -> Self {
        match backend {
            GpuBackend::Vulkan => Self {
                compute_shaders: true,
                max_texture_size: 16384,
                max_ssbo_size: 2 * 1024 * 1024 * 1024,
                max_workgroup_size: [1024, 1024, 64],
                indirect_draw: true,
                multi_draw_indirect: true,
            },
            GpuBackend::Metal => Self {
                compute_shaders: true,
                max_texture_size: 16384,
                max_ssbo_size: 1024 * 1024 * 1024,
                max_workgroup_size: [1024, 1024, 64],
                indirect_draw: true,
                multi_draw_indirect: true,
            },
            GpuBackend::WebGPU => Self {
                compute_shaders: true,
                max_texture_size: 8192,
                max_ssbo_size: 256 * 1024 * 1024,
                max_workgroup_size: [256, 256, 64],
                indirect_draw: true,
                // WebGPU has no multiDrawIndirect in the core spec.
                multi_draw_indirect: false,
            },
            GpuBackend::OpenGL => Self {
                compute_shaders: true,
                max_texture_size: 8192,
                max_ssbo_size: 128 * 1024 * 1024,
                max_workgroup_size: [512, 512, 64],
                indirect_draw: true,
                multi_draw_indirect: true,
            },
            // Software emulation: compute is "supported" (run on CPU) but
            // GPU-side indirect drawing is not.
            GpuBackend::Software => Self {
                compute_shaders: true,
                max_texture_size: 4096,
                max_ssbo_size: 512 * 1024 * 1024,
                max_workgroup_size: [256, 256, 64],
                indirect_draw: false,
                multi_draw_indirect: false,
            },
        }
    }

    /// True when a workgroup of `(x, y, z)` fits within every per-axis limit.
    pub fn workgroup_fits(&self, x: u32, y: u32, z: u32) -> bool {
        x <= self.max_workgroup_size[0]
            && y <= self.max_workgroup_size[1]
            && z <= self.max_workgroup_size[2]
    }
}
206
207pub fn detect_backend() -> GpuBackend {
213 if cfg!(target_os = "macos") || cfg!(target_os = "ios") {
216 GpuBackend::Metal
217 } else if cfg!(target_os = "windows") {
218 GpuBackend::Vulkan
220 } else if cfg!(target_os = "linux") {
221 GpuBackend::Vulkan
222 } else if cfg!(target_arch = "wasm32") {
223 GpuBackend::WebGPU
224 } else {
225 GpuBackend::Software
226 }
227}
228
/// A single recorded GPU operation, consumed by `BackendContext::submit`.
#[derive(Debug, Clone)]
pub enum GpuCommand {
    /// Copy `size` bytes from the start of `src` to the start of `dst`.
    CopyBufferToBuffer {
        src: BufferHandle,
        dst: BufferHandle,
        size: usize,
    },
    /// Copy a `width * height` region of pixel data from `src` into `dst`.
    CopyBufferToTexture {
        src: BufferHandle,
        dst: TextureHandle,
        width: u32,
        height: u32,
    },
    /// Non-indexed, instanced draw with a single vertex buffer.
    Draw {
        pipeline: PipelineHandle,
        vertex_buffer: BufferHandle,
        vertex_count: u32,
        instance_count: u32,
    },
    /// Indexed, instanced draw.
    DrawIndexed {
        pipeline: PipelineHandle,
        vertex_buffer: BufferHandle,
        index_buffer: BufferHandle,
        index_count: u32,
        instance_count: u32,
    },
    /// Launch a compute pipeline over an `x * y * z` grid of workgroups.
    Dispatch {
        pipeline: ComputePipelineHandle,
        x: u32,
        y: u32,
        z: u32,
    },
    /// Bind a set of buffers at bind-group slot `index`.
    SetBindGroup {
        index: u32,
        buffers: Vec<BufferHandle>,
    },
    /// Execution/memory barrier between preceding and following commands.
    Barrier,
}
272
/// Ordered list of bind-group layouts a pipeline expects.
#[derive(Debug, Clone, Default)]
pub struct PipelineLayout {
    pub bind_group_layouts: Vec<BindGroupLayoutDesc>,
}

/// Describes the bindings within one bind group.
#[derive(Debug, Clone)]
pub struct BindGroupLayoutDesc {
    pub entries: Vec<BindGroupLayoutEntry>,
}

/// One binding slot: its index, the stage that can see it, and its type.
#[derive(Debug, Clone)]
pub struct BindGroupLayoutEntry {
    pub binding: u32,
    pub visibility: ShaderStage,
    pub ty: BindingType,
}

/// Kind of resource bound at a bind-group slot.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BindingType {
    UniformBuffer,
    StorageBuffer,
    Texture,
    Sampler,
}
301
/// Abstraction over a concrete graphics API.
///
/// Implementations own all GPU resources; callers refer to them only
/// through the opaque `*Handle` newtypes. `Send + Sync` so a context can
/// be shared across threads behind appropriate synchronization.
pub trait BackendContext: Send + Sync {
    /// Creates a buffer of `size` bytes with the given usage flags.
    fn create_buffer(&mut self, size: usize, usage: BufferUsage) -> BufferHandle;

    /// Creates a 2-D texture with the given dimensions and format.
    fn create_texture(&mut self, width: u32, height: u32, format: TextureFormat) -> TextureHandle;

    /// Compiles (or records) `source` for the given stage.
    fn create_shader(&mut self, source: &str, stage: ShaderStage) -> ShaderHandle;

    /// Builds a render pipeline from vertex + fragment shaders and a layout.
    fn create_pipeline(
        &mut self,
        vertex: ShaderHandle,
        fragment: ShaderHandle,
        layout: &PipelineLayout,
    ) -> PipelineHandle;

    /// Builds a compute pipeline from a compute shader and a layout.
    fn create_compute_pipeline(
        &mut self,
        shader: ShaderHandle,
        layout: &PipelineLayout,
    ) -> ComputePipelineHandle;

    /// Executes (or records) a batch of commands.
    fn submit(&mut self, commands: &[GpuCommand]);

    /// Presents the current frame (may be a no-op for off-screen backends).
    fn present(&mut self);

    /// Uploads `data` into `buffer` (software impl copies from offset 0,
    /// truncating to the smaller of the two lengths).
    fn write_buffer(&mut self, buffer: BufferHandle, data: &[u8]);

    /// Reads back the full contents of `buffer`.
    fn read_buffer(&self, buffer: BufferHandle) -> Vec<u8>;

    /// Uploads `data` into `texture` (same truncation rule as buffers).
    fn write_texture(&mut self, texture: TextureHandle, data: &[u8]);

    /// Reads back the full contents of `texture`.
    fn read_texture(&self, texture: TextureHandle) -> Vec<u8>;

    /// Releases `buffer`; the handle becomes stale afterwards.
    fn destroy_buffer(&mut self, buffer: BufferHandle);

    /// Releases `texture`; the handle becomes stale afterwards.
    fn destroy_texture(&mut self, texture: TextureHandle);

    /// Human-readable backend name (e.g. for logging).
    fn name(&self) -> &str;
}
359
/// Global monotonically increasing counter backing all handle ids.
/// Starts at 1 so that 0 can serve as a never-issued sentinel.
static NEXT_HANDLE: AtomicU64 = AtomicU64::new(1);

/// Returns the next unique handle id. `Relaxed` is sufficient here:
/// only uniqueness matters, not ordering relative to other memory ops.
fn next_handle() -> u64 {
    NEXT_HANDLE.fetch_add(1, Ordering::Relaxed)
}
369
/// CPU-side backing store for a buffer in the software backend.
#[derive(Debug, Clone)]
struct SoftwareBuffer {
    data: Vec<u8>,
    // NOTE(review): `usage` is stored but never read anywhere in this file —
    // presumably kept for future validation (e.g. rejecting copies into
    // buffers lacking COPY_DST). Confirm before removing.
    usage: BufferUsage,
}

/// CPU-side backing store for a texture (`width * height * bpp` bytes).
#[derive(Debug, Clone)]
struct SoftwareTexture {
    data: Vec<u8>,
    width: u32,
    height: u32,
    format: TextureFormat,
}

/// Retained shader source; the software backend never executes it.
#[derive(Debug, Clone)]
struct SoftwareShader {
    source: String,
    stage: ShaderStage,
}
393
394pub struct SoftwareContext {
401 buffers: HashMap<u64, SoftwareBuffer>,
402 textures: HashMap<u64, SoftwareTexture>,
403 shaders: HashMap<u64, SoftwareShader>,
404 command_log: Vec<GpuCommand>,
405}
406
407impl SoftwareContext {
408 pub fn new() -> Self {
409 Self {
410 buffers: HashMap::new(),
411 textures: HashMap::new(),
412 shaders: HashMap::new(),
413 command_log: Vec::new(),
414 }
415 }
416
417 pub fn command_count(&self) -> usize {
419 self.command_log.len()
420 }
421
422 pub fn drain_commands(&mut self) -> Vec<GpuCommand> {
424 std::mem::take(&mut self.command_log)
425 }
426}
427
428impl Default for SoftwareContext {
429 fn default() -> Self { Self::new() }
430}
431
432impl BackendContext for SoftwareContext {
433 fn create_buffer(&mut self, size: usize, usage: BufferUsage) -> BufferHandle {
434 let id = next_handle();
435 self.buffers.insert(id, SoftwareBuffer {
436 data: vec![0u8; size],
437 usage,
438 });
439 BufferHandle(id)
440 }
441
442 fn create_texture(&mut self, width: u32, height: u32, format: TextureFormat) -> TextureHandle {
443 let id = next_handle();
444 let byte_size = (width as usize) * (height as usize) * format.bytes_per_pixel();
445 self.textures.insert(id, SoftwareTexture {
446 data: vec![0u8; byte_size],
447 width,
448 height,
449 format,
450 });
451 TextureHandle(id)
452 }
453
454 fn create_shader(&mut self, source: &str, stage: ShaderStage) -> ShaderHandle {
455 let id = next_handle();
456 self.shaders.insert(id, SoftwareShader {
457 source: source.to_string(),
458 stage,
459 });
460 ShaderHandle(id)
461 }
462
463 fn create_pipeline(
464 &mut self,
465 _vertex: ShaderHandle,
466 _fragment: ShaderHandle,
467 _layout: &PipelineLayout,
468 ) -> PipelineHandle {
469 PipelineHandle(next_handle())
470 }
471
472 fn create_compute_pipeline(
473 &mut self,
474 _shader: ShaderHandle,
475 _layout: &PipelineLayout,
476 ) -> ComputePipelineHandle {
477 ComputePipelineHandle(next_handle())
478 }
479
480 fn submit(&mut self, commands: &[GpuCommand]) {
481 for cmd in commands {
482 match cmd {
483 GpuCommand::CopyBufferToBuffer { src, dst, size } => {
484 let src_data = self.buffers.get(&src.0)
485 .map(|b| b.data[..*size].to_vec())
486 .unwrap_or_default();
487 if let Some(dst_buf) = self.buffers.get_mut(&dst.0) {
488 let len = src_data.len().min(dst_buf.data.len());
489 dst_buf.data[..len].copy_from_slice(&src_data[..len]);
490 }
491 }
492 GpuCommand::CopyBufferToTexture { src, dst, width, height } => {
493 let src_data = self.buffers.get(&src.0)
494 .map(|b| b.data.clone())
495 .unwrap_or_default();
496 if let Some(tex) = self.textures.get_mut(&dst.0) {
497 let len = src_data.len().min(tex.data.len());
498 tex.data[..len].copy_from_slice(&src_data[..len]);
499 }
500 }
501 _ => { }
502 }
503 self.command_log.push(cmd.clone());
504 }
505 }
506
507 fn present(&mut self) {
508 }
510
511 fn write_buffer(&mut self, buffer: BufferHandle, data: &[u8]) {
512 if let Some(buf) = self.buffers.get_mut(&buffer.0) {
513 let len = data.len().min(buf.data.len());
514 buf.data[..len].copy_from_slice(&data[..len]);
515 }
516 }
517
518 fn read_buffer(&self, buffer: BufferHandle) -> Vec<u8> {
519 self.buffers.get(&buffer.0)
520 .map(|b| b.data.clone())
521 .unwrap_or_default()
522 }
523
524 fn write_texture(&mut self, texture: TextureHandle, data: &[u8]) {
525 if let Some(tex) = self.textures.get_mut(&texture.0) {
526 let len = data.len().min(tex.data.len());
527 tex.data[..len].copy_from_slice(&data[..len]);
528 }
529 }
530
531 fn read_texture(&self, texture: TextureHandle) -> Vec<u8> {
532 self.textures.get(&texture.0)
533 .map(|t| t.data.clone())
534 .unwrap_or_default()
535 }
536
537 fn destroy_buffer(&mut self, buffer: BufferHandle) {
538 self.buffers.remove(&buffer.0);
539 }
540
541 fn destroy_texture(&mut self, texture: TextureHandle) {
542 self.textures.remove(&texture.0);
543 }
544
545 fn name(&self) -> &str { "Software" }
546}
547
548pub struct OpenGLContext {
557 inner: SoftwareContext,
558 gl: Option<()>, }
562
563impl OpenGLContext {
564 pub fn new(has_gl: bool) -> Self {
568 Self {
569 inner: SoftwareContext::new(),
570 gl: if has_gl { Some(()) } else { None },
571 }
572 }
573
574 pub fn has_gl(&self) -> bool { self.gl.is_some() }
576}
577
578impl Default for OpenGLContext {
579 fn default() -> Self { Self::new(false) }
580}
581
// NOTE(review): every method currently forwards to the inner
// SoftwareContext. A real implementation would translate these calls
// into GL commands when `gl` is present; nothing here reads `gl` yet.
impl BackendContext for OpenGLContext {
    fn create_buffer(&mut self, size: usize, usage: BufferUsage) -> BufferHandle {
        self.inner.create_buffer(size, usage)
    }

    fn create_texture(&mut self, width: u32, height: u32, format: TextureFormat) -> TextureHandle {
        self.inner.create_texture(width, height, format)
    }

    fn create_shader(&mut self, source: &str, stage: ShaderStage) -> ShaderHandle {
        self.inner.create_shader(source, stage)
    }

    fn create_pipeline(
        &mut self,
        vertex: ShaderHandle,
        fragment: ShaderHandle,
        layout: &PipelineLayout,
    ) -> PipelineHandle {
        self.inner.create_pipeline(vertex, fragment, layout)
    }

    fn create_compute_pipeline(
        &mut self,
        shader: ShaderHandle,
        layout: &PipelineLayout,
    ) -> ComputePipelineHandle {
        self.inner.create_compute_pipeline(shader, layout)
    }

    fn submit(&mut self, commands: &[GpuCommand]) {
        self.inner.submit(commands);
    }

    fn present(&mut self) {
        self.inner.present();
    }

    fn write_buffer(&mut self, buffer: BufferHandle, data: &[u8]) {
        self.inner.write_buffer(buffer, data);
    }

    fn read_buffer(&self, buffer: BufferHandle) -> Vec<u8> {
        self.inner.read_buffer(buffer)
    }

    fn write_texture(&mut self, texture: TextureHandle, data: &[u8]) {
        self.inner.write_texture(texture, data);
    }

    fn read_texture(&self, texture: TextureHandle) -> Vec<u8> {
        self.inner.read_texture(texture)
    }

    fn destroy_buffer(&mut self, buffer: BufferHandle) {
        self.inner.destroy_buffer(buffer);
    }

    fn destroy_texture(&mut self, texture: TextureHandle) {
        self.inner.destroy_texture(texture);
    }

    // Reports "OpenGL" even though execution is software-backed.
    fn name(&self) -> &str { "OpenGL" }
}
648
// Unit tests covering the flag types, capability tables, backend detection,
// and the software / OpenGL context implementations.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn buffer_usage_flags_combine() {
        let usage = BufferUsage::VERTEX | BufferUsage::COPY_DST;
        assert!(usage.contains(BufferUsage::VERTEX));
        assert!(usage.contains(BufferUsage::COPY_DST));
        assert!(!usage.contains(BufferUsage::INDEX));
    }

    #[test]
    fn texture_format_bytes() {
        assert_eq!(TextureFormat::R8.bytes_per_pixel(), 1);
        assert_eq!(TextureFormat::RGBA8.bytes_per_pixel(), 4);
        assert_eq!(TextureFormat::RGBA32F.bytes_per_pixel(), 16);
        assert!(TextureFormat::Depth32F.is_depth());
        assert!(!TextureFormat::RGBA8.is_depth());
    }

    #[test]
    fn backend_display() {
        assert_eq!(format!("{}", GpuBackend::Vulkan), "Vulkan");
        assert_eq!(format!("{}", GpuBackend::Software), "Software");
    }

    #[test]
    fn capabilities_for_backend() {
        let caps = BackendCapabilities::for_backend(GpuBackend::Vulkan);
        assert!(caps.compute_shaders);
        assert_eq!(caps.max_texture_size, 16384);
        assert!(caps.multi_draw_indirect);

        let sw = BackendCapabilities::for_backend(GpuBackend::Software);
        assert!(!sw.indirect_draw);
    }

    #[test]
    fn workgroup_fits() {
        let caps = BackendCapabilities::for_backend(GpuBackend::Vulkan);
        assert!(caps.workgroup_fits(1024, 1, 1));
        assert!(!caps.workgroup_fits(2048, 1, 1));
    }

    // detect_backend is pure cfg!-based, so it must be stable per build.
    #[test]
    fn detect_backend_is_deterministic() {
        let a = detect_backend();
        let b = detect_backend();
        assert_eq!(a, b);
    }

    #[test]
    fn software_context_create_buffer() {
        let mut ctx = SoftwareContext::new();
        let buf = ctx.create_buffer(128, BufferUsage::VERTEX);
        assert_ne!(buf.0, 0);
        let data = ctx.read_buffer(buf);
        assert_eq!(data.len(), 128);
        assert!(data.iter().all(|&b| b == 0));
    }

    #[test]
    fn software_context_write_read_buffer() {
        let mut ctx = SoftwareContext::new();
        let buf = ctx.create_buffer(8, BufferUsage::STORAGE);
        ctx.write_buffer(buf, &[1, 2, 3, 4, 5, 6, 7, 8]);
        let out = ctx.read_buffer(buf);
        assert_eq!(out, vec![1, 2, 3, 4, 5, 6, 7, 8]);
    }

    #[test]
    fn software_context_texture() {
        let mut ctx = SoftwareContext::new();
        // 2x2 RGBA8 = 16 bytes of backing storage.
        let tex = ctx.create_texture(2, 2, TextureFormat::RGBA8);
        let data = ctx.read_texture(tex);
        assert_eq!(data.len(), 16);

        let pixels = vec![255u8; 16];
        ctx.write_texture(tex, &pixels);
        assert_eq!(ctx.read_texture(tex), pixels);
    }

    #[test]
    fn software_context_shader_and_pipeline() {
        let mut ctx = SoftwareContext::new();
        let vs = ctx.create_shader("void main(){}", ShaderStage::Vertex);
        let fs = ctx.create_shader("void main(){}", ShaderStage::Fragment);
        let layout = PipelineLayout::default();
        let pipe = ctx.create_pipeline(vs, fs, &layout);
        assert_ne!(pipe.0, 0);
    }

    #[test]
    fn software_context_compute_pipeline() {
        let mut ctx = SoftwareContext::new();
        let cs = ctx.create_shader("void main(){}", ShaderStage::Compute);
        let layout = PipelineLayout::default();
        let cp = ctx.create_compute_pipeline(cs, &layout);
        assert_ne!(cp.0, 0);
    }

    #[test]
    fn software_context_submit_copy() {
        let mut ctx = SoftwareContext::new();
        let src = ctx.create_buffer(4, BufferUsage::COPY_SRC);
        let dst = ctx.create_buffer(4, BufferUsage::COPY_DST);
        ctx.write_buffer(src, &[10, 20, 30, 40]);
        ctx.submit(&[GpuCommand::CopyBufferToBuffer {
            src,
            dst,
            size: 4,
        }]);
        assert_eq!(ctx.read_buffer(dst), vec![10, 20, 30, 40]);
        assert_eq!(ctx.command_count(), 1);
    }

    #[test]
    fn software_context_destroy() {
        let mut ctx = SoftwareContext::new();
        let buf = ctx.create_buffer(8, BufferUsage::VERTEX);
        ctx.destroy_buffer(buf);
        // Stale handles read back as empty rather than panicking.
        assert!(ctx.read_buffer(buf).is_empty());
    }

    #[test]
    fn opengl_context_delegates() {
        let mut ctx = OpenGLContext::new(false);
        assert!(!ctx.has_gl());
        assert_eq!(ctx.name(), "OpenGL");
        let buf = ctx.create_buffer(16, BufferUsage::UNIFORM);
        ctx.write_buffer(buf, &[0xAA; 16]);
        assert_eq!(ctx.read_buffer(buf), vec![0xAA; 16]);
    }

    #[test]
    fn pipeline_layout_default_empty() {
        let layout = PipelineLayout::default();
        assert!(layout.bind_group_layouts.is_empty());
    }

    #[test]
    fn binding_type_equality() {
        assert_eq!(BindingType::UniformBuffer, BindingType::UniformBuffer);
        assert_ne!(BindingType::Texture, BindingType::Sampler);
    }

    #[test]
    fn gpu_command_clone() {
        let cmd = GpuCommand::Barrier;
        let _cmd2 = cmd.clone();
    }

    // Handle ids come from one global atomic counter, so they never repeat.
    #[test]
    fn handles_are_unique() {
        let mut ctx = SoftwareContext::new();
        let a = ctx.create_buffer(1, BufferUsage::VERTEX);
        let b = ctx.create_buffer(1, BufferUsage::VERTEX);
        assert_ne!(a, b);
    }

    #[test]
    fn write_buffer_truncates_to_size() {
        let mut ctx = SoftwareContext::new();
        let buf = ctx.create_buffer(4, BufferUsage::STORAGE);
        ctx.write_buffer(buf, &[1, 2, 3, 4, 5, 6]);
        assert_eq!(ctx.read_buffer(buf), vec![1, 2, 3, 4]);
    }
}