use gfx_hal::{
  memory::{
    Segment,
    SparseFlags,
  },
  prelude::Device,
  Backend,
};

use super::gpu::Gpu;

// Reuse gfx-hal buffer usage & properties for now.
/// Buffer usage flags (e.g. `Usage::VERTEX`), re-exported from gfx-hal.
pub type Usage = gfx_hal::buffer::Usage;
/// Memory property flags (e.g. `Properties::CPU_VISIBLE`), re-exported from
/// gfx-hal.
pub type Properties = gfx_hal::memory::Properties;

/// The type of buffers that can be allocated on the GPU.
#[derive(Debug, Clone, Copy)]
pub enum BufferType {
  /// Vertex data consumed by the vertex shader stage.
  Vertex,
  /// Indices into a vertex buffer for indexed drawing.
  Index,
  /// Small, read-only data blocks for shaders.
  Uniform,
  /// Larger shader-accessible storage that can be read and written.
  Storage,
}

/// A buffer is a block of memory that can be used to store data that can be
/// accessed by the GPU.
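/// Buffers are not freed on `Drop`; call [`Buffer::destroy`] with the GPU
/// that created them once they're no longer in use.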
#[derive(Debug)]
pub struct Buffer<RenderBackend: Backend> {
  buffer: RenderBackend::Buffer,
  memory: RenderBackend::Memory,
  stride: usize,
  buffer_type: BufferType,
}

impl<RenderBackend: Backend> Buffer<RenderBackend> {
  /// Destroy the buffer and all of its resources with the GPU that
  /// created it.
  pub fn destroy(self, gpu: &Gpu<RenderBackend>) {
    unsafe {
      gpu.internal_logical_device().free_memory(self.memory);
      gpu.internal_logical_device().destroy_buffer(self.buffer);
    }
  }

  /// Size of a single element stored in the buffer, in bytes.
  pub fn stride(&self) -> usize {
    return self.stride;
  }
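
  /// The type of the buffer (vertex, index, uniform, or storage).
  pub fn buffer_type(&self) -> BufferType {
    return self.buffer_type;
  }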
}

impl<RenderBackend: Backend> Buffer<RenderBackend> {
  /// Retrieve a reference to the internal buffer.
  pub(super) fn internal_buffer(&self) -> &RenderBackend::Buffer {
    return &self.buffer;
  }
}

/// Builder for configuring and allocating [`Buffer`]s on the GPU.
pub struct BufferBuilder {
  buffer_length: usize,
  usage: Usage,
  properties: Properties,
  buffer_type: BufferType,
}

impl BufferBuilder {
  pub fn new() -> Self {
    return Self {
      buffer_length: 0,
      usage: Usage::empty(),
      properties: Properties::empty(),
      buffer_type: BufferType::Vertex,
    };
  }

  pub fn with_length(&mut self, length: usize) -> &mut Self {
    self.buffer_length = length;
    return self;
  }

  pub fn with_usage(&mut self, usage: Usage) -> &mut Self {
    self.usage = usage;
    return self;
  }

  pub fn with_properties(&mut self, properties: Properties) -> &mut Self {
    self.properties = properties;
    return self;
  }

  pub fn with_buffer_type(&mut self, buffer_type: BufferType) -> &mut Self {
    self.buffer_type = buffer_type;
    return self;
  }

  /// Builds & binds a buffer of memory to the GPU. If any step fails, the
  /// resources acquired up to that point are released before the error is
  /// returned. `Data` must represent the data that will be stored in the
  /// buffer, meaning it should be `#[repr(C)]` and
  /// `data.len() * size_of::<Data>()` should equal the buffer length.
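  ///
  /// A sketch of typical usage, assuming a [`Gpu`] named `gpu` and a
  /// `#[repr(C)]` vertex type `Vertex` (both names are illustrative):
  ///
  /// ```ignore
  /// let vertices: Vec<Vertex> = vec![/* ... */];
  /// let buffer = BufferBuilder::new()
  ///   .with_length(vertices.len() * std::mem::size_of::<Vertex>())
  ///   .with_usage(Usage::VERTEX)
  ///   .with_properties(Properties::CPU_VISIBLE | Properties::COHERENT)
  ///   .with_buffer_type(BufferType::Vertex)
  ///   .build(&mut gpu, vertices)?;
  /// ```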
  pub fn build<RenderBackend: Backend, Data: Sized>(
    &self,
    gpu: &mut Gpu<RenderBackend>,
    data: Vec<Data>,
  ) -> Result<Buffer<RenderBackend>, &'static str> {
    use gfx_hal::{
      adapter::PhysicalDevice,
      MemoryTypeId,
    };
    let logical_device = gpu.internal_logical_device();
    let physical_device = gpu.internal_physical_device();

    // Refuse to copy more bytes than the caller provided; copying
    // `buffer_length` bytes out of a smaller `data` would read out of bounds.
    if data.len() * std::mem::size_of::<Data>() < self.buffer_length {
      return Err("Buffer length exceeds the size of the provided data.");
    }

    // TODO(vmarcella): Add the ability for the user to specify the memory
    // properties (I.E. SparseFlags::SPARSE_MEMORY).
    println!("[DEBUG] Creating buffer of length: {}", self.buffer_length);
    let buffer_result = unsafe {
      logical_device.create_buffer(
        self.buffer_length as u64,
        self.usage,
        SparseFlags::empty(),
      )
    };

    if buffer_result.is_err() {
      return Err("Failed to create buffer for allocating memory.");
    }

    let mut buffer = buffer_result.unwrap();

    let requirements =
      unsafe { logical_device.get_buffer_requirements(&buffer) };
    let memory_types = physical_device.memory_properties().memory_types;

    println!("[DEBUG] Buffer requirements: {:?}", requirements);
    // Find a memory type that supports the requirements of the buffer.
    let memory_type_query = memory_types
      .iter()
      .enumerate()
      .find(|(id, memory_type)| {
        let type_supported = requirements.type_mask & (1 << id) != 0;
        type_supported && memory_type.properties.contains(self.properties)
      })
      .map(|(id, _)| MemoryTypeId(id));

    // Clean up the buffer if no suitable memory type could be found.
    if memory_type_query.is_none() {
      unsafe { logical_device.destroy_buffer(buffer) };
      return Err("No memory type on the GPU supports this buffer.");
    }

    let memory_type = memory_type_query.unwrap();

    println!("Allocating memory for buffer.");
    // Allocates the memory on the GPU for the buffer.
    let buffer_memory_allocation =
      unsafe { logical_device.allocate_memory(memory_type, requirements.size) };

    // Destroy the buffer if the allocation fails.
    if buffer_memory_allocation.is_err() {
      unsafe { logical_device.destroy_buffer(buffer) };
      return Err("Failed to allocate memory for buffer.");
    }

    let mut buffer_memory = buffer_memory_allocation.unwrap();

    // Bind the buffer to the GPU memory
    let buffer_binding = unsafe {
      logical_device.bind_buffer_memory(&buffer_memory, 0, &mut buffer)
    };

    // Destroy the buffer & free its memory if we failed to bind them
    // together.
    if buffer_binding.is_err() {
      unsafe {
        logical_device.free_memory(buffer_memory);
        logical_device.destroy_buffer(buffer);
      }
      return Err("Failed to bind buffer memory.");
    }

    // Get address of the buffer memory on the GPU so that we can write to it.
    let get_mapping_to_memory =
      unsafe { logical_device.map_memory(&mut buffer_memory, Segment::ALL) };

    if get_mapping_to_memory.is_err() {
      unsafe {
        logical_device.free_memory(buffer_memory);
        logical_device.destroy_buffer(buffer);
      }
      return Err("Failed to map buffer memory.");
    }
    let mapped_memory = get_mapping_to_memory.unwrap();

    // Copy the data to the GPU memory.
    unsafe {
      std::ptr::copy_nonoverlapping(
        data.as_ptr() as *const u8,
        mapped_memory,
        self.buffer_length,
      );
    };

    // Flush the data to ensure it is written to the GPU memory.
    let memory_flush = unsafe {
      logical_device.flush_mapped_memory_ranges(std::iter::once((
        &buffer_memory,
        Segment::ALL,
      )))
    };

    // Unmap & release everything if the flush fails.
    if memory_flush.is_err() {
      unsafe {
        logical_device.unmap_memory(&mut buffer_memory);
        logical_device.free_memory(buffer_memory);
        logical_device.destroy_buffer(buffer);
      }
      return Err("Failed to flush memory.");
    }

    // Unmap the memory now that it's no longer needed by the CPU.
    unsafe { logical_device.unmap_memory(&mut buffer_memory) };

    return Ok(Buffer {
      buffer,
      memory: buffer_memory,
      stride: std::mem::size_of::<Data>(),
      buffer_type: self.buffer_type,
    });
  }
}
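
// `BufferBuilder::new()` is the canonical constructor; `Default` is provided
// as a convenience alias.
impl Default for BufferBuilder {
  fn default() -> Self {
    return Self::new();
  }
}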