//! GPU synchronization primitives built on top of gfx-hal and used by the
//! lambda-platform rendering implementations to synchronize GPU operations.

use gfx_hal::device::Device;

pub struct RenderSemaphoreBuilder {}

impl RenderSemaphoreBuilder {
  pub fn new() -> Self {
    return Self {};
  }

  /// Builds a new render semaphore using the provided GPU. This semaphore can
  /// only be used with the GPU that created it.
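  ///
  /// A minimal usage sketch (compile-skipped, since it assumes a `Gpu` that
  /// was built elsewhere by the platform's GPU setup code):
  ///
  /// ```ignore
  /// // `gpu` is assumed to be a `super::gpu::Gpu<RenderBackend>` created by
  /// // the surrounding platform code for the active backend.
  /// let semaphore = RenderSemaphoreBuilder::new().build(&mut gpu);
  ///
  /// // ... use the semaphore while recording and submitting work ...
  ///
  /// // Destroy it with the same GPU once the GPU is done with it.
  /// semaphore.destroy(&gpu);
  /// ```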
  pub fn build<RenderBackend: gfx_hal::Backend>(
    self,
    gpu: &mut super::gpu::Gpu<RenderBackend>,
  ) -> RenderSemaphore<RenderBackend> {
    let semaphore = gpu
      .internal_logical_device()
      .create_semaphore()
      .expect("The GPU has no memory to allocate the semaphore");

    return RenderSemaphore { semaphore };
  }
}

/// Render semaphores are used to synchronize operations happening within the
/// GPU. This allows us to tell the GPU to wait for a frame to finish
/// rendering before presenting it to the screen.
pub struct RenderSemaphore<RenderBackend: gfx_hal::Backend> {
  semaphore: RenderBackend::Semaphore,
}

impl<RenderBackend: gfx_hal::Backend> RenderSemaphore<RenderBackend> {
  /// Destroys the semaphore using the GPU that created it.
  pub fn destroy(self, gpu: &super::gpu::Gpu<RenderBackend>) {
    unsafe {
      gpu
        .internal_logical_device()
        .destroy_semaphore(self.semaphore)
    }
  }
}

impl<RenderBackend: gfx_hal::Backend> RenderSemaphore<RenderBackend> {
  /// Retrieve a reference to the internal semaphore.
  pub(super) fn internal_semaphore(&self) -> &RenderBackend::Semaphore {
    return &self.semaphore;
  }

  /// Retrieve a mutable reference to the internal semaphore.
  pub(super) fn internal_semaphore_mut(
    &mut self,
  ) -> &mut RenderBackend::Semaphore {
    return &mut self.semaphore;
  }
}

pub struct RenderSubmissionFenceBuilder {
  default_render_timeout: u64,
}

impl RenderSubmissionFenceBuilder {
  /// Creates a new Render Submission Fence Builder that defaults to a 1 second
  /// timeout for waiting on the fence.
  pub fn new() -> Self {
    return Self {
      default_render_timeout: 1_000_000_000,
    };
  }

  /// Sets the default render timeout in nanoseconds. This timeout is used
  /// when blocking on the fence unless an override is supplied at wait time.
  pub fn with_render_timeout(mut self, render_timeout: u64) -> Self {
    self.default_render_timeout = render_timeout;
    return self;
  }

  /// Builds a new submission fence using the provided GPU. This fence can only
  /// be used to block operations on the GPU that created it.
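  ///
  /// A minimal usage sketch (compile-skipped, since it assumes a `Gpu` built
  /// elsewhere; the 5 second timeout is only an illustrative value):
  ///
  /// ```ignore
  /// // `gpu` is assumed to be a `super::gpu::Gpu<RenderBackend>` created by
  /// // the surrounding platform code.
  /// let fence = RenderSubmissionFenceBuilder::new()
  ///   .with_render_timeout(5_000_000_000) // 5 seconds, in nanoseconds.
  ///   .build(&mut gpu);
  /// ```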
  pub fn build<RenderBackend: gfx_hal::Backend>(
    self,
    gpu: &mut super::gpu::Gpu<RenderBackend>,
  ) -> RenderSubmissionFence<RenderBackend> {
    let fence = gpu
      .internal_logical_device()
      .create_fence(true)
      .expect("There is not enough memory to create a fence on this device.");

    return RenderSubmissionFence {
      fence,
      default_render_timeout: self.default_render_timeout,
    };
  }
}

/// A GPU fence is used to synchronize the CPU with the GPU. It ensures that a
/// GPU operation has completed before the CPU attempts to submit more commands
/// to it.
pub struct RenderSubmissionFence<RenderBackend: gfx_hal::Backend> {
  fence: RenderBackend::Fence,
  default_render_timeout: u64,
}

impl<RenderBackend: gfx_hal::Backend> RenderSubmissionFence<RenderBackend> {
  /// Block the current thread until the fence is signaled by the GPU, then
  /// reset the fence.
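  ///
  /// A per-frame sketch (compile-skipped; `gpu` and `fence` are assumed to
  /// have been created beforehand as shown in the builder examples above):
  ///
  /// ```ignore
  /// // Wait for the previous submission to finish before reusing resources,
  /// // falling back to the builder's default timeout.
  /// fence.block_until_ready(&mut gpu, None);
  ///
  /// // Or wait with an explicit 100ms (100_000_000 ns) timeout override.
  /// fence.block_until_ready(&mut gpu, Some(100_000_000));
  /// ```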
  pub fn block_until_ready(
    &mut self,
    gpu: &mut super::gpu::Gpu<RenderBackend>,
    render_timeout_override: Option<u64>,
  ) {
    let timeout = match render_timeout_override {
      Some(render_timeout_override) => render_timeout_override,
      None => self.default_render_timeout,
    };

    unsafe {
      gpu.internal_logical_device()
        .wait_for_fence(&self.fence, timeout)
    }
    .expect("The GPU ran out of memory or has become detached from the current context.");

    unsafe { gpu.internal_logical_device().reset_fence(&mut self.fence) }
      .expect("The fence failed to reset.");
  }

  /// Destroy this fence given the GPU that created it.
  pub fn destroy(self, gpu: &super::gpu::Gpu<RenderBackend>) {
    unsafe { gpu.internal_logical_device().destroy_fence(self.fence) }
  }
}

impl<RenderBackend: gfx_hal::Backend> RenderSubmissionFence<RenderBackend> {
  /// Retrieve the underlying fence.
  pub fn internal_fence(&self) -> &RenderBackend::Fence {
    return &self.fence;
  }

  /// Retrieve a mutable reference to the underlying fence.
  pub fn internal_fence_mut(&mut self) -> &mut RenderBackend::Fence {
    return &mut self.fence;
  }
}