// vulkano/sync/mod.rs
//! Synchronization on the GPU.
//!
//! Just like for CPU code, you have to ensure that buffers and images are not accessed mutably by
//! multiple GPU queues simultaneously and that they are not accessed mutably by the CPU and by the
//! GPU simultaneously.
//!
//! This safety is enforced at runtime by vulkano but it is not magic and you will require some
//! knowledge if you want to avoid errors.
9
10#[allow(unused)]
11pub(crate) use self::pipeline::*;
12pub use self::{
13    future::{now, GpuFuture},
14    pipeline::{
15        AccessFlags, BufferMemoryBarrier, DependencyFlags, DependencyInfo, ImageMemoryBarrier,
16        MemoryBarrier, PipelineStage, PipelineStages, QueueFamilyOwnershipTransfer,
17    },
18};
19use crate::{device::Queue, VulkanError};
20use smallvec::SmallVec;
21use std::{
22    error::Error,
23    fmt::{Display, Formatter},
24    sync::Arc,
25};
26
27pub mod event;
28pub mod fence;
29pub mod future;
30mod pipeline;
31pub mod semaphore;
32
/// Declares in which queue(s) a resource can be used.
///
/// When you create a buffer or an image, you have to tell the Vulkan library in which queue
/// families it will be used. The vulkano library requires you to tell in which queue family
/// the resource will be used, even for exclusive mode.
#[derive(Debug, Clone, PartialEq, Eq)]
// TODO: remove
pub enum SharingMode {
    /// The resource is used in only one queue family.
    Exclusive,
    /// The resource is used in multiple queue families. Can be slower than `Exclusive`.
    Concurrent(Vec<u32>), // TODO: Vec is too expensive here
}
46
// Converting from a single queue: one queue means no cross-family sharing is
// needed, so the result is always `Exclusive` and the queue itself (and its
// family index) is ignored.
impl<'a> From<&'a Arc<Queue>> for SharingMode {
    #[inline]
    fn from(_queue: &'a Arc<Queue>) -> SharingMode {
        SharingMode::Exclusive
    }
}
53
54impl<'a> From<&'a [&'a Arc<Queue>]> for SharingMode {
55    #[inline]
56    fn from(queues: &'a [&'a Arc<Queue>]) -> SharingMode {
57        SharingMode::Concurrent(
58            queues
59                .iter()
60                .map(|queue| queue.queue_family_index())
61                .collect(),
62        )
63    }
64}
65
/// Declares in which queue(s) a resource can be used.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Sharing<I>
where
    I: IntoIterator<Item = u32>,
{
    /// The resource is used in only one queue family.
    Exclusive,
    /// The resource is used in multiple queue families. Can be slower than `Exclusive`.
    Concurrent(I),
}
77
78impl Sharing<SmallVec<[u32; 4]>> {
79    /// Returns `true` if `self` is the `Exclusive` variant.
80    #[inline]
81    pub fn is_exclusive(&self) -> bool {
82        matches!(self, Self::Exclusive)
83    }
84
85    /// Returns `true` if `self` is the `Concurrent` variant.
86    #[inline]
87    pub fn is_concurrent(&self) -> bool {
88        matches!(self, Self::Concurrent(..))
89    }
90
91    pub(crate) fn to_vk(&self) -> (ash::vk::SharingMode, &[u32]) {
92        match self {
93            Sharing::Exclusive => (ash::vk::SharingMode::EXCLUSIVE, [].as_slice()),
94            Sharing::Concurrent(queue_family_indices) => (
95                ash::vk::SharingMode::CONCURRENT,
96                queue_family_indices.as_slice(),
97            ),
98        }
99    }
100}
101
/// How the memory of a resource is currently being accessed.
///
/// Internal bookkeeping used to detect conflicting CPU/GPU accesses at runtime.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum CurrentAccess {
    /// The resource is currently being accessed exclusively by the CPU.
    CpuExclusive,

    /// The resource is currently being accessed exclusively by the GPU.
    /// The GPU can have multiple exclusive accesses, if they are separated by synchronization.
    ///
    /// `gpu_writes` must not be 0. If it's decremented to 0, switch to `Shared`.
    GpuExclusive { gpu_reads: usize, gpu_writes: usize },

    /// The resource is not currently being accessed, or is being accessed for reading only.
    Shared { cpu_reads: usize, gpu_reads: usize },
}
117
/// Error when attempting to read or write a resource from the host (CPU).
#[derive(Clone, Debug)]
pub enum HostAccessError {
    /// The resource is already in use in a way that conflicts with the requested access.
    AccessConflict(AccessConflict),
    /// Invalidating the device memory failed.
    Invalidate(VulkanError),
    /// The resource is not managed by vulkano, so its accesses cannot be tracked.
    Unmanaged,
    /// The device memory backing the resource is not currently host-mapped.
    NotHostMapped,
    /// The requested range is not within the currently mapped range of device memory.
    OutOfMappedRange,
}
127
128impl Error for HostAccessError {
129    fn source(&self) -> Option<&(dyn Error + 'static)> {
130        match self {
131            Self::AccessConflict(err) => Some(err),
132            Self::Invalidate(err) => Some(err),
133            _ => None,
134        }
135    }
136}
137
138impl Display for HostAccessError {
139    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
140        match self {
141            Self::AccessConflict(_) => {
142                write!(f, "the resource is already in use in a conflicting way")
143            }
144            Self::Unmanaged => write!(f, "the resource is not managed by vulkano"),
145            HostAccessError::Invalidate(_) => write!(f, "invalidating the device memory failed"),
146            HostAccessError::NotHostMapped => {
147                write!(f, "the device memory is not current host-mapped")
148            }
149            HostAccessError::OutOfMappedRange => write!(
150                f,
151                "the requested range is not within the currently mapped range of device memory",
152            ),
153        }
154    }
155}
156
/// Conflict when attempting to access a resource.
///
/// Identifies which existing lock (host or device, read or write) prevented
/// the requested access; carried by [`HostAccessError::AccessConflict`].
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AccessConflict {
    /// The resource is already locked for reading by the host (CPU).
    HostRead,

    /// The resource is already locked for writing by the host (CPU).
    HostWrite,

    /// The resource is already locked for reading by the device (GPU).
    DeviceRead,

    /// The resource is already locked for writing by the device (GPU).
    DeviceWrite,
}
172
// `AccessConflict` is a leaf error with no underlying cause, so the default
// `Error::source` implementation (returning `None`) is correct.
impl Error for AccessConflict {}
174
175impl Display for AccessConflict {
176    fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), std::fmt::Error> {
177        match self {
178            AccessConflict::HostRead => write!(
179                f,
180                "the resource is already locked for reading by the host (CPU)"
181            ),
182            AccessConflict::HostWrite => write!(
183                f,
184                "the resource is already locked for writing by the host (CPU)"
185            ),
186            AccessConflict::DeviceRead => write!(
187                f,
188                "the resource is already locked for reading by the device (GPU)"
189            ),
190            AccessConflict::DeviceWrite => write!(
191                f,
192                "the resource is already locked for writing by the device (GPU)"
193            ),
194        }
195    }
196}