// vulkano_taskgraph/lib.rs
//! Vulkano's **EXPERIMENTAL** task graph implementation. Expect many bugs and incomplete features.
//! There is also currently no validation except the most barebones sanity checks. You may also get
//! panics in random places.
use crate::command_buffer::RecordingCommandBuffer;
use concurrent_slotmap::SlotId;
use crate::graph::{CompileInfo, ExecuteError, ResourceMap, TaskGraph};
use crate::linear_map::LinearMap;
use crate::resource::{
    AccessTypes, BufferState, Flight, HostAccessType, ImageLayoutType, ImageState, Resources,
    SwapchainState,
};
use std::{
any::{Any, TypeId},
cell::Cell,
cmp,
error::Error,
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
mem,
ops::{Deref, RangeBounds},
sync::Arc,
};
use vulkano::{
buffer::{Buffer, BufferContents, BufferMemory, Subbuffer},
command_buffer as raw,
device::Queue,
format::ClearValue,
image::Image,
render_pass::Framebuffer,
swapchain::Swapchain,
DeviceSize, ValidationError,
};
pub mod command_buffer;
pub mod graph;
mod linear_map;
pub mod resource;
/// Creates a [`TaskGraph`] with one task node, compiles it, and executes it.
///
/// # Safety
///
/// - There must be no accesses in the given access sets that are incompatible with `queue` or
///   with the device.
/// - There must be no other task graphs executing that access any of the same subresources.
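///
/// # Examples
///
/// A minimal sketch of using this helper to write to a buffer through its host mapping. The
/// `queue`, `resources`, `flight_id`, and `buffer_id` bindings are placeholders assumed to have
/// been created beforehand; they are not part of this function's API.
///
/// ```ignore
/// use vulkano_taskgraph::resource::HostAccessType;
///
/// unsafe {
///     vulkano_taskgraph::execute(
///         &queue,
///         &resources,
///         flight_id,
///         |_cbf, tcx| {
///             // Write through the buffer's host mapping; the access is declared below.
///             *tcx.write_buffer::<u32>(buffer_id, ..)? = 42;
///             Ok(())
///         },
///         [(buffer_id, HostAccessType::Write)],
///         [],
///         [],
///     )
/// }
/// .unwrap();
/// ```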
pub unsafe fn execute(
queue: &Arc<Queue>,
resources: &Arc<Resources>,
flight_id: Id<Flight>,
task: impl FnOnce(&mut RecordingCommandBuffer<'_>, &mut TaskContext<'_>) -> TaskResult,
host_buffer_accesses: impl IntoIterator<Item = (Id<Buffer>, HostAccessType)>,
buffer_accesses: impl IntoIterator<Item = (Id<Buffer>, AccessTypes)>,
image_accesses: impl IntoIterator<Item = (Id<Image>, AccessTypes, ImageLayoutType)>,
) -> Result<(), ExecuteError> {
#[repr(transparent)]
struct OnceTask<'a>(
&'a dyn Fn(&mut RecordingCommandBuffer<'_>, &mut TaskContext<'_>) -> TaskResult,
);
// SAFETY: The task is constructed inside this function and never leaves its scope, so there is
// no way it could be sent to another thread.
unsafe impl Send for OnceTask<'_> {}
// SAFETY: The task is constructed inside this function and never leaves its scope, so there is
// no way it could be shared with another thread.
unsafe impl Sync for OnceTask<'_> {}
impl Task for OnceTask<'static> {
type World = ();
unsafe fn execute(
&self,
cbf: &mut RecordingCommandBuffer<'_>,
tcx: &mut TaskContext<'_>,
_: &Self::World,
) -> TaskResult {
(self.0)(cbf, tcx)
}
}
let task = Cell::new(Some(task));
let trampoline = move |cbf: &mut RecordingCommandBuffer<'_>, tcx: &mut TaskContext<'_>| {
// `ExecutableTaskGraph::execute` calls each task exactly once, and we only execute the
// task graph once.
(Cell::take(&task).unwrap())(cbf, tcx)
};
let mut task_graph = TaskGraph::new(resources, 1, 64 * 1024);
for (id, access_type) in host_buffer_accesses {
task_graph.add_host_buffer_access(id, access_type);
}
let mut node = task_graph.create_task_node(
"",
QueueFamilyType::Specific {
index: queue.queue_family_index(),
},
// SAFETY: The task never leaves this function scope, so it is safe to pretend that the
// local `trampoline` and its captures from the outer scope live forever.
unsafe { mem::transmute::<OnceTask<'_>, OnceTask<'static>>(OnceTask(&trampoline)) },
);
for (id, access_types) in buffer_accesses {
node.buffer_access(id, access_types);
}
for (id, access_types, layout_type) in image_accesses {
node.image_access(id, access_types, layout_type);
}
// SAFETY:
// * The user must ensure that there are no accesses that are incompatible with the queue.
// * The user must ensure that there are no accesses incompatible with the device.
let task_graph = unsafe {
task_graph.compile(&CompileInfo {
queues: &[queue],
present_queue: None,
flight_id,
_ne: crate::NE,
})
}
.unwrap();
let resource_map = ResourceMap::new(&task_graph).unwrap();
// SAFETY: The user must ensure that there are no other task graphs executing that access any
// of the same subresources.
unsafe { task_graph.execute(resource_map, &(), || {}) }
}
/// A task represents a unit of work to be recorded to a command buffer.
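///
/// # Examples
///
/// A minimal sketch of a task implementation; `MyTask` is a placeholder type introduced for
/// illustration, and its `execute` body records no commands.
///
/// ```
/// use vulkano_taskgraph::{command_buffer::RecordingCommandBuffer, Task, TaskContext, TaskResult};
///
/// struct MyTask;
///
/// impl Task for MyTask {
///     type World = ();
///
///     unsafe fn execute(
///         &self,
///         _cbf: &mut RecordingCommandBuffer<'_>,
///         _tcx: &mut TaskContext<'_>,
///         _world: &Self::World,
///     ) -> TaskResult {
///         // A real task would record commands to `_cbf` here.
///         Ok(())
///     }
/// }
/// ```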
pub trait Task: Any + Send + Sync {
type World: ?Sized;
// Potentially TODO:
// fn update(&mut self, ...) {}
/// If the task node was created with any attachments which were [set to be cleared], this
/// method is invoked to allow the task to set clear values for such attachments.
///
    /// This method is invoked at least once for every attachment to be cleared before every
    /// execution of the task. It may be invoked multiple times before the task is executed, and
    /// it isn't necessarily invoked right before [`execute`]; it is only guaranteed to happen at
    /// some point between the start of the task graph's execution and the execution of the task.
///
/// [set to be cleared]: graph::AttachmentInfo::clear
/// [`execute`]: Self::execute
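    ///
    /// # Examples
    ///
    /// A sketch of an override; `self.attachment_id` stands in for an `Id<Image>` that the task
    /// stores for one of its cleared attachments and is an assumption made for illustration.
    ///
    /// ```ignore
    /// fn clear_values(&self, clear_values: &mut ClearValues<'_>) {
    ///     clear_values.set(self.attachment_id, [0.0, 0.0, 0.0, 1.0]);
    /// }
    /// ```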
#[allow(unused)]
fn clear_values(&self, clear_values: &mut ClearValues<'_>) {}
/// Executes the task, which should record its commands using the provided command buffer and
/// context.
///
/// # Safety
///
/// - Every resource in the [task's access set] must not be written to concurrently in any
/// other tasks during execution on the device.
/// - Every resource in the task's access set, if it's an [image access], must have had its
/// layout transitioned to the layout specified in the access.
/// - Every resource in the task's access set, if the resource's [sharing mode] is exclusive,
/// must be currently owned by the queue family the task is executing on.
///
/// [sharing mode]: vulkano::sync::Sharing
unsafe fn execute(
&self,
cbf: &mut RecordingCommandBuffer<'_>,
tcx: &mut TaskContext<'_>,
world: &Self::World,
) -> TaskResult;
}
impl<W: ?Sized + 'static> dyn Task<World = W> {
/// Returns `true` if `self` is of type `T`.
#[inline]
pub fn is<T: Task<World = W>>(&self) -> bool {
self.type_id() == TypeId::of::<T>()
}
/// Returns a reference to the inner value if it is of type `T`, or returns `None` otherwise.
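    ///
    /// A sketch of recovering the concrete task type from a type-erased reference; `MyTask` is a
    /// placeholder for a type implementing `Task<World = W>`.
    ///
    /// ```ignore
    /// if let Some(my_task) = task.downcast_ref::<MyTask>() {
    ///     // Use the concrete type here.
    /// }
    /// ```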
#[inline]
pub fn downcast_ref<T: Task<World = W>>(&self) -> Option<&T> {
if self.is::<T>() {
// SAFETY: We just checked that the type is correct.
Some(unsafe { self.downcast_unchecked_ref() })
} else {
None
}
}
/// Returns a reference to the inner value if it is of type `T`, or returns `None` otherwise.
#[inline]
pub fn downcast_mut<T: Task<World = W>>(&mut self) -> Option<&mut T> {
if self.is::<T>() {
// SAFETY: We just checked that the type is correct.
Some(unsafe { self.downcast_unchecked_mut() })
} else {
None
}
}
/// Returns a reference to the inner value without checking if it is of type `T`.
///
/// # Safety
///
/// `self` must be of type `T`.
#[inline]
pub unsafe fn downcast_unchecked_ref<T: Task<World = W>>(&self) -> &T {
// SAFETY: The caller must guarantee that the type is correct.
unsafe { &*<*const dyn Task<World = W>>::cast::<T>(self) }
}
/// Returns a reference to the inner value without checking if it is of type `T`.
///
/// # Safety
///
/// `self` must be of type `T`.
#[inline]
pub unsafe fn downcast_unchecked_mut<T: Task<World = W>>(&mut self) -> &mut T {
// SAFETY: The caller must guarantee that the type is correct.
unsafe { &mut *<*mut dyn Task<World = W>>::cast::<T>(self) }
}
}
impl<W: ?Sized> fmt::Debug for dyn Task<World = W> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Task").finish_non_exhaustive()
}
}
/// An implementation of a phantom task, which is zero-sized and doesn't do anything.
///
/// You may want to use this if all you're interested in is the automatic synchronization and don't
/// have any other commands to execute. A common example would be doing a queue family ownership
/// transfer after doing an upload.
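///
/// A sketch of creating such a node while building a graph; `task_graph`, `buffer_id`, the
/// chosen access type, and the builder-style `build` call are placeholders assumed for
/// illustration.
///
/// ```ignore
/// let transfer_node_id = task_graph
///     .create_task_node(
///         "ownership transfer",
///         QueueFamilyType::Transfer,
///         PhantomData::<fn() -> ()>,
///     )
///     .buffer_access(buffer_id, AccessTypes::COPY_TRANSFER_READ)
///     .build();
/// ```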
impl<W: ?Sized + 'static> Task for PhantomData<fn() -> W> {
type World = W;
unsafe fn execute(
&self,
_cbf: &mut RecordingCommandBuffer<'_>,
_tcx: &mut TaskContext<'_>,
_world: &Self::World,
) -> TaskResult {
Ok(())
}
}
/// The context of a task.
///
/// This gives you access to the resources.
pub struct TaskContext<'a> {
resource_map: &'a ResourceMap<'a>,
current_frame_index: u32,
command_buffers: &'a mut Vec<Arc<raw::CommandBuffer>>,
}
impl<'a> TaskContext<'a> {
/// Returns the buffer corresponding to `id`, or returns an error if it isn't present.
#[inline]
pub fn buffer(&self, id: Id<Buffer>) -> TaskResult<&'a BufferState> {
if id.is_virtual() {
// SAFETY: The caller of `Task::execute` must ensure that `self.resource_map` maps the
// virtual IDs of the graph exhaustively.
Ok(unsafe { self.resource_map.buffer(id) }?)
} else {
// SAFETY: `ResourceMap` owns an `epoch::Guard`.
Ok(unsafe { self.resource_map.resources().buffer_unprotected(id) }?)
}
}
/// Returns the image corresponding to `id`, or returns an error if it isn't present.
///
/// # Panics
///
/// - Panics if `id` refers to a swapchain image.
#[inline]
pub fn image(&self, id: Id<Image>) -> TaskResult<&'a ImageState> {
assert_ne!(id.object_type(), ObjectType::Swapchain);
if id.is_virtual() {
// SAFETY: The caller of `Task::execute` must ensure that `self.resource_map` maps the
// virtual IDs of the graph exhaustively.
Ok(unsafe { self.resource_map.image(id) }?)
} else {
// SAFETY: `ResourceMap` owns an `epoch::Guard`.
Ok(unsafe { self.resource_map.resources().image_unprotected(id) }?)
}
}
/// Returns the swapchain corresponding to `id`, or returns an error if it isn't present.
#[inline]
pub fn swapchain(&self, id: Id<Swapchain>) -> TaskResult<&'a SwapchainState> {
if id.is_virtual() {
// SAFETY: The caller of `Task::execute` must ensure that `self.resource_map` maps the
// virtual IDs of the graph exhaustively.
Ok(unsafe { self.resource_map.swapchain(id) }?)
} else {
// SAFETY: `ResourceMap` owns an `epoch::Guard`.
Ok(unsafe { self.resource_map.resources().swapchain_unprotected(id) }?)
}
}
/// Returns the `ResourceMap`.
#[inline]
pub fn resource_map(&self) -> &'a ResourceMap<'a> {
self.resource_map
}
/// Returns the index of the current [frame] in [flight].
#[inline]
#[must_use]
pub fn current_frame_index(&self) -> u32 {
self.current_frame_index
}
/// Tries to get read access to a portion of the buffer corresponding to `id`.
///
/// If host read access for the buffer is not accounted for in the [task graph's host access
/// set], this method will return an error.
///
/// If the memory backing the buffer is not managed by vulkano (i.e. the buffer was created
/// by [`RawBuffer::assume_bound`]), then it can't be read using this method and an error will
/// be returned.
///
/// # Panics
///
/// - Panics if the alignment of `T` is greater than 64.
/// - Panics if [`Subbuffer::slice`] with the given `range` panics.
/// - Panics if [`Subbuffer::reinterpret`] to the given `T` panics.
///
/// [`RawBuffer::assume_bound`]: vulkano::buffer::sys::RawBuffer::assume_bound
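    ///
    /// # Examples
    ///
    /// A sketch of reading back data inside [`Task::execute`]; `buffer_id` is a placeholder that
    /// is assumed to be in the task graph's host access set with [`HostAccessType::Read`].
    ///
    /// ```ignore
    /// let data: &[u32] = tcx.read_buffer(buffer_id, ..)?;
    /// ```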
pub fn read_buffer<T: BufferContents + ?Sized>(
&self,
id: Id<Buffer>,
range: impl RangeBounds<DeviceSize>,
) -> TaskResult<&T> {
self.validate_read_buffer(id)?;
        // SAFETY: We checked that the task has read access to the buffer above, which also
        // includes the guarantee that no other tasks can be writing the subbuffer on either the
        // host or the device. The same task cannot obtain a mutable reference to the buffer
        // because `TaskContext::write_buffer` requires a mutable reference.
unsafe { self.read_buffer_unchecked(id, range) }
}
fn validate_read_buffer(&self, id: Id<Buffer>) -> Result<(), Box<ValidationError>> {
if !self
.resource_map
.virtual_resources()
.contains_host_buffer_access(id, HostAccessType::Read)
{
return Err(Box::new(ValidationError {
context: "TaskContext::read_buffer".into(),
problem: "the task graph does not have an access of type `HostAccessType::Read` \
for the buffer"
.into(),
..Default::default()
}));
}
Ok(())
}
/// Gets read access to a portion of the buffer corresponding to `id` without checking if this
/// access is accounted for in the [task graph's host access set].
///
/// If the memory backing the buffer is not managed by vulkano (i.e. the buffer was created
/// by [`RawBuffer::assume_bound`]), then it can't be read using this method and an error will
/// be returned.
///
/// # Safety
///
/// This access must be accounted for in the task graph's host access set.
///
/// # Panics
///
/// - Panics if the alignment of `T` is greater than 64.
/// - Panics if [`Subbuffer::slice`] with the given `range` panics.
/// - Panics if [`Subbuffer::reinterpret`] to the given `T` panics.
///
/// [`RawBuffer::assume_bound`]: vulkano::buffer::sys::RawBuffer::assume_bound
pub unsafe fn read_buffer_unchecked<T: BufferContents + ?Sized>(
&self,
id: Id<Buffer>,
range: impl RangeBounds<DeviceSize>,
) -> TaskResult<&T> {
assert!(T::LAYOUT.alignment().as_devicesize() <= 64);
let buffer = self.buffer(id)?.buffer();
let subbuffer = Subbuffer::from(buffer.clone())
.slice(range)
.reinterpret::<T>();
let allocation = match buffer.memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => {
todo!("`TaskContext::read_buffer` doesn't support sparse binding yet");
}
BufferMemory::External => {
return Err(TaskError::HostAccess(HostAccessError::Unmanaged));
}
_ => unreachable!(),
};
unsafe { allocation.mapped_slice_unchecked(..) }.map_err(|err| match err {
vulkano::sync::HostAccessError::NotHostMapped => HostAccessError::NotHostMapped,
vulkano::sync::HostAccessError::OutOfMappedRange => HostAccessError::OutOfMappedRange,
_ => unreachable!(),
})?;
let mapped_slice = subbuffer.mapped_slice().unwrap();
// SAFETY: The caller must ensure that access to the data is synchronized.
let data_ptr = unsafe { T::ptr_from_slice(mapped_slice) };
let data = unsafe { &*data_ptr };
Ok(data)
}
/// Tries to get write access to a portion of the buffer corresponding to `id`.
///
/// If host write access for the buffer is not accounted for in the [task graph's host access
/// set], this method will return an error.
///
/// If the memory backing the buffer is not managed by vulkano (i.e. the buffer was created
/// by [`RawBuffer::assume_bound`]), then it can't be written using this method and an error
/// will be returned.
///
/// # Panics
///
/// - Panics if the alignment of `T` is greater than 64.
/// - Panics if [`Subbuffer::slice`] with the given `range` panics.
/// - Panics if [`Subbuffer::reinterpret`] to the given `T` panics.
///
/// [`RawBuffer::assume_bound`]: vulkano::buffer::sys::RawBuffer::assume_bound
pub fn write_buffer<T: BufferContents + ?Sized>(
&mut self,
id: Id<Buffer>,
range: impl RangeBounds<DeviceSize>,
) -> TaskResult<&mut T> {
self.validate_write_buffer(id)?;
        // SAFETY: We checked that the task has write access to the buffer above, which also
        // includes the guarantee that no other tasks can be accessing the buffer on either the
        // host or the device. The same task cannot obtain another mutable reference to the
        // buffer because `TaskContext::write_buffer` requires a mutable reference.
unsafe { self.write_buffer_unchecked(id, range) }
}
fn validate_write_buffer(&self, id: Id<Buffer>) -> Result<(), Box<ValidationError>> {
if !self
.resource_map
.virtual_resources()
.contains_host_buffer_access(id, HostAccessType::Write)
{
return Err(Box::new(ValidationError {
context: "TaskContext::write_buffer".into(),
problem: "the task graph does not have an access of type `HostAccessType::Write` \
for the buffer"
.into(),
..Default::default()
}));
}
Ok(())
}
/// Gets write access to a portion of the buffer corresponding to `id` without checking if this
/// access is accounted for in the [task graph's host access set].
///
/// If the memory backing the buffer is not managed by vulkano (i.e. the buffer was created
/// by [`RawBuffer::assume_bound`]), then it can't be written using this method and an error
/// will be returned.
///
/// # Safety
///
/// This access must be accounted for in the task graph's host access set.
///
/// # Panics
///
/// - Panics if the alignment of `T` is greater than 64.
/// - Panics if [`Subbuffer::slice`] with the given `range` panics.
/// - Panics if [`Subbuffer::reinterpret`] to the given `T` panics.
///
/// [`RawBuffer::assume_bound`]: vulkano::buffer::sys::RawBuffer::assume_bound
pub unsafe fn write_buffer_unchecked<T: BufferContents + ?Sized>(
&mut self,
id: Id<Buffer>,
range: impl RangeBounds<DeviceSize>,
) -> TaskResult<&mut T> {
assert!(T::LAYOUT.alignment().as_devicesize() <= 64);
let buffer = self.buffer(id)?.buffer();
let subbuffer = Subbuffer::from(buffer.clone())
.slice(range)
.reinterpret::<T>();
let allocation = match buffer.memory() {
BufferMemory::Normal(a) => a,
BufferMemory::Sparse => {
todo!("`TaskContext::write_buffer` doesn't support sparse binding yet");
}
BufferMemory::External => {
return Err(TaskError::HostAccess(HostAccessError::Unmanaged));
}
_ => unreachable!(),
};
unsafe { allocation.mapped_slice_unchecked(..) }.map_err(|err| match err {
vulkano::sync::HostAccessError::NotHostMapped => HostAccessError::NotHostMapped,
vulkano::sync::HostAccessError::OutOfMappedRange => HostAccessError::OutOfMappedRange,
_ => unreachable!(),
})?;
let mapped_slice = subbuffer.mapped_slice().unwrap();
// SAFETY: The caller must ensure that access to the data is synchronized.
let data_ptr = unsafe { T::ptr_from_slice(mapped_slice) };
let data = unsafe { &mut *data_ptr };
Ok(data)
}
/// Pushes a command buffer into the list of command buffers to be executed on the queue.
///
    /// All command buffers are executed, in the order in which they are pushed, after the task
    /// has finished execution. In particular, this means that the commands recorded by the task
    /// start executing before any of the pushed command buffers do.
///
/// # Safety
///
/// Since the command buffer will be executed on the same queue right after the current command
/// buffer, without any added synchronization, it must be safe to do so. The given command
/// buffer must not do any accesses not accounted for in the [task's access set], or ensure
/// that such accesses are appropriately synchronized.
#[inline]
pub unsafe fn push_command_buffer(&mut self, command_buffer: Arc<raw::CommandBuffer>) {
self.command_buffers.push(command_buffer);
}
/// Extends the list of command buffers to be executed on the queue.
///
/// This function behaves identically to the [`push_command_buffer`] method, except that it
/// pushes all command buffers from the given iterator in order.
///
/// # Safety
///
/// See the [`push_command_buffer`] method for the safety preconditions.
///
/// [`push_command_buffer`]: Self::push_command_buffer
#[inline]
pub unsafe fn extend_command_buffers(
&mut self,
command_buffers: impl IntoIterator<Item = Arc<raw::CommandBuffer>>,
) {
self.command_buffers.extend(command_buffers);
}
}
/// Stores the clear value for each attachment that was [set to be cleared] when creating the task
/// node.
///
/// This is used to set the clear values in [`Task::clear_values`].
///
/// [set to be cleared]: graph::AttachmentInfo::clear
pub struct ClearValues<'a> {
inner: &'a mut LinearMap<Id, Option<ClearValue>>,
resource_map: &'a ResourceMap<'a>,
}
impl ClearValues<'_> {
/// Sets the clear value for the image corresponding to `id`.
#[inline]
pub fn set(&mut self, id: Id<Image>, clear_value: impl Into<ClearValue>) {
self.set_inner(id, clear_value.into());
}
fn set_inner(&mut self, id: Id<Image>, clear_value: ClearValue) {
let mut id = id.erase();
if !id.is_virtual() {
let virtual_resources = self.resource_map.virtual_resources();
if let Some(&virtual_id) = virtual_resources.physical_map().get(&id.erase()) {
id = virtual_id;
} else {
return;
}
}
if let Some(value) = self.inner.get_mut(&id) {
if value.is_none() {
*value = Some(clear_value);
}
}
}
}
/// The type of result returned by a task.
pub type TaskResult<T = (), E = TaskError> = ::std::result::Result<T, E>;
/// Error that can happen inside a task.
#[derive(Debug)]
pub enum TaskError {
InvalidSlot(InvalidSlotError),
HostAccess(HostAccessError),
ValidationError(Box<ValidationError>),
}
impl From<InvalidSlotError> for TaskError {
fn from(err: InvalidSlotError) -> Self {
Self::InvalidSlot(err)
}
}
impl From<HostAccessError> for TaskError {
fn from(err: HostAccessError) -> Self {
Self::HostAccess(err)
}
}
impl From<Box<ValidationError>> for TaskError {
fn from(err: Box<ValidationError>) -> Self {
Self::ValidationError(err)
}
}
impl fmt::Display for TaskError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let msg = match self {
Self::InvalidSlot(_) => "invalid slot",
Self::HostAccess(_) => "a host access error occurred",
Self::ValidationError(_) => "a validation error occurred",
};
f.write_str(msg)
}
}
impl Error for TaskError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::InvalidSlot(err) => Some(err),
Self::HostAccess(err) => Some(err),
Self::ValidationError(err) => Some(err),
}
}
}
/// Error that can happen when trying to retrieve a Vulkan object or state by [`Id`].
#[derive(Debug)]
pub struct InvalidSlotError {
id: Id,
}
impl InvalidSlotError {
fn new<O>(id: Id<O>) -> Self {
InvalidSlotError { id: id.erase() }
}
}
impl fmt::Display for InvalidSlotError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let &InvalidSlotError { id } = self;
let object_type = id.object_type();
write!(f, "invalid slot for object type `{object_type:?}`: {id:?}")
}
}
impl Error for InvalidSlotError {}
/// Error that can happen when attempting to read or write a resource from the host.
#[derive(Debug)]
pub enum HostAccessError {
Unmanaged,
NotHostMapped,
OutOfMappedRange,
}
impl fmt::Display for HostAccessError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let msg = match self {
Self::Unmanaged => "the resource is not managed by vulkano",
            Self::NotHostMapped => "the device memory is not currently host-mapped",
Self::OutOfMappedRange => {
"the requested range is not within the currently mapped range of device memory"
}
};
f.write_str(msg)
}
}
impl Error for HostAccessError {}
/// Specifies the type of queue family that a task can be executed on.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[non_exhaustive]
pub enum QueueFamilyType {
/// Picks a queue family that supports graphics and transfer operations.
Graphics,
/// Picks a queue family that supports compute and transfer operations.
Compute,
/// Picks a queue family that supports transfer operations.
Transfer,
// TODO:
// VideoDecode,
// TODO:
// VideoEncode,
    /// Picks the queue family with the given index. You should generally avoid this and use one
    /// of the other variants, so that the task graph compiler can pick optimal queue family
    /// indices that still satisfy the operations the tasks require (it's also more convenient
    /// that way, as there's less to think about). Nevertheless, you may want to use this if
    /// you're after a very specific outcome.
Specific { index: u32 },
}
/// This ID type is used throughout the crate to refer to Vulkan objects such as resources, their
/// synchronization state, the state of synchronization objects, and other state.
///
/// The type parameter denotes the type of object or state being referred to.
///
/// Note that this ID **is not** globally unique. It is unique in the scope of a logical device.
#[repr(transparent)]
pub struct Id<T = ()> {
slot: SlotId,
marker: PhantomData<fn() -> T>,
}
impl<T> Id<T> {
/// An ID that's guaranteed to be invalid.
pub const INVALID: Self = Id {
slot: SlotId::INVALID,
marker: PhantomData,
};
const unsafe fn new(slot: SlotId) -> Self {
Id {
slot,
marker: PhantomData,
}
}
fn index(self) -> u32 {
self.slot.index()
}
/// Returns `true` if this ID represents a [virtual resource].
#[inline]
pub const fn is_virtual(self) -> bool {
self.slot.tag() & Id::VIRTUAL_BIT != 0
}
/// Returns `true` if this ID represents a resource with the exclusive sharing mode.
fn is_exclusive(self) -> bool {
self.slot.tag() & Id::EXCLUSIVE_BIT != 0
}
fn erase(self) -> Id {
unsafe { Id::new(self.slot) }
}
fn object_type(self) -> ObjectType {
match self.slot.tag() & Id::OBJECT_TYPE_MASK {
Buffer::TAG => ObjectType::Buffer,
Image::TAG => ObjectType::Image,
Swapchain::TAG => ObjectType::Swapchain,
Flight::TAG => ObjectType::Flight,
_ => unreachable!(),
}
}
}
impl Id<Swapchain> {
/// Returns the ID that always refers to the swapchain image that's currently acquired from the
/// swapchain.
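    ///
    /// A sketch of how this might be used when declaring accesses while building a graph;
    /// `node`, `virtual_swapchain_id`, and the chosen access type are placeholders assumed for
    /// illustration.
    ///
    /// ```ignore
    /// node.image_access(
    ///     virtual_swapchain_id.current_image_id(),
    ///     AccessTypes::COLOR_ATTACHMENT_WRITE,
    ///     ImageLayoutType::Optimal,
    /// );
    /// ```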
#[inline]
pub const fn current_image_id(self) -> Id<Image> {
unsafe { Id::new(self.slot) }
}
}
impl Id {
const OBJECT_TYPE_MASK: u32 = 0b111;
const VIRTUAL_BIT: u32 = 1 << 7;
const EXCLUSIVE_BIT: u32 = 1 << 6;
fn is<O: Object>(self) -> bool {
self.object_type() == O::TYPE
}
unsafe fn parametrize<O: Object>(self) -> Id<O> {
unsafe { Id::new(self.slot) }
}
}
impl<T> Clone for Id<T> {
#[inline]
fn clone(&self) -> Self {
*self
}
}
impl<T> Copy for Id<T> {}
impl<T> fmt::Debug for Id<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if *self == Id::INVALID {
f.pad("Id::INVALID")
} else {
f.debug_struct("Id")
.field("index", &self.slot.index())
.field("generation", &self.slot.generation())
.finish()
}
}
}
impl<T> PartialEq for Id<T> {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.slot == other.slot
}
}
impl<T> Eq for Id<T> {}
impl<T> Hash for Id<T> {
#[inline]
fn hash<H: Hasher>(&self, state: &mut H) {
self.slot.hash(state);
}
}
impl<T> PartialOrd for Id<T> {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
Some(self.cmp(other))
}
}
impl<T> Ord for Id<T> {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
self.slot.cmp(&other.slot)
}
}
/// A reference to some Vulkan object or state.
///
/// When you use [`Id`] to retrieve something, you can get back a `Ref` with the same type
/// parameter, which you can then dereference to get at the underlying data denoted by the type
/// parameter.
pub struct Ref<'a, T>(concurrent_slotmap::Ref<'a, T>);
impl<T> Deref for Ref<'_, T> {
type Target = T;
#[inline]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T: fmt::Debug> fmt::Debug for Ref<'_, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.0, f)
}
}
trait Object {
const TYPE: ObjectType;
const TAG: u32 = Self::TYPE as u32;
}
impl Object for Buffer {
const TYPE: ObjectType = ObjectType::Buffer;
}
impl Object for Image {
const TYPE: ObjectType = ObjectType::Image;
}
impl Object for Swapchain {
const TYPE: ObjectType = ObjectType::Swapchain;
}
impl Object for Flight {
const TYPE: ObjectType = ObjectType::Flight;
}
impl Object for Framebuffer {
const TYPE: ObjectType = ObjectType::Framebuffer;
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum ObjectType {
Buffer = 0,
Image = 1,
Swapchain = 2,
Flight = 3,
Framebuffer = 4,
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct NonExhaustive<'a>(PhantomData<&'a ()>);
impl fmt::Debug for NonExhaustive<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("NonExhaustive")
}
}
const NE: NonExhaustive<'static> = NonExhaustive(PhantomData);
#[cfg(test)]
mod tests {
macro_rules! test_queues {
() => {{
let Ok(library) = vulkano::VulkanLibrary::new() else {
return;
};
let Ok(instance) = vulkano::instance::Instance::new(library, Default::default()) else {
return;
};
let Ok(mut physical_devices) = instance.enumerate_physical_devices() else {
return;
};
let Some(physical_device) = physical_devices.find(|p| {
p.queue_family_properties().iter().any(|q| {
q.queue_flags
.contains(vulkano::device::QueueFlags::GRAPHICS)
})
}) else {
return;
};
let queue_create_infos = physical_device
.queue_family_properties()
.iter()
.enumerate()
.map(|(i, _)| vulkano::device::QueueCreateInfo {
queue_family_index: i as u32,
..Default::default()
})
.collect();
let Ok((device, queues)) = vulkano::device::Device::new(
physical_device,
vulkano::device::DeviceCreateInfo {
queue_create_infos,
..Default::default()
},
) else {
return;
};
(
$crate::resource::Resources::new(&device, &Default::default()),
queues.collect::<Vec<_>>(),
)
}};
}
pub(crate) use test_queues;
}