use super::{ClearColorConfig, Projection};
use crate::{
batching::gpu_preprocessing::GpuPreprocessingSupport,
camera::{CameraProjection, ManualTextureViewHandle, ManualTextureViews},
primitives::Frustum,
render_asset::RenderAssets,
render_graph::{InternedRenderSubGraph, RenderSubGraph},
render_resource::TextureView,
sync_world::TemporaryRenderEntity,
sync_world::{RenderEntity, SyncToRenderWorld},
texture::GpuImage,
view::{
ColorGrading, ExtractedView, ExtractedWindows, GpuCulling, Msaa, RenderLayers,
RenderVisibleEntities, ViewUniformOffset, Visibility, VisibleEntities,
},
Extract,
};
use bevy_asset::{AssetEvent, AssetId, Assets, Handle};
use bevy_derive::{Deref, DerefMut};
use bevy_ecs::{
change_detection::DetectChanges,
component::{Component, ComponentId},
entity::Entity,
event::EventReader,
prelude::With,
query::Has,
reflect::ReflectComponent,
system::{Commands, Query, Res, ResMut, Resource},
world::DeferredWorld,
};
use bevy_image::Image;
use bevy_math::{ops, vec2, Dir3, Mat4, Ray3d, Rect, URect, UVec2, UVec4, Vec2, Vec3};
use bevy_reflect::prelude::*;
use bevy_render_macros::ExtractComponent;
use bevy_transform::components::{GlobalTransform, Transform};
use bevy_utils::{tracing::warn, warn_once, HashMap, HashSet};
use bevy_window::{
NormalizedWindowRef, PrimaryWindow, Window, WindowCreated, WindowRef, WindowResized,
WindowScaleFactorChanged,
};
use core::ops::Range;
use derive_more::derive::From;
use wgpu::{BlendState, TextureFormat, TextureUsages};
/// Render viewport configuration for the [`Camera`] component.
///
/// The viewport defines the area on the render target to which the camera renders its image.
#[derive(Reflect, Debug, Clone)]
#[reflect(Default)]
pub struct Viewport {
    /// The physical position to render this viewport to within the [`RenderTarget`] of this
    /// [`Camera`], in physical pixels.
    pub physical_position: UVec2,
    /// The physical size of the viewport rectangle, in physical pixels.
    pub physical_size: UVec2,
    /// The minimum and maximum depth to render, on a scale from 0.0 to 1.0.
    pub depth: Range<f32>,
}
impl Default for Viewport {
fn default() -> Self {
Self {
physical_position: Default::default(),
physical_size: UVec2::new(1, 1),
depth: 0.0..1.0,
}
}
}
/// Settings describing a sub view of a larger, conceptual camera view.
///
/// `offset`/`size` select which part of the `full_size` view this camera renders
/// (used by the projection when [`Camera::sub_camera_view`] is set).
#[derive(Debug, Clone, Copy, Reflect, PartialEq)]
pub struct SubCameraView {
    /// Size of the entire conceptual camera view, in pixels.
    pub full_size: UVec2,
    /// Offset of this sub view within the full view, in pixels.
    pub offset: Vec2,
    /// Size of the sub view rendered by this camera, in pixels.
    pub size: UVec2,
}
impl Default for SubCameraView {
fn default() -> Self {
Self {
full_size: UVec2::new(1, 1),
offset: Vec2::new(0., 0.),
size: UVec2::new(1, 1),
}
}
}
/// Information about the current [`RenderTarget`] of a [`Camera`], computed from the
/// target window, image, or manual texture view.
#[derive(Default, Debug, Clone)]
pub struct RenderTargetInfo {
    /// The physical size of this render target, in physical pixels.
    pub physical_size: UVec2,
    /// The scale factor of this render target (1.0 for images and manual texture views;
    /// the window's scale factor for window targets).
    pub scale_factor: f32,
}
/// Values computed for a [`Camera`] by `camera_system`, cached between runs.
#[derive(Default, Debug, Clone)]
pub struct ComputedCameraValues {
    // Projection matrix (view space -> clip space) derived from the camera's projection.
    clip_from_view: Mat4,
    // Size/scale info for the current render target; `None` until first computed.
    target_info: Option<RenderTargetInfo>,
    // Previous viewport size, kept for change detection in `camera_system`.
    old_viewport_size: Option<UVec2>,
    // Previous sub camera view, kept for change detection in `camera_system`.
    old_sub_camera_view: Option<SubCameraView>,
}
/// How much light a [`Camera`] lets in, expressed as an EV100 value.
#[derive(Component, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default)]
pub struct Exposure {
    /// Exposure value at ISO 100 ("EV100"); higher values darken the image.
    pub ev100: f32,
}
impl Exposure {
    /// Preset for direct sunlight conditions ([`Self::EV100_SUNLIGHT`]).
    pub const SUNLIGHT: Self = Self {
        ev100: Self::EV100_SUNLIGHT,
    };
    /// Preset for overcast conditions ([`Self::EV100_OVERCAST`]).
    pub const OVERCAST: Self = Self {
        ev100: Self::EV100_OVERCAST,
    };
    /// Preset for indoor lighting ([`Self::EV100_INDOOR`]).
    pub const INDOOR: Self = Self {
        ev100: Self::EV100_INDOOR,
    };
    /// Preset matching Blender's default camera exposure ([`Self::EV100_BLENDER`]).
    pub const BLENDER: Self = Self {
        ev100: Self::EV100_BLENDER,
    };
    pub const EV100_SUNLIGHT: f32 = 15.0;
    pub const EV100_OVERCAST: f32 = 12.0;
    pub const EV100_INDOOR: f32 = 7.0;
    pub const EV100_BLENDER: f32 = 9.7;
    /// Builds an [`Exposure`] from physical camera parameters via their EV100.
    pub fn from_physical_camera(physical_camera_parameters: PhysicalCameraParameters) -> Self {
        Self {
            ev100: physical_camera_parameters.ev100(),
        }
    }
    /// Converts EV100 to a linear exposure multiplier: `2^(-ev100) / 1.2`.
    #[inline]
    pub fn exposure(&self) -> f32 {
        ops::exp2(-self.ev100) / 1.2
    }
}
impl Default for Exposure {
    /// Defaults to the [`Exposure::BLENDER`] preset.
    fn default() -> Self {
        Self::BLENDER
    }
}
/// Parameters of a physical camera, used to compute an EV100 exposure value.
#[derive(Clone, Copy)]
pub struct PhysicalCameraParameters {
    /// Aperture, in f-stops (the "N" in the EV100 formula).
    pub aperture_f_stops: f32,
    /// Shutter speed, in seconds.
    pub shutter_speed_s: f32,
    /// Sensor sensitivity, in ISO.
    pub sensitivity_iso: f32,
    /// Height of the sensor, in meters.
    pub sensor_height: f32,
}
impl PhysicalCameraParameters {
    /// Computes the EV100: `log2(N² × 100 / (t × S))`, where `N` is the aperture in
    /// f-stops, `t` the shutter speed in seconds, and `S` the ISO sensitivity.
    pub fn ev100(&self) -> f32 {
        ops::log2(
            self.aperture_f_stops * self.aperture_f_stops * 100.0
                / (self.shutter_speed_s * self.sensitivity_iso),
        )
    }
}
impl Default for PhysicalCameraParameters {
    /// Defaults roughly corresponding to a generic camera: f/1.0, 1/125 s, ISO 100,
    /// 18.66 mm sensor height.
    fn default() -> Self {
        Self {
            aperture_f_stops: 1.0,
            shutter_speed_s: 1.0 / 125.0,
            sensitivity_iso: 100.0,
            sensor_height: 0.01866,
        }
    }
}
/// Errors returned by the viewport/world conversion methods on [`Camera`].
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
pub enum ViewportConversionError {
    /// The camera's logical viewport size could not be determined (no target info yet).
    NoViewportSize,
    /// The world position projects to an NDC depth below 0.0 (behind the near plane).
    PastNearPlane,
    /// The world position projects to an NDC depth above 1.0 (beyond the far plane).
    PastFarPlane,
    /// The conversion produced invalid (NaN / non-normalizable) data.
    InvalidData,
}
/// The defining [`Component`] for camera entities.
///
/// A camera renders from its position to its [`RenderTarget`], driven by the render
/// sub-graph in a [`CameraRenderGraph`] component (a warning is logged on add if that
/// component is missing).
#[derive(Component, Debug, Reflect, Clone)]
#[reflect(Component, Default, Debug)]
#[component(on_add = warn_on_no_render_graph)]
#[require(
    Frustum,
    CameraMainTextureUsages,
    VisibleEntities,
    Transform,
    Visibility,
    Msaa,
    SyncToRenderWorld
)]
pub struct Camera {
    /// If set, this camera renders only to the given area of its render target.
    pub viewport: Option<Viewport>,
    /// Cameras with a higher order are rendered later (on top of) cameras with a lower
    /// order sharing the same target; see `sort_cameras`.
    pub order: isize,
    /// If `false`, the camera is skipped during extraction and renders nothing.
    pub is_active: bool,
    /// Values computed by `camera_system`; not meant to be edited directly.
    #[reflect(ignore)]
    pub computed: ComputedCameraValues,
    /// The destination this camera renders to (window, image, or manual texture view).
    pub target: RenderTarget,
    /// Whether this camera renders to a high-dynamic-range view.
    pub hdr: bool,
    /// How this camera's output is written (blended) to its target, or skipped.
    #[reflect(ignore)]
    pub output_mode: CameraOutputMode,
    /// Whether MSAA samples are written back for this camera.
    pub msaa_writeback: bool,
    /// How (and whether) the target is cleared before this camera renders.
    pub clear_color: ClearColorConfig,
    /// If set, restricts rendering to a sub view of a larger conceptual view.
    pub sub_camera_view: Option<SubCameraView>,
}
/// `on_add` hook for [`Camera`]: warns when the entity has no [`CameraRenderGraph`],
/// since such a camera cannot drive any rendering.
fn warn_on_no_render_graph(world: DeferredWorld, entity: Entity, _: ComponentId) {
    let has_render_graph = world.entity(entity).contains::<CameraRenderGraph>();
    if has_render_graph {
        return;
    }
    warn!("Entity {entity} has a `Camera` component, but it doesn't have a render graph configured. Consider adding a `Camera2d` or `Camera3d` component, or manually adding a `CameraRenderGraph` component if you need a custom render graph.");
}
impl Default for Camera {
fn default() -> Self {
Self {
is_active: true,
order: 0,
viewport: None,
computed: Default::default(),
target: Default::default(),
output_mode: Default::default(),
hdr: false,
msaa_writeback: true,
clear_color: Default::default(),
sub_camera_view: None,
}
}
}
impl Camera {
    /// Converts a physical size on this camera's render target to a logical size,
    /// dividing by the target's scale factor.
    ///
    /// Returns `None` if the target info has not been computed yet.
    #[inline]
    pub fn to_logical(&self, physical_size: UVec2) -> Option<Vec2> {
        let scale = self.computed.target_info.as_ref()?.scale_factor;
        Some(physical_size.as_vec2() / scale)
    }

    /// The rendered area as a rectangle in physical pixels: the configured
    /// [`Viewport`] if any, otherwise the whole render target.
    ///
    /// Returns `None` if the target size is unavailable.
    #[inline]
    pub fn physical_viewport_rect(&self) -> Option<URect> {
        let min = self
            .viewport
            .as_ref()
            .map(|v| v.physical_position)
            .unwrap_or(UVec2::ZERO);
        let max = min + self.physical_viewport_size()?;
        Some(URect { min, max })
    }

    /// The rendered area as a rectangle in logical pixels (the physical rect scaled
    /// by the target's scale factor).
    #[inline]
    pub fn logical_viewport_rect(&self) -> Option<Rect> {
        let URect { min, max } = self.physical_viewport_rect()?;
        Some(Rect {
            min: self.to_logical(min)?,
            max: self.to_logical(max)?,
        })
    }

    /// The logical size of this camera's viewport, falling back to the logical size
    /// of the whole render target when no [`Viewport`] is configured.
    #[inline]
    pub fn logical_viewport_size(&self) -> Option<Vec2> {
        self.viewport
            .as_ref()
            .and_then(|v| self.to_logical(v.physical_size))
            .or_else(|| self.logical_target_size())
    }

    /// The physical size of this camera's viewport, falling back to the physical size
    /// of the whole render target when no [`Viewport`] is configured.
    #[inline]
    pub fn physical_viewport_size(&self) -> Option<UVec2> {
        self.viewport
            .as_ref()
            .map(|v| v.physical_size)
            .or_else(|| self.physical_target_size())
    }

    /// The logical size of the whole render target (ignores any viewport).
    #[inline]
    pub fn logical_target_size(&self) -> Option<Vec2> {
        self.computed
            .target_info
            .as_ref()
            .and_then(|t| self.to_logical(t.physical_size))
    }

    /// The physical size of the whole render target (ignores any viewport).
    #[inline]
    pub fn physical_target_size(&self) -> Option<UVec2> {
        self.computed.target_info.as_ref().map(|t| t.physical_size)
    }

    /// The scale factor of the render target, if known.
    #[inline]
    pub fn target_scaling_factor(&self) -> Option<f32> {
        self.computed
            .target_info
            .as_ref()
            .map(|t: &RenderTargetInfo| t.scale_factor)
    }

    /// The projection matrix computed for this camera (view space → clip space).
    #[inline]
    pub fn clip_from_view(&self) -> Mat4 {
        self.computed.clip_from_view
    }

    /// Projects a world-space position into viewport coordinates (logical pixels,
    /// y-down: origin at the viewport's top-left corner).
    ///
    /// # Errors
    /// `NoViewportSize` if the viewport size is unavailable; `PastNearPlane` /
    /// `PastFarPlane` if the NDC depth falls outside `[0, 1]`; `InvalidData` if the
    /// projection produced NaN.
    #[doc(alias = "world_to_screen")]
    pub fn world_to_viewport(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        // NDC xy is in [-1, 1] with y-up; remap to [0, target_size] and flip y.
        let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size;
        viewport_position.y = target_size.y - viewport_position.y;
        Ok(viewport_position)
    }

    /// Like [`Self::world_to_viewport`], but also returns the view-space depth of the
    /// point in the `z` component of the result.
    #[doc(alias = "world_to_screen_with_depth")]
    pub fn world_to_viewport_with_depth(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Result<Vec3, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        let ndc_space_coords = self
            .world_to_ndc(camera_transform, world_position)
            .ok_or(ViewportConversionError::InvalidData)?;
        if ndc_space_coords.z < 0.0 {
            return Err(ViewportConversionError::PastNearPlane);
        }
        if ndc_space_coords.z > 1.0 {
            return Err(ViewportConversionError::PastFarPlane);
        }
        // Negate because view-space forward is -Z; this yields a positive depth.
        let depth = -self.depth_ndc_to_view_z(ndc_space_coords.z);
        let mut viewport_position = (ndc_space_coords.truncate() + Vec2::ONE) / 2.0 * target_size;
        viewport_position.y = target_size.y - viewport_position.y;
        Ok(viewport_position.extend(depth))
    }

    /// Returns a world-space ray originating on the near plane, passing through the
    /// given viewport position (logical pixels, y-down).
    pub fn viewport_to_world(
        &self,
        camera_transform: &GlobalTransform,
        mut viewport_position: Vec2,
    ) -> Result<Ray3d, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        viewport_position.y = target_size.y - viewport_position.y;
        let ndc = viewport_position * 2. / target_size - Vec2::ONE;
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();
        // Reverse-z: NDC depth 1.0 is the near plane. The far point uses EPSILON
        // rather than 0.0 so projections with an infinite far plane stay finite.
        let world_near_plane = ndc_to_world.project_point3(ndc.extend(1.));
        let world_far_plane = ndc_to_world.project_point3(ndc.extend(f32::EPSILON));
        Dir3::new(world_far_plane - world_near_plane)
            .map_err(|_| ViewportConversionError::InvalidData)
            .map(|direction| Ray3d {
                origin: world_near_plane,
                direction,
            })
    }

    /// Returns the 2D world-space position on the near plane under the given viewport
    /// position (logical pixels, y-down).
    pub fn viewport_to_world_2d(
        &self,
        camera_transform: &GlobalTransform,
        mut viewport_position: Vec2,
    ) -> Result<Vec2, ViewportConversionError> {
        let target_size = self
            .logical_viewport_size()
            .ok_or(ViewportConversionError::NoViewportSize)?;
        viewport_position.y = target_size.y - viewport_position.y;
        let ndc = viewport_position * 2. / target_size - Vec2::ONE;
        let world_near_plane = self
            .ndc_to_world(camera_transform, ndc.extend(1.))
            .ok_or(ViewportConversionError::InvalidData)?;
        Ok(world_near_plane.truncate())
    }

    /// Projects a world-space position to NDC. Returns `None` if the result is NaN.
    pub fn world_to_ndc(
        &self,
        camera_transform: &GlobalTransform,
        world_position: Vec3,
    ) -> Option<Vec3> {
        let clip_from_world: Mat4 =
            self.computed.clip_from_view * camera_transform.compute_matrix().inverse();
        let ndc_space_coords: Vec3 = clip_from_world.project_point3(world_position);
        (!ndc_space_coords.is_nan()).then_some(ndc_space_coords)
    }

    /// Unprojects an NDC position to world space. Returns `None` if the result is NaN.
    pub fn ndc_to_world(&self, camera_transform: &GlobalTransform, ndc: Vec3) -> Option<Vec3> {
        let ndc_to_world =
            camera_transform.compute_matrix() * self.computed.clip_from_view.inverse();
        let world_space_coords = ndc_to_world.project_point3(ndc);
        (!world_space_coords.is_nan()).then_some(world_space_coords)
    }

    /// Converts an NDC depth to view-space Z using the projection's `w_axis.z` term
    /// (assumes a reverse-z perspective projection — confirm against the 2D variant
    /// below for orthographic cameras).
    pub fn depth_ndc_to_view_z(&self, ndc_depth: f32) -> f32 {
        let near = self.clip_from_view().w_axis.z;
        -near / ndc_depth
    }

    /// Converts an NDC depth to view-space Z for an orthographic (2D) projection,
    /// inverting the linear depth mapping of `clip_from_view`.
    pub fn depth_ndc_to_view_z_2d(&self, ndc_depth: f32) -> f32 {
        -(self.clip_from_view().w_axis.z - ndc_depth) / self.clip_from_view().z_axis.z
    }
}
/// Controls how a [`Camera`] writes its rendered output to the final target.
#[derive(Debug, Clone, Copy)]
pub enum CameraOutputMode {
    /// Write this camera's output, optionally blending and clearing first.
    Write {
        /// The blend state used when writing; `None` overwrites the target.
        blend_state: Option<BlendState>,
        /// How (and whether) the target is cleared before writing.
        clear_color: ClearColorConfig,
    },
    /// Skip writing this camera's output to the target.
    Skip,
}
impl Default for CameraOutputMode {
fn default() -> Self {
CameraOutputMode::Write {
blend_state: None,
clear_color: ClearColorConfig::Default,
}
}
}
/// Configures which render sub-graph (e.g. 2D or 3D) drives rendering for this camera.
#[derive(Component, Debug, Deref, DerefMut, Reflect, Clone)]
#[reflect(opaque)]
#[reflect(Component, Debug)]
pub struct CameraRenderGraph(InternedRenderSubGraph);
impl CameraRenderGraph {
    /// Creates a new [`CameraRenderGraph`] from the given render sub-graph label.
    #[inline]
    pub fn new<T: RenderSubGraph>(name: T) -> Self {
        Self(name.intern())
    }
    /// Replaces the render sub-graph driven by this camera.
    #[inline]
    pub fn set<T: RenderSubGraph>(&mut self, name: T) {
        self.0 = name.intern();
    }
}
/// The "target" a [`Camera`] renders to.
#[derive(Debug, Clone, Reflect, From)]
pub enum RenderTarget {
    /// A window; may be the primary window, which is resolved during normalization.
    Window(WindowRef),
    /// An [`Image`] asset.
    Image(Handle<Image>),
    /// A manually managed texture view, looked up in [`ManualTextureViews`].
    TextureView(ManualTextureViewHandle),
}
impl Default for RenderTarget {
fn default() -> Self {
Self::Window(Default::default())
}
}
/// A [`RenderTarget`] whose window reference has been resolved to a concrete entity,
/// making it comparable/hashable for camera sorting.
#[derive(Debug, Clone, Reflect, PartialEq, Eq, Hash, PartialOrd, Ord, From)]
pub enum NormalizedRenderTarget {
    /// A concrete window entity.
    Window(NormalizedWindowRef),
    /// An [`Image`] asset.
    Image(Handle<Image>),
    /// A manually managed texture view.
    TextureView(ManualTextureViewHandle),
}
impl RenderTarget {
    /// Resolves this target into a [`NormalizedRenderTarget`], substituting the given
    /// primary-window entity for primary-window references.
    ///
    /// Returns `None` when a primary-window target is requested but no primary window
    /// entity is available.
    pub fn normalize(&self, primary_window: Option<Entity>) -> Option<NormalizedRenderTarget> {
        match self {
            Self::Window(window_ref) => {
                let normalized = window_ref.normalize(primary_window)?;
                Some(NormalizedRenderTarget::Window(normalized))
            }
            Self::Image(handle) => Some(NormalizedRenderTarget::Image(handle.clone())),
            Self::TextureView(view_handle) => Some(NormalizedRenderTarget::TextureView(*view_handle)),
        }
    }

    /// Returns the image handle when this target renders to an [`Image`], else `None`.
    pub fn as_image(&self) -> Option<&Handle<Image>> {
        match self {
            Self::Image(handle) => Some(handle),
            _ => None,
        }
    }
}
impl NormalizedRenderTarget {
    /// Looks up the GPU texture view backing this target, if currently available.
    pub fn get_texture_view<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<&'a TextureView> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_view.as_ref()),
            NormalizedRenderTarget::Image(image_handle) => {
                images.get(image_handle).map(|image| &image.texture_view)
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| &tex.texture_view)
            }
        }
    }
    /// Retrieves the [`TextureFormat`] of this target, if currently available.
    pub fn get_texture_format<'a>(
        &self,
        windows: &'a ExtractedWindows,
        images: &'a RenderAssets<GpuImage>,
        manual_texture_views: &'a ManualTextureViews,
    ) -> Option<TextureFormat> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => windows
                .get(&window_ref.entity())
                .and_then(|window| window.swap_chain_texture_format),
            NormalizedRenderTarget::Image(image_handle) => {
                images.get(image_handle).map(|image| image.texture_format)
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| tex.format)
            }
        }
    }
    /// Computes size/scale info for this target. Windows use the window's physical
    /// size and scale factor; images and manual texture views use a scale factor of 1.
    pub fn get_render_target_info<'a>(
        &self,
        resolutions: impl IntoIterator<Item = (Entity, &'a Window)>,
        images: &Assets<Image>,
        manual_texture_views: &ManualTextureViews,
    ) -> Option<RenderTargetInfo> {
        match self {
            NormalizedRenderTarget::Window(window_ref) => resolutions
                .into_iter()
                .find(|(entity, _)| *entity == window_ref.entity())
                .map(|(_, window)| RenderTargetInfo {
                    physical_size: window.physical_size(),
                    scale_factor: window.resolution.scale_factor(),
                }),
            NormalizedRenderTarget::Image(image_handle) => {
                let image = images.get(image_handle)?;
                Some(RenderTargetInfo {
                    physical_size: image.size(),
                    scale_factor: 1.0,
                })
            }
            NormalizedRenderTarget::TextureView(id) => {
                manual_texture_views.get(id).map(|tex| RenderTargetInfo {
                    physical_size: tex.size,
                    scale_factor: 1.0,
                })
            }
        }
    }
    // Whether this target was affected by the given set of window/image changes.
    // Manual texture views are always treated as changed (no change tracking for them).
    fn is_changed(
        &self,
        changed_window_ids: &HashSet<Entity>,
        changed_image_handles: &HashSet<&AssetId<Image>>,
    ) -> bool {
        match self {
            NormalizedRenderTarget::Window(window_ref) => {
                changed_window_ids.contains(&window_ref.entity())
            }
            NormalizedRenderTarget::Image(image_handle) => {
                changed_image_handles.contains(&image_handle.id())
            }
            NormalizedRenderTarget::TextureView(_) => true,
        }
    }
}
/// System that keeps each [`Camera`]'s computed target info and projection matrix up
/// to date, re-running the computation when the camera's window/image target changed,
/// the projection changed, or the viewport / sub camera view changed.
#[allow(clippy::too_many_arguments)]
pub fn camera_system<T: CameraProjection + Component>(
    mut window_resized_events: EventReader<WindowResized>,
    mut window_created_events: EventReader<WindowCreated>,
    mut window_scale_factor_changed_events: EventReader<WindowScaleFactorChanged>,
    mut image_asset_events: EventReader<AssetEvent<Image>>,
    primary_window: Query<Entity, With<PrimaryWindow>>,
    windows: Query<(Entity, &Window)>,
    images: Res<Assets<Image>>,
    manual_texture_views: Res<ManualTextureViews>,
    mut cameras: Query<(&mut Camera, &mut T)>,
) {
    let primary_window = primary_window.iter().next();
    // Windows created or resized this frame invalidate cached target info.
    let mut changed_window_ids = HashSet::new();
    changed_window_ids.extend(window_created_events.read().map(|event| event.window));
    changed_window_ids.extend(window_resized_events.read().map(|event| event.window));
    // Scale-factor changes are kept in a separate set so viewports can be rescaled below.
    let scale_factor_changed_window_ids: HashSet<_> = window_scale_factor_changed_events
        .read()
        .map(|event| event.window)
        .collect();
    changed_window_ids.extend(scale_factor_changed_window_ids.clone());
    // Images that were added or modified may change an image-targeted camera's size.
    let changed_image_handles: HashSet<&AssetId<Image>> = image_asset_events
        .read()
        .filter_map(|event| match event {
            AssetEvent::Modified { id } | AssetEvent::Added { id } => Some(id),
            _ => None,
        })
        .collect();
    for (mut camera, mut camera_projection) in &mut cameras {
        let mut viewport_size = camera
            .viewport
            .as_ref()
            .map(|viewport| viewport.physical_size);
        if let Some(normalized_target) = camera.target.normalize(primary_window) {
            // Only recompute when something relevant actually changed.
            if normalized_target.is_changed(&changed_window_ids, &changed_image_handles)
                || camera.is_added()
                || camera_projection.is_changed()
                || camera.computed.old_viewport_size != viewport_size
                || camera.computed.old_sub_camera_view != camera.sub_camera_view
            {
                let new_computed_target_info = normalized_target.get_render_target_info(
                    &windows,
                    &images,
                    &manual_texture_views,
                );
                // If the scale factor changed, rescale the viewport so it keeps the
                // same apparent (logical) position and size.
                if normalized_target.is_changed(&scale_factor_changed_window_ids, &HashSet::new()) {
                    if let (Some(new_scale_factor), Some(old_scale_factor)) = (
                        new_computed_target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                        camera
                            .computed
                            .target_info
                            .as_ref()
                            .map(|info| info.scale_factor),
                    ) {
                        let resize_factor = new_scale_factor / old_scale_factor;
                        if let Some(ref mut viewport) = camera.viewport {
                            let resize = |vec: UVec2| (vec.as_vec2() * resize_factor).as_uvec2();
                            viewport.physical_position = resize(viewport.physical_position);
                            viewport.physical_size = resize(viewport.physical_size);
                            viewport_size = Some(viewport.physical_size);
                        }
                    }
                }
                // Clamp the viewport so it never exceeds the render target's size.
                if let Some(viewport) = &mut camera.viewport {
                    let target_info = &new_computed_target_info;
                    if let Some(target) = target_info {
                        if viewport.physical_size.x > target.physical_size.x {
                            viewport.physical_size.x = target.physical_size.x;
                        }
                        if viewport.physical_size.y > target.physical_size.y {
                            viewport.physical_size.y = target.physical_size.y;
                        }
                    }
                }
                camera.computed.target_info = new_computed_target_info;
                // Update the projection with the new logical size (skipping degenerate
                // zero-sized views) and recompute the clip-from-view matrix.
                if let Some(size) = camera.logical_viewport_size() {
                    if size.x != 0.0 && size.y != 0.0 {
                        camera_projection.update(size.x, size.y);
                        camera.computed.clip_from_view = match &camera.sub_camera_view {
                            Some(sub_view) => {
                                camera_projection.get_clip_from_view_for_sub(sub_view)
                            }
                            None => camera_projection.get_clip_from_view(),
                        }
                    }
                }
            }
        }
        // Cache current values for the next run's change detection.
        if camera.computed.old_viewport_size != viewport_size {
            camera.computed.old_viewport_size = viewport_size;
        }
        if camera.computed.old_sub_camera_view != camera.sub_camera_view {
            camera.computed.old_sub_camera_view = camera.sub_camera_view;
        }
    }
}
/// The [`TextureUsages`] used when creating a camera's main texture(s).
#[derive(Component, ExtractComponent, Clone, Copy, Reflect)]
#[reflect(opaque)]
#[reflect(Component, Default)]
pub struct CameraMainTextureUsages(pub TextureUsages);
impl Default for CameraMainTextureUsages {
    /// Render attachment plus texture binding and copy-source usages.
    fn default() -> Self {
        Self(
            TextureUsages::RENDER_ATTACHMENT
                .union(TextureUsages::TEXTURE_BINDING)
                .union(TextureUsages::COPY_SRC),
        )
    }
}
/// Render-world copy of a [`Camera`]'s state, inserted by [`extract_cameras`].
#[derive(Component, Debug)]
pub struct ExtractedCamera {
    /// The normalized render target, if it could be resolved.
    pub target: Option<NormalizedRenderTarget>,
    /// Physical size of the camera's viewport (or full target), in pixels.
    pub physical_viewport_size: Option<UVec2>,
    /// Physical size of the whole render target, in pixels.
    pub physical_target_size: Option<UVec2>,
    /// The camera's viewport configuration, if any.
    pub viewport: Option<Viewport>,
    /// The render sub-graph driving this camera.
    pub render_graph: InternedRenderSubGraph,
    /// Camera ordering; see [`sort_cameras`].
    pub order: isize,
    /// How this camera's output is written to its target.
    pub output_mode: CameraOutputMode,
    /// Whether MSAA samples are written back.
    pub msaa_writeback: bool,
    /// How (and whether) the target is cleared.
    pub clear_color: ClearColorConfig,
    /// Index of this camera among cameras sharing the same (target, hdr) pair,
    /// assigned by [`sort_cameras`].
    pub sorted_camera_index_for_target: usize,
    /// Linear exposure multiplier derived from [`Exposure`].
    pub exposure: f32,
    /// Whether this camera renders to an HDR view.
    pub hdr: bool,
}
pub fn extract_cameras(
mut commands: Commands,
query: Extract<
Query<(
RenderEntity,
&Camera,
&CameraRenderGraph,
&GlobalTransform,
&VisibleEntities,
&Frustum,
Option<&ColorGrading>,
Option<&Exposure>,
Option<&TemporalJitter>,
Option<&RenderLayers>,
Option<&Projection>,
Has<GpuCulling>,
)>,
>,
primary_window: Extract<Query<Entity, With<PrimaryWindow>>>,
gpu_preprocessing_support: Res<GpuPreprocessingSupport>,
mapper: Extract<Query<&RenderEntity>>,
) {
let primary_window = primary_window.iter().next();
for (
render_entity,
camera,
camera_render_graph,
transform,
visible_entities,
frustum,
color_grading,
exposure,
temporal_jitter,
render_layers,
projection,
gpu_culling,
) in query.iter()
{
if !camera.is_active {
commands.entity(render_entity).remove::<(
ExtractedCamera,
ExtractedView,
RenderVisibleEntities,
TemporalJitter,
RenderLayers,
Projection,
GpuCulling,
ViewUniformOffset,
)>();
continue;
}
let color_grading = color_grading.unwrap_or(&ColorGrading::default()).clone();
if let (
Some(URect {
min: viewport_origin,
..
}),
Some(viewport_size),
Some(target_size),
) = (
camera.physical_viewport_rect(),
camera.physical_viewport_size(),
camera.physical_target_size(),
) {
if target_size.x == 0 || target_size.y == 0 {
continue;
}
let render_visible_entities = RenderVisibleEntities {
entities: visible_entities
.entities
.iter()
.map(|(type_id, entities)| {
let entities = entities
.iter()
.map(|entity| {
let render_entity = mapper
.get(*entity)
.cloned()
.map(|entity| entity.id())
.unwrap_or_else(|_e| {
commands.spawn(TemporaryRenderEntity).id()
});
(render_entity, (*entity).into())
})
.collect();
(*type_id, entities)
})
.collect(),
};
let mut commands = commands.entity(render_entity);
commands.insert((
ExtractedCamera {
target: camera.target.normalize(primary_window),
viewport: camera.viewport.clone(),
physical_viewport_size: Some(viewport_size),
physical_target_size: Some(target_size),
render_graph: camera_render_graph.0,
order: camera.order,
output_mode: camera.output_mode,
msaa_writeback: camera.msaa_writeback,
clear_color: camera.clear_color,
sorted_camera_index_for_target: 0,
exposure: exposure
.map(Exposure::exposure)
.unwrap_or_else(|| Exposure::default().exposure()),
hdr: camera.hdr,
},
ExtractedView {
clip_from_view: camera.clip_from_view(),
world_from_view: *transform,
clip_from_world: None,
hdr: camera.hdr,
viewport: UVec4::new(
viewport_origin.x,
viewport_origin.y,
viewport_size.x,
viewport_size.y,
),
color_grading,
},
render_visible_entities,
*frustum,
));
if let Some(temporal_jitter) = temporal_jitter {
commands.insert(temporal_jitter.clone());
}
if let Some(render_layers) = render_layers {
commands.insert(render_layers.clone());
}
if let Some(perspective) = projection {
commands.insert(perspective.clone());
}
if gpu_culling {
if *gpu_preprocessing_support == GpuPreprocessingSupport::Culling {
commands.insert(GpuCulling);
} else {
warn_once!(
"GPU culling isn't supported on this platform; ignoring `GpuCulling`."
);
}
}
};
}
}
/// Cameras sorted by their order field, filled each frame by [`sort_cameras`].
#[derive(Resource, Default)]
pub struct SortedCameras(pub Vec<SortedCamera>);
/// One entry in [`SortedCameras`]: the sort key plus the camera's render entity.
pub struct SortedCamera {
    /// The camera's render-world entity.
    pub entity: Entity,
    /// The camera's order (primary sort key).
    pub order: isize,
    /// The camera's normalized render target (secondary sort key).
    pub target: Option<NormalizedRenderTarget>,
    /// Whether the camera renders to an HDR view.
    pub hdr: bool,
}
/// Sorts all extracted cameras by `(order, target)`, assigns each camera its
/// per-(target, hdr) index, and warns about ambiguous orderings.
pub fn sort_cameras(
    mut sorted_cameras: ResMut<SortedCameras>,
    mut cameras: Query<(Entity, &mut ExtractedCamera)>,
) {
    sorted_cameras.0.clear();
    for (entity, camera) in cameras.iter() {
        sorted_cameras.0.push(SortedCamera {
            entity,
            order: camera.order,
            target: camera.target.clone(),
            hdr: camera.hdr,
        });
    }
    // Primary key: order; tie-break on target so equal-order cameras with distinct
    // targets are not flagged as ambiguous.
    sorted_cameras
        .0
        .sort_by(|c1, c2| match c1.order.cmp(&c2.order) {
            core::cmp::Ordering::Equal => c1.target.cmp(&c2.target),
            ord => ord,
        });
    let mut previous_order_target = None;
    let mut ambiguities = HashSet::new();
    let mut target_counts = HashMap::new();
    for sorted_camera in &mut sorted_cameras.0 {
        // Two adjacent cameras with the same (order, target) are ambiguous: their
        // relative render order is unspecified.
        let new_order_target = (sorted_camera.order, sorted_camera.target.clone());
        if let Some(previous_order_target) = previous_order_target {
            if previous_order_target == new_order_target {
                ambiguities.insert(new_order_target.clone());
            }
        }
        // Assign each camera its running index among cameras sharing (target, hdr);
        // consumers use this to know which camera draws first/last to a target.
        if let Some(target) = &sorted_camera.target {
            let count = target_counts
                .entry((target.clone(), sorted_camera.hdr))
                .or_insert(0usize);
            let (_, mut camera) = cameras.get_mut(sorted_camera.entity).unwrap();
            camera.sorted_camera_index_for_target = *count;
            *count += 1;
        }
        previous_order_target = Some(new_order_target);
    }
    if !ambiguities.is_empty() {
        warn!(
            "Camera order ambiguities detected for active cameras with the following priorities: {:?}. \
            To fix this, ensure there is exactly one Camera entity spawned with a given order for a given RenderTarget. \
            Ambiguities should be resolved because either (1) multiple active cameras were spawned accidentally, which will \
            result in rendering multiple instances of the scene or (2) for cases where multiple active cameras is intentional, \
            ambiguities could result in unpredictable render results.",
            ambiguities
        );
    }
}
/// A subpixel offset applied to a camera's projection (see
/// [`TemporalJitter::jitter_projection`]), e.g. for temporal antialiasing.
#[derive(Component, Clone, Default, Reflect)]
#[reflect(Default, Component)]
pub struct TemporalJitter {
    /// Offset to apply, in units later scaled by 2/view_size.
    pub offset: Vec2,
}
impl TemporalJitter {
    /// Applies this jitter offset to the given projection matrix, in place.
    ///
    /// Bails out with a warning for orthographic projections (detected via
    /// `w_axis.w == 1.0`), which are not supported.
    pub fn jitter_projection(&self, clip_from_view: &mut Mat4, view_size: Vec2) {
        if clip_from_view.w_axis.w == 1.0 {
            warn!(
                "TemporalJitter not supported with OrthographicProjection. Use PerspectiveProjection instead."
            );
            return;
        }
        // Scale the offset into NDC units (2/view_size); y is flipped to match NDC's
        // y-up convention, and the translation lands in the projection's z axis
        // because project_point3 divides by w.
        let jitter = (self.offset * vec2(2.0, -2.0)) / view_size;
        clip_from_view.z_axis.x += jitter.x;
        clip_from_view.z_axis.y += jitter.y;
    }
}
/// A bias applied when sampling texture mip levels for this camera
/// (presumably consumed by the renderer's shaders — not visible in this file).
#[derive(Default, Component, Reflect)]
#[reflect(Default, Component)]
pub struct MipBias(pub f32);