fyrox_impl/scene/
camera.rs

1// Copyright (c) 2019-present Dmitry Stepanov and Fyrox Engine contributors.
2//
3// Permission is hereby granted, free of charge, to any person obtaining a copy
4// of this software and associated documentation files (the "Software"), to deal
5// in the Software without restriction, including without limitation the rights
6// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7// copies of the Software, and to permit persons to whom the Software is
8// furnished to do so, subject to the following conditions:
9//
10// The above copyright notice and this permission notice shall be included in all
11// copies or substantial portions of the Software.
12//
13// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19// SOFTWARE.
20
21//! Contains all methods and structures to create and manage cameras. See [`Camera`] docs for more info.
22
23use crate::{
24    asset::{state::LoadError, untyped::ResourceKind},
25    core::{
26        algebra::{Matrix4, Point3, Vector2, Vector3, Vector4},
27        color::Color,
28        math::{aabb::AxisAlignedBoundingBox, frustum::Frustum, ray::Ray, Rect},
29        pool::Handle,
30        reflect::prelude::*,
31        type_traits::prelude::*,
32        uuid::{uuid, Uuid},
33        uuid_provider,
34        variable::InheritableVariable,
35        visitor::{Visit, VisitResult, Visitor},
36    },
37    graph::BaseSceneGraph,
38    resource::texture::{
39        TextureKind, TexturePixelKind, TextureResource, TextureResourceExtension, TextureWrapMode,
40    },
41    scene::{
42        base::{Base, BaseBuilder},
43        debug::SceneDrawingContext,
44        graph::Graph,
45        node::constructor::NodeConstructor,
46        node::{Node, NodeTrait, UpdateContext},
47    },
48};
49use fyrox_graph::constructor::ConstructorProvider;
50use serde::{Deserialize, Serialize};
51use std::{
52    fmt::{Display, Formatter},
53    ops::{Deref, DerefMut},
54};
55use strum_macros::{AsRefStr, EnumString, VariantNames};
56
/// Perspective projection makes parallel lines converge at some point. Objects will be smaller
/// with increasing distance. This is the projection type "used" by human eyes, photographic lens
/// and it looks most realistic.
#[derive(Reflect, Clone, Debug, PartialEq, Visit, Serialize, Deserialize)]
pub struct PerspectiveProjection {
    /// Vertical angle at the top of viewing frustum, in radians. Larger values will increase field
    /// of view and create fish-eye effect, smaller values could be used to create "binocular" effect
    /// or scope effect.
    #[reflect(min_value = 0.0, max_value = 6.28, step = 0.1)]
    pub fov: f32,
    /// Location of the near clipping plane. If it is larger than [`Self::z_far`] then it will be
    /// treated like far clipping plane.
    #[reflect(min_value = 0.0, step = 0.1)]
    pub z_near: f32,
    /// Location of the far clipping plane. If it is less than [`Self::z_near`] then it will be
    /// treated like near clipping plane.
    #[reflect(min_value = 0.0, step = 0.1)]
    pub z_far: f32,
}
76
impl Default for PerspectiveProjection {
    fn default() -> Self {
        Self {
            // 75 degrees vertical field of view, converted to the radians
            // expected by the `fov` field.
            fov: 75.0f32.to_radians(),
            z_near: 0.025,
            z_far: 2048.0,
        }
    }
}
86
87impl PerspectiveProjection {
88    /// Returns perspective projection matrix.
89    #[inline]
90    pub fn matrix(&self, frame_size: Vector2<f32>) -> Matrix4<f32> {
91        let limit = 10.0 * f32::EPSILON;
92
93        let z_near = self.z_far.min(self.z_near);
94        let mut z_far = self.z_far.max(self.z_near);
95
96        // Prevent planes from superimposing which could cause panic.
97        if z_far - z_near < limit {
98            z_far += limit;
99        }
100
101        Matrix4::new_perspective(
102            (frame_size.x / frame_size.y).max(limit),
103            self.fov,
104            z_near,
105            z_far,
106        )
107    }
108}
109
/// Parallel projection. Object's size won't be affected by distance from the viewer, it can be
/// used for 2D games.
#[derive(Reflect, Clone, Debug, PartialEq, Visit, Serialize, Deserialize)]
pub struct OrthographicProjection {
    /// Location of the near clipping plane. If it is larger than [`Self::z_far`] then it will be
    /// treated like far clipping plane.
    #[reflect(min_value = 0.0, step = 0.1)]
    pub z_near: f32,
    /// Location of the far clipping plane. If it is less than [`Self::z_near`] then it will be
    /// treated like near clipping plane.
    #[reflect(min_value = 0.0, step = 0.1)]
    pub z_far: f32,
    /// Vertical size of the "view box". Horizontal size is derived value and depends on the aspect
    /// ratio of the viewport. Any values very close to zero (from both sides) will be clamped to
    /// some minimal value to prevent singularities from occurring.
    #[reflect(step = 0.1)]
    pub vertical_size: f32,
}
128
impl Default for OrthographicProjection {
    fn default() -> Self {
        Self {
            z_near: 0.0,
            z_far: 2048.0,
            // Half-height of the view box: `matrix()` spans `-vertical_size`
            // (bottom) to `+vertical_size` (top).
            vertical_size: 5.0,
        }
    }
}
138
139impl OrthographicProjection {
140    /// Returns orthographic projection matrix.
141    #[inline]
142    pub fn matrix(&self, frame_size: Vector2<f32>) -> Matrix4<f32> {
143        fn clamp_to_limit_signed(value: f32, limit: f32) -> f32 {
144            if value < 0.0 && -value < limit {
145                -limit
146            } else if value >= 0.0 && value < limit {
147                limit
148            } else {
149                value
150            }
151        }
152
153        let limit = 10.0 * f32::EPSILON;
154
155        let aspect = (frame_size.x / frame_size.y).max(limit);
156
157        // Prevent collapsing projection "box" into a point, which could cause panic.
158        let vertical_size = clamp_to_limit_signed(self.vertical_size, limit);
159        let horizontal_size = clamp_to_limit_signed(aspect * vertical_size, limit);
160
161        let z_near = self.z_far.min(self.z_near);
162        let mut z_far = self.z_far.max(self.z_near);
163
164        // Prevent planes from superimposing which could cause panic.
165        if z_far - z_near < limit {
166            z_far += limit;
167        }
168
169        let left = -horizontal_size;
170        let top = vertical_size;
171        let right = horizontal_size;
172        let bottom = -vertical_size;
173        Matrix4::new_orthographic(left, right, bottom, top, z_near, z_far)
174    }
175}
176
/// A method of projection. Different projection types suitable for different purposes:
///
/// 1) Perspective projection most useful for 3D games, it makes a scene to look most natural,
///    objects will look smaller with increasing distance.
/// 2) Orthographic projection most useful for 2D games, objects won't look smaller with increasing
///    distance.
#[derive(
    Reflect,
    Clone,
    Debug,
    PartialEq,
    Visit,
    AsRefStr,
    EnumString,
    VariantNames,
    Serialize,
    Deserialize,
)]
pub enum Projection {
    /// See [`PerspectiveProjection`] docs.
    Perspective(PerspectiveProjection),
    /// See [`OrthographicProjection`] docs.
    Orthographic(OrthographicProjection),
}

// Stable UUID identifying `Projection` for the engine's type system.
uuid_provider!(Projection = "0eb5bec0-fc4e-4945-99b6-e6c5392ad971");
203
204impl Projection {
205    /// Sets the new value for the near clipping plane.
206    #[inline]
207    #[must_use]
208    pub fn with_z_near(mut self, z_near: f32) -> Self {
209        match self {
210            Projection::Perspective(ref mut v) => v.z_near = z_near,
211            Projection::Orthographic(ref mut v) => v.z_near = z_near,
212        }
213        self
214    }
215
216    /// Sets the new value for the far clipping plane.
217    #[inline]
218    #[must_use]
219    pub fn with_z_far(mut self, z_far: f32) -> Self {
220        match self {
221            Projection::Perspective(ref mut v) => v.z_far = z_far,
222            Projection::Orthographic(ref mut v) => v.z_far = z_far,
223        }
224        self
225    }
226
227    /// Sets the new value for the near clipping plane.
228    #[inline]
229    pub fn set_z_near(&mut self, z_near: f32) {
230        match self {
231            Projection::Perspective(v) => v.z_near = z_near,
232            Projection::Orthographic(v) => v.z_near = z_near,
233        }
234    }
235
236    /// Sets the new value for the far clipping plane.
237    #[inline]
238    pub fn set_z_far(&mut self, z_far: f32) {
239        match self {
240            Projection::Perspective(v) => v.z_far = z_far,
241            Projection::Orthographic(v) => v.z_far = z_far,
242        }
243    }
244
245    /// Returns near clipping plane distance.
246    #[inline]
247    pub fn z_near(&self) -> f32 {
248        match self {
249            Projection::Perspective(v) => v.z_near,
250            Projection::Orthographic(v) => v.z_near,
251        }
252    }
253
254    /// Returns far clipping plane distance.
255    #[inline]
256    pub fn z_far(&self) -> f32 {
257        match self {
258            Projection::Perspective(v) => v.z_far,
259            Projection::Orthographic(v) => v.z_far,
260        }
261    }
262
263    /// Returns projection matrix.
264    #[inline]
265    pub fn matrix(&self, frame_size: Vector2<f32>) -> Matrix4<f32> {
266        match self {
267            Projection::Perspective(v) => v.matrix(frame_size),
268            Projection::Orthographic(v) => v.matrix(frame_size),
269        }
270    }
271
272    /// Returns `true` if the current projection is perspective.
273    #[inline]
274    pub fn is_perspective(&self) -> bool {
275        matches!(self, Projection::Perspective(_))
276    }
277
278    /// Returns `true` if the current projection is orthographic.
279    #[inline]
280    pub fn is_orthographic(&self) -> bool {
281        matches!(self, Projection::Orthographic(_))
282    }
283}
284
impl Default for Projection {
    fn default() -> Self {
        // Perspective with default parameters is the out-of-the-box mode.
        Self::Perspective(PerspectiveProjection::default())
    }
}
290
/// Exposure is a parameter that describes how much light should be collected for one
/// frame. The higher the value, the brighter the final frame will be and vice versa.
#[derive(Visit, Copy, Clone, PartialEq, Debug, Reflect, AsRefStr, EnumString, VariantNames)]
pub enum Exposure {
    /// Automatic exposure based on the frame luminance. High luminance values will result
    /// in lower exposure levels and vice versa. This is default option.
    ///
    /// # Equation
    ///
    /// `exposure = key_value / clamp(avg_luminance, min_luminance, max_luminance)`
    Auto {
        /// A key value in the formula above. Default is 0.01556.
        #[reflect(min_value = 0.0, step = 0.1)]
        key_value: f32,
        /// A min luminance value in the formula above. Default is 0.00778.
        #[reflect(min_value = 0.0, step = 0.1)]
        min_luminance: f32,
        /// A max luminance value in the formula above. Default is 64.0.
        #[reflect(min_value = 0.0, step = 0.1)]
        max_luminance: f32,
    },

    /// Specific exposure level. To "disable" any HDR effects use [`std::f32::consts::E`] as a value.
    Manual(f32),
}

// Stable UUID identifying `Exposure` for the engine's type system.
uuid_provider!(Exposure = "0e35ee3d-8baa-4b0c-b3dd-6c31a08c121e");
318
impl Default for Exposure {
    fn default() -> Self {
        // Values match the defaults documented on the `Auto` variant fields.
        Self::Auto {
            key_value: 0.01556,
            min_luminance: 0.00778,
            max_luminance: 64.0,
        }
    }
}
328
/// Camera allows you to see world from specific point in world. You must have at least one camera in
/// your scene to see anything.
///
/// ## Projection
///
/// There are two main projection modes supported by Camera node: perspective and orthogonal projections.
/// Perspective projection is used primarily to display 3D scenes, while orthogonal projection could be
/// used for both 3D and 2D. Orthogonal projection could also be used in CAD software.
///
/// ## Skybox
///
/// Skybox is a cube around the camera with six textures forming seamless "sky". It could be anything,
/// starting from simple blue sky and ending with outer space.
///
/// ## Multiple cameras
///
/// Fyrox supports multiple cameras per scene, it means that you can create split screen games, make
/// picture-in-picture insertions in your main camera view and any other combinations you need.
///
/// ## Performance
///
/// Each camera forces engine to re-render same scene one more time, which may cause almost double load
/// of your GPU.
#[derive(Debug, Visit, Reflect, Clone, ComponentProvider)]
#[reflect(derived_type = "Node")]
pub struct Camera {
    /// Shared scene-node state; `Camera` derefs to this.
    base: Base,

    /// Active projection mode. Change via [`Self::set_projection`].
    #[reflect(setter = "set_projection")]
    projection: InheritableVariable<Projection>,

    /// Normalized viewport rectangle; components are clamped to `[0; 1]`
    /// by [`Self::set_viewport`].
    #[reflect(setter = "set_viewport")]
    viewport: InheritableVariable<Rect<f32>>,

    /// Whether the camera participates in rendering (see [`Self::set_enabled`]).
    #[reflect(setter = "set_enabled")]
    enabled: InheritableVariable<bool>,

    /// Optional environment texture (see [`Self::set_environment`]).
    #[reflect(setter = "set_environment")]
    environment: InheritableVariable<Option<TextureResource>>,

    /// Exposure settings, see [`Exposure`] docs.
    #[reflect(setter = "set_exposure")]
    exposure: InheritableVariable<Exposure>,

    /// Optional color grading look-up table, see [`ColorGradingLut`] docs.
    #[reflect(setter = "set_color_grading_lut")]
    color_grading_lut: InheritableVariable<Option<ColorGradingLut>>,

    /// Whether the color grading LUT is applied.
    #[reflect(setter = "set_color_grading_enabled")]
    color_grading_enabled: InheritableVariable<bool>,

    /// Optional off-screen render target. Not serialized (`#[visit(skip)]`),
    /// so it must be re-created after deserialization.
    #[reflect(setter = "set_render_target")]
    #[visit(skip)]
    render_target: Option<TextureResource>,

    /// Cached view matrix, recomputed by [`Self::calculate_matrices`].
    #[visit(skip)]
    #[reflect(hidden)]
    view_matrix: Matrix4<f32>,

    /// Cached projection matrix, recomputed by [`Self::calculate_matrices`].
    #[visit(skip)]
    #[reflect(hidden)]
    projection_matrix: Matrix4<f32>,
}
390
// Allows a `Camera` to be used anywhere a `&Base` scene node is expected.
impl Deref for Camera {
    type Target = Base;

    fn deref(&self) -> &Self::Target {
        &self.base
    }
}
398
// Mutable counterpart of the `Deref` impl above.
impl DerefMut for Camera {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.base
    }
}
404
impl Default for Camera {
    fn default() -> Self {
        // Delegate to the builder so defaults live in exactly one place.
        CameraBuilder::new(BaseBuilder::new()).build_camera()
    }
}
410
impl TypeUuidProvider for Camera {
    // Stable UUID identifying the `Camera` node type.
    fn type_uuid() -> Uuid {
        uuid!("198d3aca-433c-4ce1-bb25-3190699b757f")
    }
}
416
/// A set of camera fitting parameters for different projection modes. You should take these parameters
/// and modify camera position and projection accordingly. In case of perspective projection all you need
/// to do is to set new world-space position of the camera. In case of orthographic projection, do previous
/// step and also modify vertical size of orthographic projection (see [`OrthographicProjection`] for more
/// info).
pub enum FitParameters {
    /// Fitting parameters for perspective projection.
    Perspective {
        /// New world-space position of the camera.
        position: Vector3<f32>,
        /// Distance from the center of an AABB of the object to the `position`.
        distance: f32,
    },
    /// Fitting parameters for orthographic projection.
    Orthographic {
        /// New world-space position of the camera.
        position: Vector3<f32>,
        /// New vertical size for orthographic projection.
        vertical_size: f32,
    },
}
438
impl FitParameters {
    /// Neutral parameters returned when fitting is impossible (e.g. a
    /// degenerate AABB or zero field of view): origin position, unit distance.
    fn fallback_perspective() -> Self {
        Self::Perspective {
            position: Default::default(),
            distance: 1.0,
        }
    }
}
447
impl Camera {
    /// Explicitly calculates view and projection matrices. Normally, you should not call
    /// this method, it will be called automatically when new frame starts.
    #[inline]
    pub fn calculate_matrices(&mut self, frame_size: Vector2<f32>) {
        let pos = self.base.global_position();
        let look = self.base.look_vector();
        let up = self.base.up_vector();

        // Right-handed look-at view matrix derived from the node's global
        // transform; projection matrix comes from the active projection mode.
        self.view_matrix = Matrix4::look_at_rh(&Point3::from(pos), &Point3::from(pos + look), &up);
        self.projection_matrix = self.projection.matrix(frame_size);
    }
460
461    /// Sets new viewport in resolution-independent format. In other words
462    /// each parameter of viewport defines portion of your current resolution
463    /// in percents. In example viewport (0.0, 0.0, 0.5, 1.0) will force camera
464    /// to use left half of your screen and (0.5, 0.0, 0.5, 1.0) - right half.
465    /// Why not just use pixels directly? Because you can change resolution while
466    /// your application is running and you'd be force to manually recalculate
467    /// pixel values everytime when resolution changes.
468    pub fn set_viewport(&mut self, mut viewport: Rect<f32>) -> Rect<f32> {
469        viewport.position.x = viewport.position.x.clamp(0.0, 1.0);
470        viewport.position.y = viewport.position.y.clamp(0.0, 1.0);
471        viewport.size.x = viewport.size.x.clamp(0.0, 1.0);
472        viewport.size.y = viewport.size.y.clamp(0.0, 1.0);
473        self.viewport.set_value_and_mark_modified(viewport)
474    }
475
    /// Returns current viewport (normalized, resolution-independent).
    pub fn viewport(&self) -> Rect<f32> {
        *self.viewport
    }
480
481    /// Calculates viewport rectangle in pixels based on internal resolution-independent
482    /// viewport. It is useful when you need to get real viewport rectangle in pixels.
483    ///
484    /// # Notes
485    ///
486    /// Viewport cannot be less than 1x1 pixel in size, so the method clamps values to
487    /// range `[1; infinity]`. This is strictly needed because having viewport of 0 in size
488    /// will cause panics in various places. It happens because viewport size is used as
489    /// divisor in math formulas, but you cannot divide by zero.
490    #[inline]
491    pub fn viewport_pixels(&self, frame_size: Vector2<f32>) -> Rect<i32> {
492        Rect::new(
493            (self.viewport.x() * frame_size.x) as i32,
494            (self.viewport.y() * frame_size.y) as i32,
495            ((self.viewport.w() * frame_size.x) as i32).max(1),
496            ((self.viewport.h() * frame_size.y) as i32).max(1),
497        )
498    }
499
    /// Returns current view-projection matrix (projection applied after view).
    #[inline]
    pub fn view_projection_matrix(&self) -> Matrix4<f32> {
        self.projection_matrix * self.view_matrix
    }

    /// Returns current projection matrix (cached by [`Self::calculate_matrices`]).
    #[inline]
    pub fn projection_matrix(&self) -> Matrix4<f32> {
        self.projection_matrix
    }

    /// Returns current view matrix (cached by [`Self::calculate_matrices`]).
    #[inline]
    pub fn view_matrix(&self) -> Matrix4<f32> {
        self.view_matrix
    }

    /// Returns inverse view matrix, or `None` if the matrix is not invertible.
    #[inline]
    pub fn inv_view_matrix(&self) -> Option<Matrix4<f32>> {
        self.view_matrix.try_inverse()
    }
523
    /// Returns a reference to the current projection mode.
    #[inline]
    pub fn projection(&self) -> &Projection {
        &self.projection
    }

    /// Returns a clone of the current projection mode.
    #[inline]
    pub fn projection_value(&self) -> Projection {
        (*self.projection).clone()
    }

    /// Returns current projection mode as mutable reference. Marks the
    /// inheritable property as modified.
    #[inline]
    pub fn projection_mut(&mut self) -> &mut Projection {
        self.projection.get_value_mut_and_mark_modified()
    }

    /// Sets current projection mode, returning the previous one.
    #[inline]
    pub fn set_projection(&mut self, projection: Projection) -> Projection {
        self.projection.set_value_and_mark_modified(projection)
    }
547
    /// Returns state of camera: enabled or not.
    #[inline]
    pub fn is_enabled(&self) -> bool {
        *self.enabled
    }

    /// Enables or disables camera. Disabled cameras will be ignored during
    /// rendering. This allows you to exclude views from specific cameras from
    /// final picture.
    #[inline]
    pub fn set_enabled(&mut self, enabled: bool) -> bool {
        self.enabled.set_value_and_mark_modified(enabled)
    }
561
    /// Sets new environment texture, returning the previous one.
    pub fn set_environment(
        &mut self,
        environment: Option<TextureResource>,
    ) -> Option<TextureResource> {
        self.environment.set_value_and_mark_modified(environment)
    }

    /// Returns optional mutable reference to current environment. Marks the
    /// inheritable property as modified.
    pub fn environment_mut(&mut self) -> Option<&mut TextureResource> {
        self.environment.get_value_mut_and_mark_modified().as_mut()
    }

    /// Returns optional shared reference to current environment.
    pub fn environment_ref(&self) -> Option<&TextureResource> {
        self.environment.as_ref()
    }

    /// Returns a clone of the current environment map (if any).
    pub fn environment_map(&self) -> Option<TextureResource> {
        (*self.environment).clone()
    }
584
    /// Creates picking ray from given screen coordinates.
    pub fn make_ray(&self, screen_coord: Vector2<f32>, screen_size: Vector2<f32>) -> Ray {
        let viewport = self.viewport_pixels(screen_size);
        // NOTE(review): normalization below uses only the viewport *size*, not
        // its offset, so `screen_coord` appears to be expected relative to the
        // viewport origin — confirm against callers.
        let nx = screen_coord.x / (viewport.w() as f32) * 2.0 - 1.0;
        // Invert y here because OpenGL has origin at left bottom corner,
        // but window coordinates starts from left *upper* corner.
        let ny = (viewport.h() as f32 - screen_coord.y) / (viewport.h() as f32) * 2.0 - 1.0;
        let inv_view_proj = self
            .view_projection_matrix()
            .try_inverse()
            .unwrap_or_default();
        // Un-project the NDC point at both depth extremes and run the ray
        // through the resulting world-space points.
        let near = inv_view_proj * Vector4::new(nx, ny, -1.0, 1.0);
        let far = inv_view_proj * Vector4::new(nx, ny, 1.0, 1.0);
        // Perspective divide (homogeneous -> Cartesian coordinates).
        let begin = near.xyz().scale(1.0 / near.w);
        let end = far.xyz().scale(1.0 / far.w);
        Ray::from_two_points(begin, end)
    }
602
    /// Calculates new fitting parameters for the given axis-aligned bounding box using current camera's
    /// global transform and provided aspect ratio. See [`FitParameters`] docs for more info.
    ///
    /// This method returns fitting parameters and **does not** modify camera's state. It is needed, because in
    /// some cases your camera could be attached to some sort of a hinge node and setting its local position
    /// in order to fit it to the given AABB would break the preset spatial relations between nodes. Instead,
    /// the method returns a set of parameters that can be used as you want.
    #[inline]
    #[must_use]
    pub fn fit(
        &self,
        aabb: &AxisAlignedBoundingBox,
        aspect_ratio: f32,
        scale: f32,
    ) -> FitParameters {
        // Nothing sensible can be fitted to a broken AABB.
        if aabb.is_invalid_or_degenerate() {
            return FitParameters::fallback_perspective();
        }

        let look_vector = self
            .look_vector()
            .try_normalize(f32::EPSILON)
            .unwrap_or_default();

        match self.projection.deref() {
            Projection::Perspective(perspective) => {
                let radius = aabb.half_extents().max();

                // sin(fov / 2) == 0 would mean division by zero below.
                let denominator = (perspective.fov * 0.5).sin();
                if denominator == 0.0 {
                    return FitParameters::fallback_perspective();
                }

                // Back the camera away from the AABB center along its look
                // vector far enough for the box to fit the view cone.
                let distance = radius / denominator * scale;
                FitParameters::Perspective {
                    position: aabb.center() - look_vector.scale(distance),
                    distance,
                }
            }
            Projection::Orthographic(_) => {
                // Project every AABB corner into camera-local space and track
                // the 2D extents on the view plane.
                let mut min_x = f32::MAX;
                let mut min_y = f32::MAX;
                let mut max_x = -f32::MAX;
                let mut max_y = -f32::MAX;
                let inv = self.global_transform().try_inverse().unwrap_or_default();
                for point in aabb.corners() {
                    let local = inv.transform_point(&Point3::from(point));
                    if local.x < min_x {
                        min_x = local.x;
                    }
                    if local.y < min_y {
                        min_y = local.y;
                    }
                    if local.x > max_x {
                        max_x = local.x;
                    }
                    if local.y > max_y {
                        max_y = local.y;
                    }
                }

                FitParameters::Orthographic {
                    // Move back along the look vector by the AABB diagonal so
                    // the whole box ends up in front of the camera.
                    position: aabb.center()
                        - look_vector.scale((aabb.max - aabb.min).norm() * scale),
                    // Pick whichever extent (height, or width adjusted by the
                    // aspect ratio) requires the larger vertical size.
                    vertical_size: (max_y - min_y).max((max_x - min_x) * aspect_ratio) * scale,
                }
            }
        }
    }
672
    /// Returns current frustum of the camera, derived from the cached
    /// view-projection matrix.
    #[inline]
    pub fn frustum(&self) -> Frustum {
        Frustum::from_view_projection_matrix(self.view_projection_matrix()).unwrap_or_default()
    }
678
    /// Projects given world space point on screen plane. Returns `None` when
    /// the point cannot be projected (zero homogeneous `w`, or clip-space
    /// `z < 0`).
    pub fn project(
        &self,
        world_pos: Vector3<f32>,
        screen_size: Vector2<f32>,
    ) -> Option<Vector2<f32>> {
        let viewport = self.viewport_pixels(screen_size);
        // Transform the point to homogeneous clip space.
        let proj = self.view_projection_matrix()
            * Vector4::new(world_pos.x, world_pos.y, world_pos.z, 1.0);
        if proj.w != 0.0 && proj.z >= 0.0 {
            // Perspective divide combined with mapping [-1; 1] -> [0; 1].
            let k = (1.0 / proj.w) * 0.5;
            Some(Vector2::new(
                viewport.x() as f32 + viewport.w() as f32 * (proj.x * k + 0.5),
                // Flip the y axis: screen coordinates grow downwards.
                viewport.h() as f32
                    - (viewport.y() as f32 + viewport.h() as f32 * (proj.y * k + 0.5)),
            ))
        } else {
            None
        }
    }
699
    /// Sets new color grading LUT, returning the previous one.
    pub fn set_color_grading_lut(
        &mut self,
        lut: Option<ColorGradingLut>,
    ) -> Option<ColorGradingLut> {
        self.color_grading_lut.set_value_and_mark_modified(lut)
    }

    /// Returns a clone of the current color grading map.
    pub fn color_grading_lut(&self) -> Option<ColorGradingLut> {
        (*self.color_grading_lut).clone()
    }

    /// Returns current color grading map by ref.
    pub fn color_grading_lut_ref(&self) -> Option<&ColorGradingLut> {
        self.color_grading_lut.as_ref()
    }

    /// Enables or disables color grading, returning the previous state.
    pub fn set_color_grading_enabled(&mut self, enable: bool) -> bool {
        self.color_grading_enabled
            .set_value_and_mark_modified(enable)
    }

    /// Whether color grading enabled or not.
    pub fn color_grading_enabled(&self) -> bool {
        *self.color_grading_enabled
    }

    /// Sets new exposure, returning the previous value. See [`Exposure`] docs
    /// for more info.
    pub fn set_exposure(&mut self, exposure: Exposure) -> Exposure {
        self.exposure.set_value_and_mark_modified(exposure)
    }

    /// Returns current exposure value.
    pub fn exposure(&self) -> Exposure {
        *self.exposure
    }
738
    /// Sets a new render target of the camera, returning the previous one. If set, the camera
    /// will render to the specified render target and will not appear in the final frame.
    /// Typical usage is something like this:
    ///
    /// ```rust
    /// # use fyrox_impl::scene::camera::Camera;
    /// # use fyrox_texture::{TextureResource, TextureResourceExtension};
    /// fn set_render_target(camera: &mut Camera) {
    ///     // Create a render target of 256x256 pixels. The size of the render target can be changed
    ///     // at runtime, and the engine will automatically adjust GPU resources for you. The render
    ///     // target is a resource, thus it can be shared across multiple "users". For instance, you
    ///     // can apply this render target texture to a quad in your game world, and it will make a
    ///     // sort of virtual camera (surveillance camera).
    ///     let render_target = TextureResource::new_render_target(256, 256);
    ///     camera.set_render_target(Some(render_target));
    /// }
    /// ```
    ///
    /// # Serialization
    ///
    /// The render target is non-serializable, and you have to re-create it after deserialization.
    pub fn set_render_target(
        &mut self,
        render_target: Option<TextureResource>,
    ) -> Option<TextureResource> {
        std::mem::replace(&mut self.render_target, render_target)
    }

    /// Returns a reference to the current render target (if any).
    pub fn render_target(&self) -> Option<&TextureResource> {
        self.render_target.as_ref()
    }
}
771
impl ConstructorProvider<Node, Graph> for Camera {
    // Registers a "Camera" variant that builds a default camera node
    // named "Camera".
    fn constructor() -> NodeConstructor {
        NodeConstructor::new::<Self>().with_variant("Camera", |_| {
            CameraBuilder::new(BaseBuilder::new().with_name("Camera"))
                .build_node()
                .into()
        })
    }
}
781
impl NodeTrait for Camera {
    /// Returns current **local-space** bounding box.
    #[inline]
    fn local_bounding_box(&self) -> AxisAlignedBoundingBox {
        // TODO: Maybe calculate AABB using frustum corners?
        self.base.local_bounding_box()
    }

    /// Returns current **world-space** bounding box.
    fn world_bounding_box(&self) -> AxisAlignedBoundingBox {
        self.base.world_bounding_box()
    }

    /// Returns the static type UUID of the `Camera` node type.
    fn id(&self) -> Uuid {
        Self::type_uuid()
    }

    /// Recomputes the cached view/projection matrices every frame.
    fn update(&mut self, context: &mut UpdateContext) {
        // When a render target is set and its texture is loaded, its
        // dimensions take priority over the window frame size.
        let frame_size = if let Some(TextureKind::Rectangle { width, height }) = self
            .render_target
            .as_ref()
            .and_then(|rt| rt.data_ref().as_loaded_ref().map(|rt| rt.kind()))
        {
            Vector2::new(width as f32, height as f32)
        } else {
            context.frame_size
        };

        self.calculate_matrices(frame_size);
    }
812
813    fn debug_draw(&self, ctx: &mut SceneDrawingContext) {
814        let transform = self.global_transform_without_scaling();
815        ctx.draw_pyramid(
816            self.frustum().center(),
817            self.frustum().right_top_front_corner(),
818            self.frustum().left_top_front_corner(),
819            self.frustum().left_bottom_front_corner(),
820            self.frustum().right_bottom_front_corner(),
821            Color::GREEN,
822            transform,
823        );
824    }
825}
826
/// All possible errors that may occur during color grading look-up table creation.
#[derive(Debug)]
pub enum ColorGradingLutCreationError {
    /// There is not enough data in provided texture to build LUT.
    NotEnoughData {
        /// Required amount of bytes.
        required: usize,
        /// Actual data size.
        current: usize,
    },

    /// Pixel format is not supported. It must be either RGB8 or RGBA8.
    InvalidPixelFormat(TexturePixelKind),

    /// Texture error.
    Texture(LoadError),
}
844
845impl Display for ColorGradingLutCreationError {
846    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
847        match self {
848            ColorGradingLutCreationError::NotEnoughData { required, current } => {
849                write!(
850                    f,
851                    "There is not enough data in provided \
852                texture to build LUT. Required: {required}, current: {current}.",
853                )
854            }
855            ColorGradingLutCreationError::InvalidPixelFormat(v) => {
856                write!(
857                    f,
858                    "Pixel format is not supported. It must be either RGB8 \
859                or RGBA8, but texture has {v:?} pixel format"
860                )
861            }
862            ColorGradingLutCreationError::Texture(v) => {
863                write!(f, "Texture load error: {v}")
864            }
865        }
866    }
867}
868
869/// Color grading look up table (LUT). Color grading is used to modify color space of the
870/// rendered frame; it maps one color space to another. It is widely used effect in games,
871/// you've probably noticed either "warmness" or "coldness" in colors in various scenes in
872/// games - this is achieved by color grading.
873///
874/// See [more info in Unreal engine docs](https://docs.unrealengine.com/4.26/en-US/RenderingAndGraphics/PostProcessEffects/UsingLUTs/)
#[derive(Visit, Clone, Default, PartialEq, Debug, Reflect, Eq)]
pub struct ColorGradingLut {
    // Source 2D strip texture from which the 3D LUT was generated; this is the
    // only field that is serialized/reflected.
    unwrapped_lut: Option<TextureResource>,

    // Derived 16x16x16 RGB8 volume texture (built in `ColorGradingLut::new`);
    // skipped by both serialization and reflection since it can be rebuilt
    // from `unwrapped_lut`.
    #[visit(skip)]
    #[reflect(hidden)]
    lut: Option<TextureResource>,
}
883
// Stable type id for `ColorGradingLut`; see the `uuid_provider` macro for details.
uuid_provider!(ColorGradingLut = "bca9c90a-7cde-4960-8814-c132edfc9614");
885
886impl ColorGradingLut {
887    /// Creates 3D look-up texture from 2D strip.
888    ///
889    /// # Input Texture Requirements
890    ///
891    /// Width: 1024px
892    /// Height: 16px
893    /// Pixel Format: RGB8/RGBA8
894    ///
895    /// # Usage
896    ///
897    /// Typical usage would be:
898    ///
899    /// ```no_run
900    /// # use fyrox_impl::scene::camera::ColorGradingLut;
901    /// # use fyrox_impl::asset::manager::{ResourceManager};
902    /// # use fyrox_impl::resource::texture::Texture;
903    ///
904    /// async fn create_lut(resource_manager: ResourceManager) -> ColorGradingLut {
905    ///     ColorGradingLut::new(resource_manager.request::<Texture>(
906    ///         "your_lut.jpg",
907    ///     ))
908    ///     .await
909    ///     .unwrap()
910    /// }
911    /// ```
912    ///
913    /// Then pass LUT to either CameraBuilder or to camera instance, and don't forget to enable
914    /// color grading.
915    pub async fn new(unwrapped_lut: TextureResource) -> Result<Self, ColorGradingLutCreationError> {
916        match unwrapped_lut.await {
917            Ok(unwrapped_lut) => {
918                let data = unwrapped_lut.data_ref();
919
920                if data.pixel_kind() != TexturePixelKind::RGBA8
921                    && data.pixel_kind() != TexturePixelKind::RGB8
922                {
923                    return Err(ColorGradingLutCreationError::InvalidPixelFormat(
924                        data.pixel_kind(),
925                    ));
926                }
927
928                let bytes = data.data();
929
930                const RGBA8_SIZE: usize = 16 * 16 * 16 * 4;
931                const RGB8_SIZE: usize = 16 * 16 * 16 * 3;
932
933                if data.pixel_kind() == TexturePixelKind::RGBA8 {
934                    if bytes.len() != RGBA8_SIZE {
935                        return Err(ColorGradingLutCreationError::NotEnoughData {
936                            required: RGBA8_SIZE,
937                            current: bytes.len(),
938                        });
939                    }
940                } else if bytes.len() != RGB8_SIZE {
941                    return Err(ColorGradingLutCreationError::NotEnoughData {
942                        required: RGB8_SIZE,
943                        current: bytes.len(),
944                    });
945                }
946
947                let pixel_size = if data.pixel_kind() == TexturePixelKind::RGBA8 {
948                    4
949                } else {
950                    3
951                };
952
953                let mut lut_bytes = Vec::with_capacity(16 * 16 * 16 * 3);
954
955                for z in 0..16 {
956                    for y in 0..16 {
957                        for x in 0..16 {
958                            let pixel_index = z * 16 + y * 16 * 16 + x;
959                            let pixel_byte_pos = pixel_index * pixel_size;
960
961                            lut_bytes.push(bytes[pixel_byte_pos]); // R
962                            lut_bytes.push(bytes[pixel_byte_pos + 1]); // G
963                            lut_bytes.push(bytes[pixel_byte_pos + 2]); // B
964                        }
965                    }
966                }
967
968                let lut = TextureResource::from_bytes(
969                    Uuid::new_v4(),
970                    TextureKind::Volume {
971                        width: 16,
972                        height: 16,
973                        depth: 16,
974                    },
975                    TexturePixelKind::RGB8,
976                    lut_bytes,
977                    ResourceKind::Embedded,
978                )
979                .unwrap();
980
981                let mut lut_ref = lut.data_ref();
982
983                lut_ref.set_s_wrap_mode(TextureWrapMode::ClampToEdge);
984                lut_ref.set_t_wrap_mode(TextureWrapMode::ClampToEdge);
985
986                drop(lut_ref);
987                drop(data);
988
989                Ok(Self {
990                    lut: Some(lut),
991                    unwrapped_lut: Some(unwrapped_lut),
992                })
993            }
994            Err(e) => Err(ColorGradingLutCreationError::Texture(e)),
995        }
996    }
997
998    /// Returns color grading unwrapped look-up table. This is initial texture that was
999    /// used to create the look-up table.
1000    pub fn unwrapped_lut(&self) -> TextureResource {
1001        self.unwrapped_lut.clone().unwrap()
1002    }
1003
1004    /// Returns 3D color grading look-up table ready for use on GPU.
1005    pub fn lut(&self) -> TextureResource {
1006        self.lut.clone().unwrap()
1007    }
1008
1009    /// Returns 3D color grading look-up table by ref ready for use on GPU.
1010    pub fn lut_ref(&self) -> &TextureResource {
1011        self.lut.as_ref().unwrap()
1012    }
1013}
1014
/// Camera builder is used to create new camera in declarative manner.
/// This is typical implementation of Builder pattern.
pub struct CameraBuilder {
    base_builder: BaseBuilder,
    // Vertical field of view in radians (default: 75 degrees).
    // NOTE(review): `build_camera()` does not appear to read `fov`, `z_near`
    // or `z_far` — confirm whether they should feed into `projection`.
    fov: f32,
    // Near clipping plane distance (default: 0.025).
    z_near: f32,
    // Far clipping plane distance (default: 2048.0).
    z_far: f32,
    // Normalized viewport rectangle (default: full screen, 0..1 on both axes).
    viewport: Rect<f32>,
    // Whether the built camera starts enabled (default: true).
    enabled: bool,
    // Optional environment (skybox/reflection) texture.
    environment: Option<TextureResource>,
    // Exposure settings (default: manual, e).
    exposure: Exposure,
    // Optional color grading look-up table.
    color_grading_lut: Option<ColorGradingLut>,
    // Whether color grading is applied (default: false).
    color_grading_enabled: bool,
    // Projection mode (perspective/orthographic).
    projection: Projection,
    // Optional render target texture; when set, the camera renders into it.
    render_target: Option<TextureResource>,
}
1031
1032impl CameraBuilder {
1033    /// Creates new camera builder using given base node builder.
1034    pub fn new(base_builder: BaseBuilder) -> Self {
1035        Self {
1036            enabled: true,
1037            base_builder,
1038            fov: 75.0f32.to_radians(),
1039            z_near: 0.025,
1040            z_far: 2048.0,
1041            viewport: Rect::new(0.0, 0.0, 1.0, 1.0),
1042            environment: None,
1043            exposure: Exposure::Manual(std::f32::consts::E),
1044            color_grading_lut: None,
1045            color_grading_enabled: false,
1046            projection: Projection::default(),
1047            render_target: None,
1048        }
1049    }
1050
1051    /// Sets desired field of view in radians.
1052    pub fn with_fov(mut self, fov: f32) -> Self {
1053        self.fov = fov;
1054        self
1055    }
1056
1057    /// Sets desired near projection plane.
1058    pub fn with_z_near(mut self, z_near: f32) -> Self {
1059        self.z_near = z_near;
1060        self
1061    }
1062
1063    /// Sets desired far projection plane.
1064    pub fn with_z_far(mut self, z_far: f32) -> Self {
1065        self.z_far = z_far;
1066        self
1067    }
1068
1069    /// Sets desired viewport.
1070    pub fn with_viewport(mut self, viewport: Rect<f32>) -> Self {
1071        self.viewport = viewport;
1072        self
1073    }
1074
1075    /// Sets desired initial state of camera: enabled or disabled.
1076    pub fn enabled(mut self, enabled: bool) -> Self {
1077        self.enabled = enabled;
1078        self
1079    }
1080
1081    /// Sets desired environment map.
1082    pub fn with_environment(mut self, environment: TextureResource) -> Self {
1083        self.environment = Some(environment);
1084        self
1085    }
1086
1087    /// Sets desired color grading LUT.
1088    pub fn with_color_grading_lut(mut self, lut: ColorGradingLut) -> Self {
1089        self.color_grading_lut = Some(lut);
1090        self
1091    }
1092
1093    /// Sets whether color grading should be enabled or not.
1094    pub fn with_color_grading_enabled(mut self, enabled: bool) -> Self {
1095        self.color_grading_enabled = enabled;
1096        self
1097    }
1098
1099    /// Sets desired exposure options.
1100    pub fn with_exposure(mut self, exposure: Exposure) -> Self {
1101        self.exposure = exposure;
1102        self
1103    }
1104
1105    /// Sets desired projection mode.
1106    pub fn with_projection(mut self, projection: Projection) -> Self {
1107        self.projection = projection;
1108        self
1109    }
1110
1111    /// Sets desired render target for the camera.
1112    pub fn with_render_target(mut self, render_target: Option<TextureResource>) -> Self {
1113        self.render_target = render_target;
1114        self
1115    }
1116
1117    /// Creates new instance of camera.
1118    pub fn build_camera(self) -> Camera {
1119        Camera {
1120            enabled: self.enabled.into(),
1121            base: self.base_builder.build_base(),
1122            projection: self.projection.into(),
1123            viewport: self.viewport.into(),
1124            // No need to calculate these matrices - they'll be automatically
1125            // recalculated before rendering.
1126            view_matrix: Matrix4::identity(),
1127            projection_matrix: Matrix4::identity(),
1128            environment: self.environment.into(),
1129            exposure: self.exposure.into(),
1130            color_grading_lut: self.color_grading_lut.into(),
1131            color_grading_enabled: self.color_grading_enabled.into(),
1132            render_target: self.render_target,
1133        }
1134    }
1135
1136    /// Creates new instance of camera node.
1137    pub fn build_node(self) -> Node {
1138        Node::new(self.build_camera())
1139    }
1140
1141    /// Creates new instance of camera node and adds it to the graph.
1142    pub fn build(self, graph: &mut Graph) -> Handle<Node> {
1143        graph.add_node(self.build_node())
1144    }
1145}