zng_webrender/renderer/
mod.rs

1/* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this
3 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
4
5//! The high-level module responsible for interfacing with the GPU.
6//!
7//! Much of WebRender's design is driven by separating work into different
8//! threads. To avoid the complexities of multi-threaded GPU access, we restrict
9//! all communication with the GPU to one thread, the render thread. But since
10//! issuing GPU commands is often a bottleneck, we move everything else (i.e.
11//! the computation of what commands to issue) to another thread, the
12//! RenderBackend thread. The RenderBackend, in turn, may delegate work to other
13//! thread (like the SceneBuilder threads or Rayon workers), but the
14//! Render-vs-RenderBackend distinction is the most important.
15//!
16//! The consumer is responsible for initializing the render thread before
17//! calling into WebRender, which means that this module also serves as the
18//! initial entry point into WebRender, and is responsible for spawning the
19//! various other threads discussed above. That said, WebRender initialization
20//! returns both the `Renderer` instance as well as a channel for communicating
21//! directly with the `RenderBackend`. Aside from a few high-level operations
22//! like 'render now', most of interesting commands from the consumer go over
23//! that channel and operate on the `RenderBackend`.
24//!
25//! ## Space conversion guidelines
//! At this stage, we should be operating with `DevicePixel` and `FramebufferPixel` only.
//! "Framebuffer" space represents the final destination of our rendering,
//! and it happens to be Y-flipped on OpenGL. The conversion is done as follows:
//!   - for rasterized primitives, the orthographic projection transforms
//! the content rectangle to -1 to 1
31//!   - the viewport transformation is setup to map the whole range to
32//! the framebuffer rectangle provided by the document view, stored in `DrawTarget`
33//!   - all the direct framebuffer operations, like blitting, reading pixels, and setting
34//! up the scissor, are accepting already transformed coordinates, which we can get by
35//! calling `DrawTarget::to_framebuffer_rect`
36
37use api::{ClipMode, ColorF, ColorU, MixBlendMode};
38use api::{DocumentId, Epoch, ExternalImageHandler, RenderReasons};
39#[cfg(feature = "replay")]
40use api::ExternalImageId;
41use api::{ExternalImageSource, ExternalImageType, ImageFormat, PremultipliedColorF};
42use api::{PipelineId, ImageRendering, Checkpoint, NotificationRequest, ImageBufferKind};
43#[cfg(feature = "replay")]
44use api::ExternalImage;
45use api::FramePublishId;
46use api::units::*;
47use api::channel::{Sender, Receiver};
48pub use api::DebugFlags;
49use core::time::Duration;
50
51use crate::pattern::PatternKind;
52use crate::render_api::{DebugCommand, ApiMsg, MemoryReport};
53use crate::batch::{AlphaBatchContainer, BatchKind, BatchFeatures, BatchTextures, BrushBatchKind, ClipBatchList};
54use crate::batch::ClipMaskInstanceList;
55#[cfg(any(feature = "capture", feature = "replay"))]
56use crate::capture::{CaptureConfig, ExternalCaptureImage, PlainExternalImage};
57use crate::composite::{CompositeState, CompositeTileSurface, CompositorInputLayer, CompositorSurfaceTransform, ResolvedExternalSurface};
58use crate::composite::{CompositorKind, Compositor, NativeTileId, CompositeFeatures, CompositeSurfaceFormat, ResolvedExternalSurfaceColorData};
59use crate::composite::{CompositorConfig, NativeSurfaceOperationDetails, NativeSurfaceId, NativeSurfaceOperation, ClipRadius};
60use crate::composite::TileKind;
61use crate::segment::SegmentBuilder;
62use crate::{debug_colors, CompositorInputConfig, CompositorSurfaceUsage};
63use crate::device::{DepthFunction, Device, DrawTarget, ExternalTexture, GpuFrameId, UploadPBOPool};
64use crate::device::{ReadTarget, ShaderError, Texture, TextureFilter, TextureFlags, TextureSlot, Texel};
65use crate::device::query::{GpuSampler, GpuTimer};
66#[cfg(feature = "capture")]
67use crate::device::FBOId;
68use crate::debug_item::DebugItem;
69use crate::frame_builder::Frame;
70use glyph_rasterizer::GlyphFormat;
71use crate::gpu_cache::{GpuCacheUpdate, GpuCacheUpdateList};
72use crate::gpu_cache::{GpuCacheDebugChunk, GpuCacheDebugCmd};
73use crate::gpu_types::{ScalingInstance, SvgFilterInstance, SVGFEFilterInstance, CopyInstance, PrimitiveInstanceData};
74use crate::gpu_types::{BlurInstance, ClearInstance, CompositeInstance};
75use crate::internal_types::{TextureSource, TextureSourceExternal, TextureCacheCategory, FrameId, FrameVec};
76#[cfg(any(feature = "capture", feature = "replay"))]
77use crate::internal_types::DebugOutput;
78use crate::internal_types::{CacheTextureId, FastHashMap, FastHashSet, RenderedDocument, ResultMsg};
79use crate::internal_types::{TextureCacheAllocInfo, TextureCacheAllocationKind, TextureUpdateList};
80use crate::internal_types::{RenderTargetInfo, Swizzle, DeferredResolveIndex};
81use crate::picture::ResolvedSurfaceTexture;
82use crate::prim_store::DeferredResolve;
83use crate::profiler::{self, GpuProfileTag, TransactionProfile};
84use crate::profiler::{Profiler, add_event_marker, add_text_marker, thread_is_being_profiled};
85use crate::device::query::GpuProfiler;
86use crate::render_target::ResolveOp;
87use crate::render_task_graph::RenderTaskGraph;
88use crate::render_task::{RenderTask, RenderTaskKind, ReadbackTask};
89use crate::screen_capture::AsyncScreenshotGrabber;
90use crate::render_target::{RenderTarget, PictureCacheTarget, PictureCacheTargetKind};
91use crate::render_target::{RenderTargetKind, BlitJob};
92use crate::telemetry::Telemetry;
93use crate::tile_cache::PictureCacheDebugInfo;
94use crate::util::drain_filter;
95use crate::rectangle_occlusion as occlusion;
96use upload::{upload_to_texture_cache, UploadTexturePool};
97use init::*;
98
99use euclid::{rect, Transform3D, Scale, default};
100use gleam::gl;
101use malloc_size_of::MallocSizeOfOps;
102
103#[cfg(feature = "replay")]
104use std::sync::Arc;
105
106use std::{
107    cell::RefCell,
108    collections::VecDeque,
109    f32,
110    ffi::c_void,
111    mem,
112    num::NonZeroUsize,
113    path::PathBuf,
114    rc::Rc,
115};
116#[cfg(any(feature = "capture", feature = "replay"))]
117use std::collections::hash_map::Entry;
118use crate::util::precise_time_ns;
119
120mod debug;
121mod gpu_buffer;
122mod gpu_cache;
123mod shade;
124mod vertex;
125mod upload;
126pub(crate) mod init;
127
128pub use debug::DebugRenderer;
129pub use shade::{PendingShadersToPrecache, Shaders, SharedShaders};
130pub use vertex::{desc, VertexArrayKind, MAX_VERTEX_TEXTURE_WIDTH};
131pub use gpu_buffer::{GpuBuffer, GpuBufferF, GpuBufferBuilderF, GpuBufferI, GpuBufferBuilderI, GpuBufferAddress, GpuBufferBuilder};
132
/// The size of the array of each type of vertex data texture that
/// is round-robin-ed each frame during bind_frame_data. Doing this
/// helps avoid driver stalls while updating the texture in some
/// drivers. The size of these textures are typically very small
/// (e.g. < 16 kB) so it's not a huge waste of memory. Despite that,
/// this is a short-term solution - we want to find a better way
/// to provide this frame data, which will likely involve some
/// combination of UBO/SSBO usage. Although this only affects some
/// platforms, it's enabled on all platforms to reduce testing
/// differences between platforms.
///
/// Sizes `Renderer::vertex_data_textures`, which is cycled through via
/// `Renderer::current_vertex_data_textures`.
pub const VERTEX_DATA_TEXTURE_COUNT: usize = 3;
144
/// Number of GPU blocks per UV rectangle provided for an image.
///
/// NOTE(review): i.e. each image UV rect is assumed to occupy two GPU-cache
/// blocks — confirm against the GPU cache layout before changing this.
pub const BLOCKS_PER_UV_RECT: usize = 2;
147
// GPU profiler tags used to attribute GPU time (and sampler time) to the
// various draw-call categories shown in the profiler overlay.
// Naming convention (from the labels below): "B_*" labels are brush batches,
// "C_*" labels are cached render tasks; the remaining tags cover standalone
// passes (blits, blurs, scales, SVG filters, compositing, target/data setup).
// The GPU_SAMPLER_TAG_* entries are whole-pass sampler tags rather than
// per-draw timer tags.
const GPU_TAG_BRUSH_OPACITY: GpuProfileTag = GpuProfileTag {
    label: "B_Opacity",
    color: debug_colors::DARKMAGENTA,
};
const GPU_TAG_BRUSH_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "B_LinearGradient",
    color: debug_colors::POWDERBLUE,
};
const GPU_TAG_BRUSH_YUV_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_YuvImage",
    color: debug_colors::DARKGREEN,
};
const GPU_TAG_BRUSH_MIXBLEND: GpuProfileTag = GpuProfileTag {
    label: "B_MixBlend",
    color: debug_colors::MAGENTA,
};
const GPU_TAG_BRUSH_BLEND: GpuProfileTag = GpuProfileTag {
    label: "B_Blend",
    color: debug_colors::ORANGE,
};
const GPU_TAG_BRUSH_IMAGE: GpuProfileTag = GpuProfileTag {
    label: "B_Image",
    color: debug_colors::SPRINGGREEN,
};
const GPU_TAG_BRUSH_SOLID: GpuProfileTag = GpuProfileTag {
    label: "B_Solid",
    color: debug_colors::RED,
};
const GPU_TAG_CACHE_CLIP: GpuProfileTag = GpuProfileTag {
    label: "C_Clip",
    color: debug_colors::PURPLE,
};
const GPU_TAG_CACHE_BORDER: GpuProfileTag = GpuProfileTag {
    label: "C_Border",
    color: debug_colors::CORNSILK,
};
const GPU_TAG_CACHE_LINE_DECORATION: GpuProfileTag = GpuProfileTag {
    label: "C_LineDecoration",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_CACHE_FAST_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_FastLinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CACHE_LINEAR_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_LinearGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_RADIAL_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_RadialGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_CONIC_GRADIENT: GpuProfileTag = GpuProfileTag {
    label: "C_ConicGradient",
    color: debug_colors::BROWN,
};
const GPU_TAG_SETUP_TARGET: GpuProfileTag = GpuProfileTag {
    label: "target init",
    color: debug_colors::SLATEGREY,
};
const GPU_TAG_SETUP_DATA: GpuProfileTag = GpuProfileTag {
    label: "data init",
    color: debug_colors::LIGHTGREY,
};
const GPU_TAG_PRIM_SPLIT_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "SplitComposite",
    color: debug_colors::DARKBLUE,
};
const GPU_TAG_PRIM_TEXT_RUN: GpuProfileTag = GpuProfileTag {
    label: "TextRun",
    color: debug_colors::BLUE,
};
const GPU_TAG_PRIMITIVE: GpuProfileTag = GpuProfileTag {
    label: "Primitive",
    color: debug_colors::RED,
};
const GPU_TAG_INDIRECT_PRIM: GpuProfileTag = GpuProfileTag {
    label: "Primitive (indirect)",
    color: debug_colors::YELLOWGREEN,
};
const GPU_TAG_INDIRECT_MASK: GpuProfileTag = GpuProfileTag {
    label: "Mask (indirect)",
    color: debug_colors::IVORY,
};
const GPU_TAG_BLUR: GpuProfileTag = GpuProfileTag {
    label: "Blur",
    color: debug_colors::VIOLET,
};
const GPU_TAG_BLIT: GpuProfileTag = GpuProfileTag {
    label: "Blit",
    color: debug_colors::LIME,
};
const GPU_TAG_SCALE: GpuProfileTag = GpuProfileTag {
    label: "Scale",
    color: debug_colors::GHOSTWHITE,
};
const GPU_SAMPLER_TAG_ALPHA: GpuProfileTag = GpuProfileTag {
    label: "Alpha targets",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_OPAQUE: GpuProfileTag = GpuProfileTag {
    label: "Opaque pass",
    color: debug_colors::BLACK,
};
const GPU_SAMPLER_TAG_TRANSPARENT: GpuProfileTag = GpuProfileTag {
    label: "Transparent pass",
    color: debug_colors::BLACK,
};
const GPU_TAG_SVG_FILTER: GpuProfileTag = GpuProfileTag {
    label: "SvgFilter",
    color: debug_colors::LEMONCHIFFON,
};
const GPU_TAG_SVG_FILTER_NODES: GpuProfileTag = GpuProfileTag {
    label: "SvgFilterNodes",
    color: debug_colors::LEMONCHIFFON,
};
const GPU_TAG_COMPOSITE: GpuProfileTag = GpuProfileTag {
    label: "Composite",
    color: debug_colors::TOMATO,
};
268
/// Key used when adding compositing tiles to the occlusion tracker.
///
/// Since an entire tile may have a mask, but we may segment that in
/// to masked and non-masked regions, we need to track which of the
/// occlusion tracker outputs need a mask.
#[derive(Debug, Copy, Clone)]
struct OcclusionItemKey {
    // Index of the composite tile this occlusion item refers to.
    tile_index: usize,
    // True if this (segment of the) tile must be drawn with a mask.
    needs_mask: bool,
}
278
/// Defines the content that we will draw to a given swapchain / layer, calculated
/// after occlusion culling.
struct SwapChainLayer {
    // Front-to-back occlusion builder holding the visible tile segments.
    occlusion: occlusion::FrontToBackBuilder<OcclusionItemKey>,
    // Tiles that only need to be cleared rather than drawn.
    clear_tiles: Vec<occlusion::Item<OcclusionItemKey>>,
}
285
/// The clear color used for the texture cache when the debug display is enabled.
/// We use a shade of blue so that we can still identify completely blue items in
/// the texture cache.
///
/// NOTE(review): stored as four float components, presumably `[r, g, b, a]` —
/// confirm component order at the use sites.
pub const TEXTURE_CACHE_DBG_CLEAR_COLOR: [f32; 4] = [0.0, 0.0, 0.8, 1.0];
290
291impl BatchKind {
292    fn sampler_tag(&self) -> GpuProfileTag {
293        match *self {
294            BatchKind::SplitComposite => GPU_TAG_PRIM_SPLIT_COMPOSITE,
295            BatchKind::Brush(kind) => {
296                match kind {
297                    BrushBatchKind::Solid => GPU_TAG_BRUSH_SOLID,
298                    BrushBatchKind::Image(..) => GPU_TAG_BRUSH_IMAGE,
299                    BrushBatchKind::Blend => GPU_TAG_BRUSH_BLEND,
300                    BrushBatchKind::MixBlend { .. } => GPU_TAG_BRUSH_MIXBLEND,
301                    BrushBatchKind::YuvImage(..) => GPU_TAG_BRUSH_YUV_IMAGE,
302                    BrushBatchKind::LinearGradient => GPU_TAG_BRUSH_LINEAR_GRADIENT,
303                    BrushBatchKind::Opacity => GPU_TAG_BRUSH_OPACITY,
304                }
305            }
306            BatchKind::TextRun(_) => GPU_TAG_PRIM_TEXT_RUN,
307            BatchKind::Quad(PatternKind::ColorOrTexture) => GPU_TAG_PRIMITIVE,
308            BatchKind::Quad(PatternKind::RadialGradient) => GPU_TAG_RADIAL_GRADIENT,
309            BatchKind::Quad(PatternKind::ConicGradient) => GPU_TAG_CONIC_GRADIENT,
310            BatchKind::Quad(PatternKind::Mask) => GPU_TAG_INDIRECT_MASK,
311        }
312    }
313}
314
315fn flag_changed(before: DebugFlags, after: DebugFlags, select: DebugFlags) -> Option<bool> {
316    if before & select != after & select {
317        Some(after.contains(select))
318    } else {
319        None
320    }
321}
322
/// Color handling mode passed to the shaders.
///
/// `#[repr(C)]` with explicit discriminants because the numeric values are
/// presumably consumed by shader code as raw integers — NOTE(review): keep in
/// sync with the shader source; confirm before renumbering.
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub enum ShaderColorMode {
    Alpha = 0,
    SubpixelDualSource = 1,
    BitmapShadow = 2,
    ColorBitmap = 3,
    Image = 4,
    MultiplyDualSource = 5,
}
333
334impl From<GlyphFormat> for ShaderColorMode {
335    fn from(format: GlyphFormat) -> ShaderColorMode {
336        match format {
337            GlyphFormat::Alpha |
338            GlyphFormat::TransformedAlpha |
339            GlyphFormat::Bitmap => ShaderColorMode::Alpha,
340            GlyphFormat::Subpixel | GlyphFormat::TransformedSubpixel => {
341                panic!("Subpixel glyph formats must be handled separately.");
342            }
343            GlyphFormat::ColorBitmap => ShaderColorMode::ColorBitmap,
344        }
345    }
346}
347
/// Enumeration of the texture samplers used across the various WebRender shaders.
///
/// Each variant corresponds to a uniform declared in shader source. We only bind
/// the variants we need for a given shader, so not every variant is bound for every
/// batch. The fixed slot each sampler binds to is defined by the `TextureSlot`
/// conversion impl below.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub(crate) enum TextureSampler {
    // The three color inputs; selectable by index via `TextureSampler::color`.
    Color0,
    Color1,
    Color2,
    GpuCache,
    TransformPalette,
    RenderTasks,
    Dither,
    PrimitiveHeadersF,
    PrimitiveHeadersI,
    ClipMask,
    // Float / integer halves of the GPU buffer (see `GpuBufferF` / `GpuBufferI`).
    GpuBufferF,
    GpuBufferI,
}
368
369impl TextureSampler {
370    pub(crate) fn color(n: usize) -> TextureSampler {
371        match n {
372            0 => TextureSampler::Color0,
373            1 => TextureSampler::Color1,
374            2 => TextureSampler::Color2,
375            _ => {
376                panic!("There are only 3 color samplers.");
377            }
378        }
379    }
380}
381
382impl Into<TextureSlot> for TextureSampler {
383    fn into(self) -> TextureSlot {
384        match self {
385            TextureSampler::Color0 => TextureSlot(0),
386            TextureSampler::Color1 => TextureSlot(1),
387            TextureSampler::Color2 => TextureSlot(2),
388            TextureSampler::GpuCache => TextureSlot(3),
389            TextureSampler::TransformPalette => TextureSlot(4),
390            TextureSampler::RenderTasks => TextureSlot(5),
391            TextureSampler::Dither => TextureSlot(6),
392            TextureSampler::PrimitiveHeadersF => TextureSlot(7),
393            TextureSampler::PrimitiveHeadersI => TextureSlot(8),
394            TextureSampler::ClipMask => TextureSlot(9),
395            TextureSampler::GpuBufferF => TextureSlot(10),
396            TextureSampler::GpuBufferI => TextureSlot(11),
397        }
398    }
399}
400
/// The graphics API used by this renderer instance.
#[derive(Clone, Debug, PartialEq)]
pub enum GraphicsApi {
    /// OpenGL (the only backend represented here).
    OpenGL,
}
405
/// Description of the graphics API / driver the renderer is running on.
#[derive(Clone, Debug)]
pub struct GraphicsApiInfo {
    /// Which API family is in use.
    pub kind: GraphicsApi,
    /// Renderer (device/driver) name string reported by the API.
    pub renderer: String,
    /// API version string reported by the driver.
    pub version: String,
}
412
/// GPU timing information recorded for a single frame.
#[derive(Debug)]
pub struct GpuProfile {
    /// The GPU frame this profile belongs to.
    pub frame_id: GpuFrameId,
    /// Total GPU time for the frame, summed over all GPU timers.
    pub paint_time_ns: u64,
}
418
419impl GpuProfile {
420    fn new(frame_id: GpuFrameId, timers: &[GpuTimer]) -> GpuProfile {
421        let mut paint_time_ns = 0;
422        for timer in timers {
423            paint_time_ns += timer.time_ns;
424        }
425        GpuProfile {
426            frame_id,
427            paint_time_ns,
428        }
429    }
430}
431
/// CPU-side timing breakdown recorded for a single frame.
#[derive(Debug)]
pub struct CpuProfile {
    /// The GPU frame this profile belongs to.
    pub frame_id: GpuFrameId,
    /// Time spent in the render backend, in nanoseconds.
    pub backend_time_ns: u64,
    /// Time spent compositing, in nanoseconds.
    pub composite_time_ns: u64,
    /// Number of draw calls issued for the frame.
    pub draw_calls: usize,
}
439
440impl CpuProfile {
441    fn new(
442        frame_id: GpuFrameId,
443        backend_time_ns: u64,
444        composite_time_ns: u64,
445        draw_calls: usize,
446    ) -> CpuProfile {
447        CpuProfile {
448            frame_id,
449            backend_time_ns,
450            composite_time_ns,
451            draw_calls,
452        }
453    }
454}
455
/// The selected partial present mode for a given frame.
#[derive(Debug, Copy, Clone)]
enum PartialPresentMode {
    /// The device supports fewer dirty rects than the number of dirty rects
    /// that WR produced. In this case, the WR dirty rects are union'ed into
    /// a single dirty rect, that is provided to the caller.
    Single {
        /// Union of all WR dirty rects, in device pixels.
        dirty_rect: DeviceRect,
    },
}
466
/// A texture-cache entry: the device texture plus the memory-reporting
/// category it is accounted under (see `TextureResolver::report_memory`).
struct CacheTexture {
    texture: Texture,
    category: TextureCacheCategory,
}
471
/// Helper struct for resolving device Textures for use during rendering passes.
///
/// Manages the mapping between the at-a-distance texture handles used by the
/// `RenderBackend` (which does not directly interface with the GPU) and actual
/// device texture handles.
struct TextureResolver {
    /// A map to resolve texture cache IDs to native textures.
    texture_cache_map: FastHashMap<CacheTextureId, CacheTexture>,

    /// Map of external image IDs to native textures.
    external_images: FastHashMap<DeferredResolveIndex, ExternalTexture>,

    /// A special 1x1 dummy texture used for shaders that expect to work with
    /// the output of the previous pass but are actually running in the first
    /// pass. Initialized to opaque white (see `TextureResolver::new`).
    dummy_cache_texture: Texture,
}
489
impl TextureResolver {
    /// Creates a resolver, allocating the 1x1 dummy texture (filled with
    /// opaque white) that stands in for the previous pass' output during the
    /// first pass.
    fn new(device: &mut Device) -> TextureResolver {
        let dummy_cache_texture = device
            .create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::RGBA8,
                1,
                1,
                TextureFilter::Linear,
                None,
            );
        // Fill the single texel with opaque white.
        device.upload_texture_immediate(
            &dummy_cache_texture,
            &[0xff, 0xff, 0xff, 0xff],
        );

        TextureResolver {
            texture_cache_map: FastHashMap::default(),
            external_images: FastHashMap::default(),
            dummy_cache_texture,
        }
    }

    /// Deletes the dummy texture and every texture still held in the cache map.
    /// Consumes the resolver.
    fn deinit(self, device: &mut Device) {
        device.delete_texture(self.dummy_cache_texture);

        for (_id, item) in self.texture_cache_map {
            device.delete_texture(item.texture);
        }
    }

    /// Called at the start of each frame. Currently a no-op, kept as a hook.
    fn begin_frame(&mut self) {
    }

    /// Called at the end of each render pass with the textures whose contents
    /// are no longer needed.
    fn end_pass(
        &mut self,
        device: &mut Device,
        textures_to_invalidate: &[CacheTextureId],
    ) {
        // For any texture that is no longer needed, immediately
        // invalidate it so that tiled GPUs don't need to resolve it
        // back to memory.
        for texture_id in textures_to_invalidate {
            let render_target = &self.texture_cache_map[texture_id].texture;
            device.invalidate_render_target(render_target);
        }
    }

    /// Bind a source texture to the given sampler slot on the device, returning
    /// the swizzle shaders must use when sampling it. `Invalid` binds nothing;
    /// external images must already have been resolved into `external_images`.
    fn bind(&self, texture_id: &TextureSource, sampler: TextureSampler, device: &mut Device) -> Swizzle {
        match *texture_id {
            TextureSource::Invalid => {
                Swizzle::default()
            }
            TextureSource::Dummy => {
                let swizzle = Swizzle::default();
                device.bind_texture(sampler, &self.dummy_cache_texture, swizzle);
                swizzle
            }
            TextureSource::External(TextureSourceExternal { ref index, .. }) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                device.bind_external_texture(sampler, texture);
                Swizzle::default()
            }
            TextureSource::TextureCache(index, swizzle) => {
                let texture = &self.texture_cache_map[&index].texture;
                device.bind_texture(sampler, texture, swizzle);
                swizzle
            }
        }
    }

    // Get the real (OpenGL) texture ID for a given source texture.
    // For a texture cache texture, the IDs are stored in a vector
    // map for fast access.
    // Returns None for `Invalid`; panics for external textures, which can
    // only be bound (see `bind`), never resolved.
    fn resolve(&self, texture_id: &TextureSource) -> Option<(&Texture, Swizzle)> {
        match *texture_id {
            TextureSource::Invalid => None,
            TextureSource::Dummy => {
                Some((&self.dummy_cache_texture, Swizzle::default()))
            }
            TextureSource::External(..) => {
                panic!("BUG: External textures cannot be resolved, they can only be bound.");
            }
            TextureSource::TextureCache(index, swizzle) => {
                Some((&self.texture_cache_map[&index].texture, swizzle))
            }
        }
    }

    // Retrieve the deferred / resolved UV rect if an external texture, otherwise
    // return the default supplied UV rect.
    fn get_uv_rect(
        &self,
        source: &TextureSource,
        default_value: TexelRect,
    ) -> TexelRect {
        match source {
            TextureSource::External(TextureSourceExternal { ref index, .. }) => {
                let texture = self.external_images
                    .get(index)
                    .expect("BUG: External image should be resolved by now");
                texture.get_uv_rect()
            }
            _ => {
                default_value
            }
        }
    }

    /// Returns the size of the texture in pixels.
    ///
    /// `Invalid` reports zero and `Dummy` reports 1x1; external textures
    /// report the extent of their UV rect (see caveat below).
    fn get_texture_size(&self, texture: &TextureSource) -> DeviceIntSize {
        match *texture {
            TextureSource::Invalid => DeviceIntSize::zero(),
            TextureSource::TextureCache(id, _) => {
                self.texture_cache_map[&id].texture.get_dimensions()
            },
            TextureSource::External(TextureSourceExternal { index, .. }) => {
                // If UV coords are normalized then this value will be incorrect. However, the
                // texture size is currently only used to set the uTextureSize uniform, so that
                // shaders without access to textureSize() can normalize unnormalized UVs. Which
                // means this is not a problem.
                let uv_rect = self.external_images[&index].get_uv_rect();
                (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32()
            },
            TextureSource::Dummy => DeviceIntSize::new(1, 1),
        }
    }

    /// Accumulates the GPU memory used by cached textures into a memory
    /// report, bucketed by `TextureCacheCategory`.
    fn report_memory(&self) -> MemoryReport {
        let mut report = MemoryReport::default();

        // We're reporting GPU memory rather than heap-allocations, so we don't
        // use size_of_op.
        for item in self.texture_cache_map.values() {
            let counter = match item.category {
                TextureCacheCategory::Atlas => &mut report.atlas_textures,
                TextureCacheCategory::Standalone => &mut report.standalone_textures,
                TextureCacheCategory::PictureTile => &mut report.picture_tile_textures,
                TextureCacheCategory::RenderTarget => &mut report.render_target_textures,
            };
            *counter += item.texture.size_in_bytes();
        }

        report
    }

    /// Records an estimate of external image memory usage (in MB) into the
    /// transaction profile.
    fn update_profile(&self, profile: &mut TransactionProfile) {
        let mut external_image_bytes = 0;
        for img in self.external_images.values() {
            let uv_rect = img.get_uv_rect();
            // If UV coords are normalized then this value will be incorrect. This is unfortunate
            // but doesn't impact end users at all.
            let size = (uv_rect.uv1 - uv_rect.uv0).abs().to_size().to_i32();

            // Assume 4 bytes per pixels which is true most of the time but
            // not always.
            let bpp = 4;
            external_image_bytes += size.area() as usize * bpp;
        }

        profile.set(profiler::EXTERNAL_IMAGE_BYTES, profiler::bytes_to_mb(external_image_bytes));
    }

    /// Mutable access to a cached texture; panics if `id` was never allocated.
    fn get_cache_texture_mut(&mut self, id: &CacheTextureId) -> &mut Texture {
        &mut self.texture_cache_map
            .get_mut(id)
            .expect("bug: texture not allocated")
            .texture
    }
}
663
/// The blend configuration used when drawing a batch.
///
/// See `BlendMode::from_mix_blend_mode` for how CSS mix-blend-modes map onto
/// these variants depending on device capabilities. Serialized for frame
/// capture / replay, hence the conditional derives.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BlendMode {
    None,
    Alpha,
    PremultipliedAlpha,
    PremultipliedDestOut,
    SubpixelDualSource,
    /// Hardware advanced-blend path carrying the original mix-blend-mode.
    Advanced(MixBlendMode),
    MultiplyDualSource,
    Screen,
    Exclusion,
    PlusLighter,
}
679
impl BlendMode {
    /// Decides when a given mix-blend-mode can be implemented in terms of
    /// simple blending, dual-source blending, advanced blending, or not at
    /// all based on available capabilities.
    ///
    /// `advanced_blend`, `coherent` and `dual_source` describe the blending
    /// capabilities available on the device. Returns `None` when the mode
    /// cannot be expressed with those capabilities, in which case the caller
    /// falls back to the `brush_mix_blend` shader path.
    pub fn from_mix_blend_mode(
        mode: MixBlendMode,
        advanced_blend: bool,
        coherent: bool,
        dual_source: bool,
    ) -> Option<BlendMode> {
        // If we emulate a mix-blend-mode via simple or dual-source blending,
        // care must be taken to output alpha As + Ad*(1-As) regardless of what
        // the RGB output is to comply with the mix-blend-mode spec.
        //
        // NOTE: match-arm order is significant here — the coherent
        // advanced-blend arm must stay first (it takes precedence over the
        // per-mode emulations), and the non-coherent advanced-blend arm must
        // stay after them (it is only a fallback).
        Some(match mode {
            // If we have coherent advanced blend, just use that.
            _ if advanced_blend && coherent => BlendMode::Advanced(mode),
            // Screen can be implemented as Cs + Cd - Cs*Cd => Cs + Cd*(1-Cs)
            MixBlendMode::Screen => BlendMode::Screen,
            // Exclusion can be implemented as Cs + Cd - 2*Cs*Cd => Cs*(1-Cd) + Cd*(1-Cs)
            MixBlendMode::Exclusion => BlendMode::Exclusion,
            // PlusLighter is basically a clamped add.
            MixBlendMode::PlusLighter => BlendMode::PlusLighter,
            // Multiply can be implemented as Cs*Cd + Cs*(1-Ad) + Cd*(1-As) => Cs*(1-Ad) + Cd*(1 - SRC1=(As-Cs))
            MixBlendMode::Multiply if dual_source => BlendMode::MultiplyDualSource,
            // Otherwise, use advanced blend without coherency if available.
            _ if advanced_blend => BlendMode::Advanced(mode),
            // If advanced blend is not available, then we have to use brush_mix_blend.
            _ => return None,
        })
    }
}
711
/// Information about the state of the debugging / profiler overlay in native compositing mode.
struct DebugOverlayState {
    /// True if any of the current debug flags will result in drawing a debug overlay.
    is_enabled: bool,

    /// The current size of the debug overlay surface. None implies that the
    /// debug surface isn't currently allocated.
    current_size: Option<DeviceIntSize>,

    /// Index of the layer the overlay is drawn into —
    /// NOTE(review): presumably a compositor layer index; confirm at use sites.
    layer_index: usize,
}
723
724impl DebugOverlayState {
725    fn new() -> Self {
726        DebugOverlayState {
727            is_enabled: false,
728            current_size: None,
729            layer_index: 0,
730        }
731    }
732}
733
/// Tracks buffer damage rects over a series of frames.
#[derive(Debug, Default)]
pub(crate) struct BufferDamageTracker {
    // Ring buffer of the last 4 frames' damage rects.
    damage_rects: [DeviceRect; 4],
    // Slot that will receive the next pushed rect; walks backwards with wraparound.
    current_offset: usize,
}
740
741impl BufferDamageTracker {
742    /// Sets the damage rect for the current frame. Should only be called *after*
743    /// get_damage_rect() has been called to get the current backbuffer's damage rect.
744    fn push_dirty_rect(&mut self, rect: &DeviceRect) {
745        self.damage_rects[self.current_offset] = rect.clone();
746        self.current_offset = match self.current_offset {
747            0 => self.damage_rects.len() - 1,
748            n => n - 1,
749        }
750    }
751
752    /// Gets the damage rect for the current backbuffer, given the backbuffer's age.
753    /// (The number of frames since it was previously the backbuffer.)
754    /// Returns an empty rect if the buffer is valid, and None if the entire buffer is invalid.
755    fn get_damage_rect(&self, buffer_age: usize) -> Option<DeviceRect> {
756        match buffer_age {
757            // 0 means this is a new buffer, so is completely invalid.
758            0 => None,
759            // 1 means this backbuffer was also the previous frame's backbuffer
760            // (so must have been copied to the frontbuffer). It is therefore entirely valid.
761            1 => Some(DeviceRect::zero()),
762            // We must calculate the union of the damage rects since this buffer was previously
763            // the backbuffer.
764            n if n <= self.damage_rects.len() + 1 => {
765                Some(
766                    self.damage_rects.iter()
767                        .cycle()
768                        .skip(self.current_offset + 1)
769                        .take(n - 1)
770                        .fold(DeviceRect::zero(), |acc, r| acc.union(r))
771                )
772            }
773            // The backbuffer is older than the number of frames for which we track,
774            // so we treat it as entirely invalid.
775            _ => None,
776        }
777    }
778}
779
/// The renderer is responsible for submitting to the GPU the work prepared by the
/// RenderBackend.
///
/// We have a separate `Renderer` instance for each instance of WebRender (generally
/// one per OS window), and all instances share the same thread.
pub struct Renderer {
    /// Receives results (published frames, resource updates, debug output, ...)
    /// from the render backend thread. Drained by `update()`.
    result_rx: Receiver<ResultMsg>,
    /// Channel back to the API / render backend (e.g. used by `render()` to
    /// report `ApiMsg::MemoryPressure` on OOM).
    api_tx: Sender<ApiMsg>,
    pub device: Device,
    /// Texture update lists received from the backend but not yet applied.
    pending_texture_updates: Vec<TextureUpdateList>,
    /// True if there are any TextureCacheUpdate pending.
    pending_texture_cache_updates: bool,
    pending_native_surface_updates: Vec<NativeSurfaceOperation>,
    pending_gpu_cache_updates: Vec<GpuCacheUpdateList>,
    pending_gpu_cache_clear: bool,
    /// Shader source paths queued for reload (see `ResultMsg::RefreshShader`).
    pending_shader_updates: Vec<PathBuf>,
    /// Documents that currently have a renderable frame available.
    active_documents: FastHashMap<DocumentId, RenderedDocument>,

    shaders: Rc<RefCell<Shaders>>,

    max_recorded_profiles: usize,

    /// Color used to clear the framebuffer; settable via `set_clear_color()`.
    clear_color: ColorF,
    enable_clear_scissor: bool,
    enable_advanced_blend_barriers: bool,
    clear_caches_with_quads: bool,
    clear_alpha_targets_with_quads: bool,

    debug: debug::LazyInitializedDebugRenderer,
    debug_flags: DebugFlags,
    profile: TransactionProfile,
    frame_counter: u64,
    resource_upload_time: f64,
    gpu_cache_upload_time: f64,
    profiler: Profiler,

    /// Timestamp (from `precise_time_ns()`) of the previous render; updated
    /// even when there was no document to draw.
    last_time: u64,

    pub gpu_profiler: GpuProfiler,
    vaos: vertex::RendererVAOs,

    gpu_cache_texture: gpu_cache::GpuCacheTexture,
    vertex_data_textures: Vec<vertex::VertexDataTextures>,
    current_vertex_data_textures: usize,

    /// When the GPU cache debugger is enabled, we keep track of the live blocks
    /// in the GPU cache so that we can use them for the debug display. This
    /// member stores those live blocks, indexed by row.
    gpu_cache_debug_chunks: Vec<Vec<GpuCacheDebugChunk>>,

    gpu_cache_frame_id: FrameId,
    gpu_cache_overflow: bool,

    /// Per-pipeline epoch and removal info accumulated from the backend;
    /// drained by `flush_pipeline_info()`.
    pipeline_info: PipelineInfo,

    // Manages and resolves source texture IDs to real texture IDs.
    texture_resolver: TextureResolver,

    texture_upload_pbo_pool: UploadPBOPool,
    staging_texture_pool: UploadTexturePool,

    dither_matrix_texture: Option<Texture>,

    /// Optional trait object that allows the client
    /// application to provide external buffers for image data.
    external_image_handler: Option<Box<dyn ExternalImageHandler>>,

    /// Optional function pointers for measuring memory used by a given
    /// heap-allocated pointer.
    size_of_ops: Option<MallocSizeOfOps>,

    pub renderer_errors: Vec<RendererError>,

    pub(in crate) async_frame_recorder: Option<AsyncScreenshotGrabber>,
    pub(in crate) async_screenshots: Option<AsyncScreenshotGrabber>,

    /// List of profile results from previous frames. Can be retrieved
    /// via get_frame_profiles().
    cpu_profiles: VecDeque<CpuProfile>,
    gpu_profiles: VecDeque<GpuProfile>,

    /// Notification requests to be fulfilled after rendering.
    notifications: Vec<NotificationRequest>,

    /// Size last passed to `render()`; `None` until a size has been recorded.
    device_size: Option<DeviceIntSize>,

    /// A lazily created texture for the zoom debugging widget.
    zoom_debug_texture: Option<Texture>,

    /// The current mouse position. This is used for debugging
    /// functionality only, such as the debug zoom widget.
    cursor_position: DeviceIntPoint,

    /// Guards to check if we might be rendering a frame with expired texture
    /// cache entries.
    shared_texture_cache_cleared: bool,

    /// The set of documents which we've seen a publish for since last render.
    documents_seen: FastHashSet<DocumentId>,

    #[cfg(feature = "capture")]
    read_fbo: FBOId,
    #[cfg(feature = "replay")]
    owned_external_images: FastHashMap<(ExternalImageId, u8), ExternalTexture>,

    /// The compositing config, affecting how WR composites into the final scene.
    compositor_config: CompositorConfig,
    current_compositor_kind: CompositorKind,

    /// Maintains a set of allocated native composite surfaces. This allows any
    /// currently allocated surfaces to be cleaned up as soon as deinit() is
    /// called (the normal bookkeeping for native surfaces exists in the
    /// render backend thread).
    allocated_native_surfaces: FastHashSet<NativeSurfaceId>,

    /// If true, partial present state has been reset and everything needs to
    /// be drawn on the next render.
    force_redraw: bool,

    /// State related to the debug / profiling overlays
    debug_overlay_state: DebugOverlayState,

    /// Tracks the dirty rectangles from previous frames. Used on platforms
    /// that require keeping the front buffer fully correct when doing
    /// partial present (e.g. unix desktop with EGL_EXT_buffer_age).
    buffer_damage_tracker: BufferDamageTracker,

    max_primitive_instance_count: usize,
    enable_instancing: bool,

    /// Count consecutive oom frames to detect if we are stuck unable to render
    /// in a loop.
    consecutive_oom_frames: u32,

    /// update() defers processing of ResultMsg, if frame_publish_id of
    /// ResultMsg::PublishDocument exceeds target_frame_publish_id.
    target_frame_publish_id: Option<FramePublishId>,

    /// Hold a next ResultMsg that will be handled by update().
    pending_result_msg: Option<ResultMsg>,
}
921
/// Errors the renderer can report back to the embedder.
#[derive(Debug)]
pub enum RendererError {
    /// A shader failed to build (see the `From<ShaderError>` impl below).
    Shader(ShaderError),
    /// Wraps an I/O error; per the variant name this is used for thread
    /// creation failures (see the `From<std::io::Error>` impl below).
    Thread(std::io::Error),
    /// NOTE(review): presumably the device's maximum texture size is below
    /// what WebRender requires — confirm at the raise site.
    MaxTextureSize,
    /// NOTE(review): presumably an unsupported software GL implementation was
    /// detected — confirm at the raise site.
    SoftwareRasterizer,
    /// An out-of-memory condition during rendering; `render()` reacts by
    /// sending a memory pressure event to the backend.
    OutOfMemory,
}
930
931impl From<ShaderError> for RendererError {
932    fn from(err: ShaderError) -> Self {
933        RendererError::Shader(err)
934    }
935}
936
937impl From<std::io::Error> for RendererError {
938    fn from(err: std::io::Error) -> Self {
939        RendererError::Thread(err)
940    }
941}
942
943impl Renderer {
    /// Returns the device size last recorded by `render()`, or `None` if no
    /// size has been recorded yet.
    pub fn device_size(&self) -> Option<DeviceIntSize> {
        self.device_size
    }
947
948    /// Update the current position of the debug cursor.
949    pub fn set_cursor_position(
950        &mut self,
951        position: DeviceIntPoint,
952    ) {
953        self.cursor_position = position;
954    }
955
    /// Returns the maximum texture size reported by the device.
    pub fn get_max_texture_size(&self) -> i32 {
        self.device.max_texture_size()
    }
959
960    pub fn get_graphics_api_info(&self) -> GraphicsApiInfo {
961        GraphicsApiInfo {
962            kind: GraphicsApi::OpenGL,
963            version: self.device.gl().get_string(gl::VERSION),
964            renderer: self.device.gl().get_string(gl::RENDERER),
965        }
966    }
967
    /// Returns the device's preferred color format for external images.
    pub fn preferred_color_format(&self) -> ImageFormat {
        self.device.preferred_color_formats().external
    }
971
    /// Returns the stride alignment, in bytes, that the device requires for
    /// PBO texture uploads of the given format.
    pub fn required_texture_stride_alignment(&self, format: ImageFormat) -> usize {
        self.device.required_pbo_stride().num_bytes(format).get()
    }
975
    /// Sets the color used when clearing the framebuffer.
    pub fn set_clear_color(&mut self, color: ColorF) {
        self.clear_color = color;
    }
979
980    pub fn flush_pipeline_info(&mut self) -> PipelineInfo {
981        mem::replace(&mut self.pipeline_info, PipelineInfo::default())
982    }
983
984    /// Returns the Epoch of the current frame in a pipeline.
985    pub fn current_epoch(&self, document_id: DocumentId, pipeline_id: PipelineId) -> Option<Epoch> {
986        self.pipeline_info.epochs.get(&(pipeline_id, document_id)).cloned()
987    }
988
989    fn get_next_result_msg(&mut self) -> Option<ResultMsg> {
990        if self.pending_result_msg.is_none() {
991            if let Ok(msg) = self.result_rx.try_recv() {
992                self.pending_result_msg = Some(msg);
993            }
994        }
995
996        match (&self.pending_result_msg, &self.target_frame_publish_id) {
997          (Some(ResultMsg::PublishDocument(frame_publish_id, _, _, _)), Some(target_id)) => {
998            if frame_publish_id > target_id {
999              return None;
1000            }
1001          }
1002          _ => {}
1003        }
1004
1005        self.pending_result_msg.take()
1006    }
1007
    /// Processes the result queue.
    ///
    /// Should be called before `render()`, as texture cache updates are done here.
    pub fn update(&mut self) {
        profile_scope!("update");

        // Pull any pending results and return the most recent.
        while let Some(msg) = self.get_next_result_msg() {
            match msg {
                ResultMsg::PublishPipelineInfo(mut pipeline_info) => {
                    // Accumulate epochs and removed pipelines; both are handed
                    // to the embedder via flush_pipeline_info().
                    for ((pipeline_id, document_id), epoch) in pipeline_info.epochs {
                        self.pipeline_info.epochs.insert((pipeline_id, document_id), epoch);
                    }
                    self.pipeline_info.removed_pipelines.extend(pipeline_info.removed_pipelines.drain(..));
                }
                ResultMsg::PublishDocument(
                    _,
                    document_id,
                    mut doc,
                    resource_update_list,
                ) => {
                    // Add a new document to the active set

                    // If the document we are replacing must be drawn (in order to
                    // update the texture cache), issue a render just to
                    // off-screen targets, ie pass None to render_impl. We do this
                    // because a) we don't need to render to the main framebuffer
                    // so it is cheaper not to, and b) doing so without a
                    // subsequent present would break partial present.
                    let prev_frame_memory = if let Some(mut prev_doc) = self.active_documents.remove(&document_id) {
                        doc.profile.merge(&mut prev_doc.profile);

                        if prev_doc.frame.must_be_drawn() {
                            prev_doc.render_reasons |= RenderReasons::TEXTURE_CACHE_FLUSH;
                            self.render_impl(
                                document_id,
                                &mut prev_doc,
                                None,
                                0,
                            ).ok();
                        }

                        Some(prev_doc.frame.allocator_memory)
                    } else {
                        None
                    };

                    if let Some(memory) = prev_frame_memory {
                        // We just dropped the frame a few lines above. There should be no
                        // live allocations left in the frame's memory.
                        memory.assert_memory_reusable();
                    }

                    self.active_documents.insert(document_id, doc);

                    // IMPORTANT: The pending texture cache updates must be applied
                    //            *after* the previous frame has been rendered above
                    //            (if necessary for a texture cache update). For
                    //            an example of why this is required:
                    //            1) Previous frame contains a render task that
                    //               targets Texture X.
                    //            2) New frame contains a texture cache update which
                    //               frees Texture X.
                    //            3) bad stuff happens.

                    //TODO: associate `document_id` with target window
                    self.pending_texture_cache_updates |= !resource_update_list.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resource_update_list.texture_updates);
                    self.pending_native_surface_updates.extend(resource_update_list.native_surface_updates);
                    self.documents_seen.insert(document_id);
                }
                ResultMsg::UpdateGpuCache(mut list) => {
                    if list.clear {
                        self.pending_gpu_cache_clear = true;
                    }
                    if list.clear {
                        self.gpu_cache_debug_chunks = Vec::new();
                    }
                    // Replay the debug alloc/free commands so the GPU cache
                    // debug display can track live blocks, indexed by row.
                    for cmd in mem::replace(&mut list.debug_commands, Vec::new()) {
                        match cmd {
                            GpuCacheDebugCmd::Alloc(chunk) => {
                                let row = chunk.address.v as usize;
                                if row >= self.gpu_cache_debug_chunks.len() {
                                    self.gpu_cache_debug_chunks.resize(row + 1, Vec::new());
                                }
                                self.gpu_cache_debug_chunks[row].push(chunk);
                            },
                            GpuCacheDebugCmd::Free(address) => {
                                let chunks = &mut self.gpu_cache_debug_chunks[address.v as usize];
                                let pos = chunks.iter()
                                    .position(|x| x.address == address).unwrap();
                                chunks.remove(pos);
                            },
                        }
                    }
                    self.pending_gpu_cache_updates.push(list);
                }
                ResultMsg::UpdateResources {
                    resource_updates,
                    memory_pressure,
                } => {
                    if memory_pressure {
                        // If a memory pressure event arrives _after_ a new scene has
                        // been published that writes persistent targets (i.e. cached
                        // render tasks to the texture cache, or picture cache tiles)
                        // but _before_ the next update/render loop, those targets
                        // will not be updated due to the active_documents list being
                        // cleared at the end of this message. To work around that,
                        // if any of the existing documents have not rendered yet, and
                        // have picture/texture cache targets, force a render so that
                        // those targets are updated.
                        let active_documents = mem::replace(
                            &mut self.active_documents,
                            FastHashMap::default(),
                        );
                        for (doc_id, mut doc) in active_documents {
                            if doc.frame.must_be_drawn() {
                                // As this render will not be presented, we must pass None to
                                // render_impl. This avoids interfering with partial present
                                // logic, as well as being more efficient.
                                self.render_impl(
                                    doc_id,
                                    &mut doc,
                                    None,
                                    0,
                                ).ok();
                            }
                        }
                    }

                    self.pending_texture_cache_updates |= !resource_updates.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resource_updates.texture_updates);
                    self.pending_native_surface_updates.extend(resource_updates.native_surface_updates);
                    self.device.begin_frame();

                    self.update_texture_cache();
                    self.update_native_surfaces();

                    // Flush the render target pool on memory pressure.
                    //
                    // This needs to be separate from the block below because
                    // the device module asserts if we delete textures while
                    // not in a frame.
                    if memory_pressure {
                        self.texture_upload_pbo_pool.on_memory_pressure(&mut self.device);
                        self.staging_texture_pool.delete_textures(&mut self.device);
                    }

                    self.device.end_frame();
                }
                ResultMsg::RenderDocumentOffscreen(document_id, mut offscreen_doc, resources) => {
                    // Flush pending operations if needed (See comment in the match arm for
                    // PublishDocument).

                    // Borrow-ck dance.
                    let prev_doc = self.active_documents.remove(&document_id);
                    if let Some(mut prev_doc) = prev_doc {
                        if prev_doc.frame.must_be_drawn() {
                            prev_doc.render_reasons |= RenderReasons::TEXTURE_CACHE_FLUSH;
                            self.render_impl(
                                document_id,
                                &mut prev_doc,
                                None,
                                0,
                            ).ok();
                        }

                        self.active_documents.insert(document_id, prev_doc);
                    }

                    // Now update resources and render the offscreen frame.

                    self.pending_texture_cache_updates |= !resources.texture_updates.updates.is_empty();
                    self.pending_texture_updates.push(resources.texture_updates);
                    self.pending_native_surface_updates.extend(resources.native_surface_updates);

                    self.render_impl(
                        document_id,
                        &mut offscreen_doc,
                        None,
                        0,
                    ).unwrap();
                }
                ResultMsg::AppendNotificationRequests(mut notifications) => {
                    // We need to know specifically if there are any pending
                    // TextureCacheUpdate updates in any of the entries in
                    // pending_texture_updates. They may simply be nops, which do not
                    // need to prevent issuing the notification, and if so, may not
                    // cause a timely frame render to occur to wake up any listeners.
                    if !self.pending_texture_cache_updates {
                        drain_filter(
                            &mut notifications,
                            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
                            |n| { n.notify(); },
                        );
                    }
                    self.notifications.append(&mut notifications);
                }
                ResultMsg::ForceRedraw => {
                    // Invalidate partial present state; the next render will
                    // redraw the entire framebuffer.
                    self.force_redraw = true;
                }
                ResultMsg::RefreshShader(path) => {
                    self.pending_shader_updates.push(path);
                }
                ResultMsg::SetParameter(ref param) => {
                    self.device.set_parameter(param);
                    self.profiler.set_parameter(param);
                }
                ResultMsg::DebugOutput(output) => match output {
                    #[cfg(feature = "capture")]
                    DebugOutput::SaveCapture(config, deferred) => {
                        self.save_capture(config, deferred);
                    }
                    #[cfg(feature = "replay")]
                    DebugOutput::LoadCapture(config, plain_externals) => {
                        self.active_documents.clear();
                        self.load_capture(config, plain_externals);
                    }
                },
                ResultMsg::DebugCommand(command) => {
                    self.handle_debug_command(command);
                }
            }
        }
    }
1233
1234    /// update() defers processing of ResultMsg, if frame_publish_id of
1235    /// ResultMsg::PublishDocument exceeds target_frame_publish_id.
1236    pub fn set_target_frame_publish_id(&mut self, publish_id: FramePublishId) {
1237        self.target_frame_publish_id = Some(publish_id);
1238    }
1239
1240    fn handle_debug_command(&mut self, command: DebugCommand) {
1241        match command {
1242            DebugCommand::SetPictureTileSize(_) |
1243            DebugCommand::SetMaximumSurfaceSize(_) => {
1244                panic!("Should be handled by render backend");
1245            }
1246            DebugCommand::SaveCapture(..) |
1247            DebugCommand::LoadCapture(..) |
1248            DebugCommand::StartCaptureSequence(..) |
1249            DebugCommand::StopCaptureSequence => {
1250                panic!("Capture commands are not welcome here! Did you build with 'capture' feature?")
1251            }
1252            DebugCommand::ClearCaches(_)
1253            | DebugCommand::SimulateLongSceneBuild(_)
1254            | DebugCommand::EnableNativeCompositor(_)
1255            | DebugCommand::SetBatchingLookback(_) => {}
1256            DebugCommand::InvalidateGpuCache => {
1257                self.gpu_cache_texture.invalidate();
1258            }
1259            DebugCommand::SetFlags(flags) => {
1260                self.set_debug_flags(flags);
1261            }
1262        }
1263    }
1264
1265    /// Set a callback for handling external images.
1266    pub fn set_external_image_handler(&mut self, handler: Box<dyn ExternalImageHandler>) {
1267        self.external_image_handler = Some(handler);
1268    }
1269
1270    /// Retrieve (and clear) the current list of recorded frame profiles.
1271    pub fn get_frame_profiles(&mut self) -> (Vec<CpuProfile>, Vec<GpuProfile>) {
1272        let cpu_profiles = self.cpu_profiles.drain(..).collect();
1273        let gpu_profiles = self.gpu_profiles.drain(..).collect();
1274        (cpu_profiles, gpu_profiles)
1275    }
1276
    /// Reset the current partial present state. This forces the entire framebuffer
    /// to be refreshed next time `render` is called.
    pub fn force_redraw(&mut self) {
        self.force_redraw = true;
    }
1282
1283    /// Renders the current frame.
1284    ///
1285    /// A Frame is supplied by calling [`generate_frame()`][webrender_api::Transaction::generate_frame].
1286    /// buffer_age is the age of the current backbuffer. It is only relevant if partial present
1287    /// is active, otherwise 0 should be passed here.
1288    pub fn render(
1289        &mut self,
1290        device_size: DeviceIntSize,
1291        buffer_age: usize,
1292    ) -> Result<RenderResults, Vec<RendererError>> {
1293        self.device_size = Some(device_size);
1294
1295        // TODO(gw): We want to make the active document that is
1296        //           being rendered configurable via the public
1297        //           API in future. For now, just select the last
1298        //           added document as the active one to render
1299        //           (Gecko only ever creates a single document
1300        //           per renderer right now).
1301        let doc_id = self.active_documents.keys().last().cloned();
1302
1303        let result = match doc_id {
1304            Some(doc_id) => {
1305                // Remove the doc from the map to appease the borrow checker
1306                let mut doc = self.active_documents
1307                    .remove(&doc_id)
1308                    .unwrap();
1309
1310                let size = if !device_size.is_empty() {
1311                    Some(device_size)
1312                } else {
1313                    None
1314                };
1315
1316                let result = self.render_impl(
1317                    doc_id,
1318                    &mut doc,
1319                    size,
1320                    buffer_age,
1321                );
1322
1323                self.active_documents.insert(doc_id, doc);
1324
1325                result
1326            }
1327            None => {
1328                self.last_time = precise_time_ns();
1329                Ok(RenderResults::default())
1330            }
1331        };
1332
1333        drain_filter(
1334            &mut self.notifications,
1335            |n| { n.when() == Checkpoint::FrameRendered },
1336            |n| { n.notify(); },
1337        );
1338
1339        let mut oom = false;
1340        if let Err(ref errors) = result {
1341            for error in errors {
1342                if matches!(error, &RendererError::OutOfMemory) {
1343                    oom = true;
1344                    break;
1345                }
1346            }
1347        }
1348
1349        if oom {
1350            let _ = self.api_tx.send(ApiMsg::MemoryPressure);
1351            // Ensure we don't get stuck in a loop.
1352            self.consecutive_oom_frames += 1;
1353            assert!(self.consecutive_oom_frames < 5, "Renderer out of memory");
1354        } else {
1355            self.consecutive_oom_frames = 0;
1356        }
1357
1358        // This is the end of the rendering pipeline. If some notifications are is still there,
1359        // just clear them and they will autimatically fire the Checkpoint::TransactionDropped
1360        // event. Otherwise they would just pile up in this vector forever.
1361        self.notifications.clear();
1362
1363        tracy_frame_marker!();
1364
1365        result
1366    }
1367
    /// Update the state of any debug / profiler overlays. This is currently only needed
    /// when running with the native compositor enabled.
    fn update_debug_overlay(
        &mut self,
        framebuffer_size: DeviceIntSize,
        has_debug_items: bool,
    ) {
        // If any of the following debug flags are set, something will be drawn on the debug overlay.
        self.debug_overlay_state.is_enabled = has_debug_items || self.debug_flags.intersects(
            DebugFlags::PROFILER_DBG |
            DebugFlags::RENDER_TARGET_DBG |
            DebugFlags::TEXTURE_CACHE_DBG |
            DebugFlags::EPOCHS |
            DebugFlags::GPU_CACHE_DBG |
            DebugFlags::PICTURE_CACHING_DBG |
            DebugFlags::PRIMITIVE_DBG |
            DebugFlags::ZOOM_DBG |
            DebugFlags::WINDOW_VISIBILITY_DBG
        );

        // Update the debug overlay surface, if we are running in native compositor mode.
        if let CompositorKind::Native { .. } = self.current_compositor_kind {
            let compositor = self.compositor_config.compositor().unwrap();

            // If there is a current surface, destroy it if we don't need it for this frame, or if
            // the size has changed.
            if let Some(current_size) = self.debug_overlay_state.current_size {
                if !self.debug_overlay_state.is_enabled || current_size != framebuffer_size {
                    compositor.destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
                    self.debug_overlay_state.current_size = None;
                }
            }

            // Allocate a new surface, if we need it and there isn't one.
            if self.debug_overlay_state.is_enabled && self.debug_overlay_state.current_size.is_none() {
                compositor.create_surface(
                    &mut self.device,
                    NativeSurfaceId::DEBUG_OVERLAY,
                    DeviceIntPoint::zero(),
                    framebuffer_size,
                    false, // NOTE(review): presumably `is_opaque` — confirm against Compositor::create_surface
                );
                compositor.create_tile(
                    &mut self.device,
                    NativeTileId::DEBUG_OVERLAY,
                );
                self.debug_overlay_state.current_size = Some(framebuffer_size);
            }
        }
    }
1418
    /// Bind a draw target for the debug / profiler overlays, if required.
    ///
    /// Returns `None` when the overlay is disabled for this frame. Otherwise
    /// returns the draw target the debug renderer should draw into, which
    /// depends on the active compositor kind.
    fn bind_debug_overlay(&mut self, device_size: DeviceIntSize) -> Option<DrawTarget> {
        // A draw target is only bound when the overlay is enabled for this frame.
        if self.debug_overlay_state.is_enabled {
            match self.current_compositor_kind {
                CompositorKind::Native { .. } => {
                    let compositor = self.compositor_config.compositor().unwrap();
                    // `update_debug_overlay` must have allocated the surface already.
                    let surface_size = self.debug_overlay_state.current_size.unwrap();

                    // Ensure old surface is invalidated before binding
                    compositor.invalidate_tile(
                        &mut self.device,
                        NativeTileId::DEBUG_OVERLAY,
                        DeviceIntRect::from_size(surface_size),
                    );
                    // Bind the native surface
                    let surface_info = compositor.bind(
                        &mut self.device,
                        NativeTileId::DEBUG_OVERLAY,
                        DeviceIntRect::from_size(surface_size),
                        DeviceIntRect::from_size(surface_size),
                    );

                    // Bind the native surface to current FBO target
                    let draw_target = DrawTarget::NativeSurface {
                        offset: surface_info.origin,
                        external_fbo_id: surface_info.fbo_id,
                        dimensions: surface_size,
                    };
                    self.device.bind_draw_target(draw_target);

                    // When native compositing, clear the debug overlay each frame.
                    self.device.clear_target(
                        Some([0.0, 0.0, 0.0, 0.0]),
                        None, // debug renderer does not use depth
                        None,
                    );

                    Some(draw_target)
                }
                CompositorKind::Layer { .. } => {
                    let compositor = self.compositor_config.layer_compositor().unwrap();
                    compositor.bind_layer(self.debug_overlay_state.layer_index, &[]);

                    self.device.clear_target(
                        Some([0.0, 0.0, 0.0, 0.0]),
                        None, // debug renderer does not use depth
                        None,
                    );

                    Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
                }
                CompositorKind::Draw { .. } => {
                    // If we're not using the native compositor, then the default
                    // frame buffer is already bound. Create a DrawTarget for it and
                    // return it.
                    Some(DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left()))
                }
            }
        } else {
            None
        }
    }
1482
    /// Unbind the draw target for debug / profiler overlays, if required.
    ///
    /// Counterpart to `bind_debug_overlay`: for the native compositor this
    /// hands the overlay surface back to the compositor for compositing; for
    /// the layer compositor it presents the overlay layer; the draw
    /// compositor needs no teardown.
    fn unbind_debug_overlay(&mut self) {
        // Teardown is only needed when the overlay was bound for this frame.
        if self.debug_overlay_state.is_enabled {
            match self.current_compositor_kind {
                CompositorKind::Native { .. } => {
                    let compositor = self.compositor_config.compositor().unwrap();
                    // Unbind the draw target and add it to the visual tree to be composited
                    compositor.unbind(&mut self.device);

                    let clip_rect = DeviceIntRect::from_size(
                        self.debug_overlay_state.current_size.unwrap(),
                    );

                    compositor.add_surface(
                        &mut self.device,
                        NativeSurfaceId::DEBUG_OVERLAY,
                        CompositorSurfaceTransform::identity(),
                        clip_rect,
                        ImageRendering::Auto,
                        clip_rect,
                        ClipRadius::EMPTY,
                    );
                }
                CompositorKind::Draw { .. } => {}
                CompositorKind::Layer { .. } => {
                    let compositor = self.compositor_config.layer_compositor().unwrap();
                    compositor.present_layer(self.debug_overlay_state.layer_index, &[]);
                }
            }
        }
    }
1515
    // If device_size is None, don't render to the main frame buffer. This is useful to
    // update texture cache render tasks but avoid doing a full frame render. If the
    // render is not going to be presented, then this must be set to None, as performing a
    // composite without a present will confuse partial present.
    //
    // Returns the accumulated `RenderResults` on success, or every
    // `RendererError` recorded during the frame on failure.
    fn render_impl(
        &mut self,
        doc_id: DocumentId,
        active_doc: &mut RenderedDocument,
        mut device_size: Option<DeviceIntSize>,
        buffer_age: usize,
    ) -> Result<RenderResults, Vec<RendererError>> {
        profile_scope!("render");
        let mut results = RenderResults::default();
        self.profile.end_time_if_started(profiler::FRAME_SEND_TIME);
        self.profile.start_time(profiler::RENDERER_TIME);

        self.staging_texture_pool.begin_frame();

        let compositor_kind = active_doc.frame.composite_state.compositor_kind;
        // CompositorKind is updated. If the frame was built for a different
        // compositor kind than the one currently active, switch over and tell
        // the native compositor to enable/disable itself accordingly.
        if self.current_compositor_kind != compositor_kind {
            let enable = match (self.current_compositor_kind, compositor_kind) {
                (CompositorKind::Native { .. }, CompositorKind::Draw { .. }) => {
                    // Leaving native mode: the debug overlay surface belongs to
                    // the native compositor, so it must be destroyed first.
                    if self.debug_overlay_state.current_size.is_some() {
                        self.compositor_config
                            .compositor()
                            .unwrap()
                            .destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
                        self.debug_overlay_state.current_size = None;
                    }
                    false
                }
                (CompositorKind::Draw { .. }, CompositorKind::Native { .. }) => {
                    true
                }
                (current_compositor_kind, active_doc_compositor_kind) => {
                    warn!("Compositor mismatch, assuming this is Wrench running. Current {:?}, active {:?}",
                        current_compositor_kind, active_doc_compositor_kind);
                    false
                }
            };

            if let Some(config) = self.compositor_config.compositor() {
                config.enable_native_compositor(&mut self.device, enable);
            }
            self.current_compositor_kind = compositor_kind;
        }

        // The texture resolver scope should be outside of any rendering, including
        // debug rendering. This ensures that when we return render targets to the
        // pool via glInvalidateFramebuffer, we don't do any debug rendering after
        // that point. Otherwise, the bind / invalidate / bind logic trips up the
        // render pass logic in tiled / mobile GPUs, resulting in an extra copy /
        // resolve step when the debug overlay is enabled.
        self.texture_resolver.begin_frame();

        // Collect last frame's GPU profile samples (may block on the GPU).
        if let Some(device_size) = device_size {
            self.update_gpu_profile(device_size);
        }

        // Begin the device frame and reset basic GL state before any drawing.
        let cpu_frame_id = {
            let _gm = self.gpu_profiler.start_marker("begin frame");
            let frame_id = self.device.begin_frame();
            self.gpu_profiler.begin_frame(frame_id);

            self.device.disable_scissor();
            self.device.disable_depth();
            self.set_blend(false, FramebufferKind::Main);
            //self.update_shaders();

            self.update_texture_cache();
            self.update_native_surfaces();

            frame_id
        };

        if !active_doc.frame.present {
            // Setting device_size to None is what ensures compositing/presenting
            // the frame is skipped in the rest of this module.
            device_size = None;
        }

        if let Some(device_size) = device_size {
            // Inform the client that we are starting a composition transaction if native
            // compositing is enabled. This needs to be done early in the frame, so that
            // we can create debug overlays after drawing the main surfaces.
            if let CompositorKind::Native { .. } = self.current_compositor_kind {
                let compositor = self.compositor_config.compositor().unwrap();
                compositor.begin_frame(&mut self.device);
            }

            // Update the state of the debug overlay surface, ensuring that
            // the compositor mode has a suitable surface to draw to, if required.
            self.update_debug_overlay(device_size, !active_doc.frame.debug_items.is_empty());
        }

        let frame = &mut active_doc.frame;
        let profile = &mut active_doc.profile;
        assert!(self.current_compositor_kind == frame.composite_state.compositor_kind);

        if self.shared_texture_cache_cleared {
            assert!(self.documents_seen.contains(&doc_id),
                    "Cleared texture cache without sending new document frame.");
        }

        // Upload the GPU cache (resolving deferred external images first),
        // then draw the frame if the upload succeeded.
        match self.prepare_gpu_cache(&frame.deferred_resolves) {
            Ok(..) => {
                assert!(frame.gpu_cache_frame_id <= self.gpu_cache_frame_id,
                    "Received frame depends on a later GPU cache epoch ({:?}) than one we received last via `UpdateGpuCache` ({:?})",
                    frame.gpu_cache_frame_id, self.gpu_cache_frame_id);

                self.draw_frame(
                    frame,
                    device_size,
                    buffer_age,
                    &mut results,
                );

                // TODO(nical): do this automatically by selecting counters in the wr profiler
                // Profile marker for the number of invalidated picture cache
                if thread_is_being_profiled() {
                    let duration = Duration::new(0,0);
                    if let Some(n) = self.profile.get(profiler::RENDERED_PICTURE_TILES) {
                        let message = (n as usize).to_string();
                        add_text_marker("NumPictureCacheInvalidated", &message, duration);
                    }
                }

                if device_size.is_some() {
                    self.draw_frame_debug_items(&frame.debug_items);
                }

                self.profile.merge(profile);
            }
            Err(e) => {
                self.renderer_errors.push(e);
            }
        }

        self.unlock_external_images(&frame.deferred_resolves);

        let _gm = self.gpu_profiler.start_marker("end frame");
        self.gpu_profiler.end_frame();

        let t = self.profile.end_time(profiler::RENDERER_TIME);
        self.profile.end_time_if_started(profiler::TOTAL_FRAME_CPU_TIME);

        let current_time = precise_time_ns();
        if device_size.is_some() {
            let time = profiler::ns_to_ms(current_time - self.last_time);
            self.profile.set(profiler::FRAME_TIME, time);
        }

        // Draw the debug / profiler overlays into their own surface, if one
        // is available for this frame.
        let debug_overlay = device_size.and_then(|device_size| {
            // Bind a surface to draw the debug / profiler information to.
            self.bind_debug_overlay(device_size).map(|draw_target| {
                self.draw_render_target_debug(&draw_target);
                self.draw_texture_cache_debug(&draw_target);
                self.draw_gpu_cache_debug(device_size);
                self.draw_zoom_debug(device_size);
                self.draw_epoch_debug();
                self.draw_window_visibility_debug();
                draw_target
            })
        });

        // `t` is in milliseconds; convert to microseconds for telemetry.
        Telemetry::record_renderer_time(Duration::from_micros((t * 1000.00) as u64));
        if self.profile.get(profiler::SHADER_BUILD_TIME).is_none() {
          Telemetry::record_renderer_time_no_sc(Duration::from_micros((t * 1000.00) as u64));
        }

        // Record this frame's CPU profile into the bounded ring buffer.
        if self.max_recorded_profiles > 0 {
            while self.cpu_profiles.len() >= self.max_recorded_profiles {
                self.cpu_profiles.pop_front();
            }
            let cpu_profile = CpuProfile::new(
                cpu_frame_id,
                (self.profile.get_or(profiler::FRAME_BUILDING_TIME, 0.0) * 1000000.0) as u64,
                (self.profile.get_or(profiler::RENDERER_TIME, 0.0) * 1000000.0) as u64,
                self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize,
            );
            self.cpu_profiles.push_back(cpu_profile);
        }

        if thread_is_being_profiled() {
            let duration = Duration::new(0,0);
            let message = (self.profile.get_or(profiler::DRAW_CALLS, 0.0) as usize).to_string();
            add_text_marker("NumDrawCalls", &message, duration);
        }

        // Publish memory usage counters for the various texture categories.
        let report = self.texture_resolver.report_memory();
        self.profile.set(profiler::RENDER_TARGET_MEM, profiler::bytes_to_mb(report.render_target_textures));
        self.profile.set(profiler::PICTURE_TILES_MEM, profiler::bytes_to_mb(report.picture_tile_textures));
        self.profile.set(profiler::ATLAS_TEXTURES_MEM, profiler::bytes_to_mb(report.atlas_textures));
        self.profile.set(profiler::STANDALONE_TEXTURES_MEM, profiler::bytes_to_mb(report.standalone_textures));

        self.profile.set(profiler::DEPTH_TARGETS_MEM, profiler::bytes_to_mb(self.device.depth_targets_memory()));

        self.profile.set(profiler::TEXTURES_CREATED, self.device.textures_created);
        self.profile.set(profiler::TEXTURES_DELETED, self.device.textures_deleted);

        results.stats.texture_upload_mb = self.profile.get_or(profiler::TEXTURE_UPLOADS_MEM, 0.0);
        self.frame_counter += 1;
        results.stats.resource_upload_time = self.resource_upload_time;
        self.resource_upload_time = 0.0;
        results.stats.gpu_cache_upload_time = self.gpu_cache_upload_time;
        self.gpu_cache_upload_time = 0.0;

        if let Some(stats) = active_doc.frame_stats.take() {
          // Copy the full frame stats to RendererStats
          results.stats.merge(&stats);

          self.profiler.update_frame_stats(stats);
        }

        // Turn the render reasons bitflags into something we can see in the profiler.
        // For now this is just a binary yes/no for each bit, which means that when looking
        // at "Render reasons" in the profiler HUD the average view indicates the proportion
        // of frames that had the bit set over a half second window whereas max shows whether
        // the bit as been set at least once during that time window.
        // We could implement better ways to visualize this information.
        let add_markers = thread_is_being_profiled();
        for i in 0..RenderReasons::NUM_BITS {
            let counter = profiler::RENDER_REASON_FIRST + i as usize;
            let mut val = 0.0;
            let reason_bit = RenderReasons::from_bits_truncate(1 << i);
            if active_doc.render_reasons.contains(reason_bit) {
                val = 1.0;
                if add_markers {
                    let event_str = format!("Render reason {:?}", reason_bit);
                    add_event_marker(&event_str);
                }
            }
            self.profile.set(counter, val);
        }
        active_doc.render_reasons = RenderReasons::empty();


        self.texture_resolver.update_profile(&mut self.profile);

        // Note: this clears the values in self.profile.
        self.profiler.set_counters(&mut self.profile);

        // Note: profile counters must be set before this or they will count for next frame.
        self.profiler.update();

        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
            if let Some(device_size) = device_size {
                //TODO: take device/pixel ratio into equation?
                if let Some(debug_renderer) = self.debug.get_mut(&mut self.device) {
                    self.profiler.draw_profile(
                        self.frame_counter,
                        debug_renderer,
                        device_size,
                    );
                }
            }
        }

        if self.debug_flags.contains(DebugFlags::ECHO_DRIVER_MESSAGES) {
            self.device.echo_driver_messages();
        }

        if let Some(debug_renderer) = self.debug.try_get_mut() {
            let small_screen = self.debug_flags.contains(DebugFlags::SMALL_SCREEN);
            let scale = if small_screen { 1.6 } else { 1.0 };
            // TODO(gw): Tidy this up so that compositor config integrates better
            //           with the (non-compositor) surface y-flip options.
            let surface_origin_is_top_left = match self.current_compositor_kind {
                CompositorKind::Native { .. } => true,
                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => self.device.surface_origin_is_top_left(),
            };
            // If there is a debug overlay, render it. Otherwise, just clear
            // the debug renderer.
            debug_renderer.render(
                &mut self.device,
                debug_overlay.and(device_size),
                scale,
                surface_origin_is_top_left,
            );
        }

        self.staging_texture_pool.end_frame(&mut self.device);
        self.texture_upload_pbo_pool.end_frame(&mut self.device);
        self.device.end_frame();

        if debug_overlay.is_some() {
            self.last_time = current_time;

            // Unbind the target for the debug overlay. No debug or profiler drawing
            // can occur afer this point.
            self.unbind_debug_overlay();
        }

        if device_size.is_some() {
            // Inform the client that we are finished this composition transaction if native
            // compositing is enabled. This must be called after any debug / profiling compositor
            // surfaces have been drawn and added to the visual tree.
            match self.current_compositor_kind {
                CompositorKind::Layer { .. } => {
                    let compositor = self.compositor_config.layer_compositor().unwrap();
                    compositor.end_frame();
                }
                CompositorKind::Native { .. } => {
                    profile_scope!("compositor.end_frame");
                    let compositor = self.compositor_config.compositor().unwrap();
                    compositor.end_frame(&mut self.device);
                }
                CompositorKind::Draw { .. } => {}
            }
        }

        self.documents_seen.clear();
        self.shared_texture_cache_cleared = false;

        self.check_gl_errors();

        // Any error pushed to `renderer_errors` during the frame fails the
        // whole render; the error list is drained and handed to the caller.
        if self.renderer_errors.is_empty() {
            Ok(results)
        } else {
            Err(mem::replace(&mut self.renderer_errors, Vec::new()))
        }
    }
1839
1840    fn update_gpu_profile(&mut self, device_size: DeviceIntSize) {
1841        let _gm = self.gpu_profiler.start_marker("build samples");
1842        // Block CPU waiting for last frame's GPU profiles to arrive.
1843        // In general this shouldn't block unless heavily GPU limited.
1844        let (gpu_frame_id, timers, samplers) = self.gpu_profiler.build_samples();
1845
1846        if self.max_recorded_profiles > 0 {
1847            while self.gpu_profiles.len() >= self.max_recorded_profiles {
1848                self.gpu_profiles.pop_front();
1849            }
1850
1851            self.gpu_profiles.push_back(GpuProfile::new(gpu_frame_id, &timers));
1852        }
1853
1854        self.profiler.set_gpu_time_queries(timers);
1855
1856        if !samplers.is_empty() {
1857            let screen_fraction = 1.0 / device_size.to_f32().area();
1858
1859            fn accumulate_sampler_value(description: &str, samplers: &[GpuSampler]) -> f32 {
1860                let mut accum = 0.0;
1861                for sampler in samplers {
1862                    if sampler.tag.label != description {
1863                        continue;
1864                    }
1865
1866                    accum += sampler.count as f32;
1867                }
1868
1869                accum
1870            }
1871
1872            let alpha_targets = accumulate_sampler_value(&"Alpha targets", &samplers) * screen_fraction;
1873            let transparent_pass = accumulate_sampler_value(&"Transparent pass", &samplers) * screen_fraction;
1874            let opaque_pass = accumulate_sampler_value(&"Opaque pass", &samplers) * screen_fraction;
1875            self.profile.set(profiler::ALPHA_TARGETS_SAMPLERS, alpha_targets);
1876            self.profile.set(profiler::TRANSPARENT_PASS_SAMPLERS, transparent_pass);
1877            self.profile.set(profiler::OPAQUE_PASS_SAMPLERS, opaque_pass);
1878            self.profile.set(profiler::TOTAL_SAMPLERS, alpha_targets + transparent_pass + opaque_pass);
1879        }
1880    }
1881
    /// Apply all pending texture cache update lists received from the backend:
    /// texture-to-texture copies, allocations / resets / frees (reusing freed
    /// textures for new allocations where their parameters match), and the
    /// actual pixel uploads. Records create/delete/update timings into the
    /// profiler and fires `FrameTexturesUpdated` notifications when done.
    fn update_texture_cache(&mut self) {
        profile_scope!("update_texture_cache");

        let _gm = self.gpu_profiler.start_marker("texture cache update");
        let mut pending_texture_updates = mem::replace(&mut self.pending_texture_updates, vec![]);
        self.pending_texture_cache_updates = false;

        self.profile.start_time(profiler::TEXTURE_CACHE_UPDATE_TIME);

        // Accumulated nanoseconds spent creating / deleting cache textures,
        // reported to the profiler at the end.
        let mut create_cache_texture_time = 0;
        let mut delete_cache_texture_time = 0;

        for update_list in pending_texture_updates.drain(..) {
            // Handle copies from one texture to another.
            for ((src_tex, dst_tex), copies) in &update_list.copies {

                let dest_texture = &self.texture_resolver.texture_cache_map[&dst_tex].texture;
                let dst_texture_size = dest_texture.get_dimensions().to_f32();

                // Build one instance per copy region for the copy shader.
                let mut copy_instances = Vec::new();
                for copy in copies {
                    copy_instances.push(CopyInstance {
                        src_rect: copy.src_rect.to_f32(),
                        dst_rect: copy.dst_rect.to_f32(),
                        dst_texture_size,
                    });
                }

                // Bind the destination texture and blit all regions from the
                // source via the `ps_copy` shader in a single batch.
                let draw_target = DrawTarget::from_texture(dest_texture, false);
                self.device.bind_draw_target(draw_target);

                self.shaders
                    .borrow_mut()
                    .ps_copy()
                    .bind(
                        &mut self.device,
                        &Transform3D::identity(),
                        None,
                        &mut self.renderer_errors,
                        &mut self.profile,
                    );

                self.draw_instanced_batch(
                    &copy_instances,
                    VertexArrayKind::Copy,
                    &BatchTextures::composite_rgb(
                        TextureSource::TextureCache(*src_tex, Swizzle::default())
                    ),
                    &mut RendererStats::default(),
                );
            }

            // Find any textures that will need to be deleted in this group of allocations.
            let mut pending_deletes = Vec::new();
            for allocation in &update_list.allocations {
                let old = self.texture_resolver.texture_cache_map.remove(&allocation.id);
                // Sanity-check that the renderer's view of which ids exist
                // matches the backend's: Alloc must be new, Reset/Free must
                // refer to an existing texture.
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(_) => {
                        assert!(old.is_none(), "Renderer and backend disagree!");
                    }
                    TextureCacheAllocationKind::Reset(_) |
                    TextureCacheAllocationKind::Free => {
                        assert!(old.is_some(), "Renderer and backend disagree!");
                    }
                }
                if let Some(old) = old {

                    // Regenerate the cache allocation info so we can search through deletes for reuse.
                    let size = old.texture.get_dimensions();
                    let info = TextureCacheAllocInfo {
                        width: size.width,
                        height: size.height,
                        format: old.texture.get_format(),
                        filter: old.texture.get_filter(),
                        target: old.texture.get_target(),
                        is_shared_cache: old.texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE),
                        has_depth: old.texture.supports_depth(),
                        category: old.category,
                    };
                    pending_deletes.push((old.texture, info));
                }
            }
            // Look for any alloc or reset that has matching alloc info and save it from being deleted.
            // `reused_textures` is parallel (front-to-back) with the Alloc/Reset
            // entries in `update_list.allocations`; each slot is Some(texture)
            // when a pending delete could be reused, None otherwise.
            let mut reused_textures = VecDeque::with_capacity(pending_deletes.len());
            for allocation in &update_list.allocations {
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(ref info) |
                    TextureCacheAllocationKind::Reset(ref info) => {
                        reused_textures.push_back(
                            pending_deletes.iter()
                                .position(|(_, old_info)| *old_info == *info)
                                .map(|index| pending_deletes.swap_remove(index).0)
                        );
                    }
                    TextureCacheAllocationKind::Free => {}
                }
            }

            // Now that we've saved as many deletions for reuse as we can, actually delete whatever is left.
            if !pending_deletes.is_empty() {
                let delete_texture_start = precise_time_ns();
                for (texture, _) in pending_deletes {
                    add_event_marker("TextureCacheFree");
                    self.device.delete_texture(texture);
                }
                delete_cache_texture_time += precise_time_ns() - delete_texture_start;
            }

            for allocation in update_list.allocations {
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(_) => add_event_marker("TextureCacheAlloc"),
                    TextureCacheAllocationKind::Reset(_) => add_event_marker("TextureCacheReset"),
                    TextureCacheAllocationKind::Free => {}
                };
                match allocation.kind {
                    TextureCacheAllocationKind::Alloc(ref info) |
                    TextureCacheAllocationKind::Reset(ref info) => {
                        let create_cache_texture_start = precise_time_ns();
                        // Create a new native texture, as requested by the texture cache.
                        // If we managed to reuse a deleted texture, then prefer that instead.
                        //
                        // Ensure no PBO is bound when creating the texture storage,
                        // or GL will attempt to read data from there.
                        let mut texture = reused_textures.pop_front().unwrap_or(None).unwrap_or_else(|| {
                            self.device.create_texture(
                                info.target,
                                info.format,
                                info.width,
                                info.height,
                                info.filter,
                                // This needs to be a render target because some render
                                // tasks get rendered into the texture cache.
                                Some(RenderTargetInfo { has_depth: info.has_depth }),
                            )
                        });

                        if info.is_shared_cache {
                            texture.flags_mut()
                                .insert(TextureFlags::IS_SHARED_TEXTURE_CACHE);

                            // On Mali-Gxx devices we use batched texture uploads as it performs much better.
                            // However, due to another driver bug we must ensure the textures are fully cleared,
                            // otherwise we get visual artefacts when blitting to the texture cache.
                            if self.device.use_batched_texture_uploads() &&
                                !self.device.get_capabilities().supports_render_target_partial_update
                            {
                                self.clear_texture(&texture, [0.0; 4]);
                            }

                            // Textures in the cache generally don't need to be cleared,
                            // but we do so if the debug display is active to make it
                            // easier to identify unallocated regions.
                            if self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
                                self.clear_texture(&texture, TEXTURE_CACHE_DBG_CLEAR_COLOR);
                            }
                        }

                        create_cache_texture_time += precise_time_ns() - create_cache_texture_start;

                        self.texture_resolver.texture_cache_map.insert(allocation.id, CacheTexture {
                            texture,
                            category: info.category,
                        });
                    }
                    TextureCacheAllocationKind::Free => {}
                };
            }

            // Finally, perform the actual pixel uploads for this list.
            upload_to_texture_cache(self, update_list.updates);

            self.check_gl_errors();
        }

        if create_cache_texture_time > 0 {
            self.profile.set(
                profiler::CREATE_CACHE_TEXTURE_TIME,
                profiler::ns_to_ms(create_cache_texture_time)
            );
        }
        if delete_cache_texture_time > 0 {
            self.profile.set(
                profiler::DELETE_CACHE_TEXTURE_TIME,
                profiler::ns_to_ms(delete_cache_texture_time)
            )
        }

        // `t` is in milliseconds; convert to microseconds for telemetry.
        let t = self.profile.end_time(profiler::TEXTURE_CACHE_UPDATE_TIME);
        self.resource_upload_time += t;
        Telemetry::record_texture_cache_update_time(Duration::from_micros((t * 1000.00) as u64));

        // Notify anyone waiting on the FrameTexturesUpdated checkpoint.
        drain_filter(
            &mut self.notifications,
            |n| { n.when() == Checkpoint::FrameTexturesUpdated },
            |n| { n.notify(); },
        );
    }
2078
2079    fn check_gl_errors(&mut self) {
2080        let err = self.device.gl().get_error();
2081        if err == gl::OUT_OF_MEMORY {
2082            self.renderer_errors.push(RendererError::OutOfMemory);
2083        }
2084
2085        // Probably should check for other errors?
2086    }
2087
2088    fn bind_textures(&mut self, textures: &BatchTextures) {
2089        for i in 0 .. 3 {
2090            self.texture_resolver.bind(
2091                &textures.input.colors[i],
2092                TextureSampler::color(i),
2093                &mut self.device,
2094            );
2095        }
2096
2097        self.texture_resolver.bind(
2098            &textures.clip_mask,
2099            TextureSampler::ClipMask,
2100            &mut self.device,
2101        );
2102
2103        // TODO: this probably isn't the best place for this.
2104        if let Some(ref texture) = self.dither_matrix_texture {
2105            self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
2106        }
2107    }
2108
2109    fn draw_instanced_batch<T: Clone>(
2110        &mut self,
2111        data: &[T],
2112        vertex_array_kind: VertexArrayKind,
2113        textures: &BatchTextures,
2114        stats: &mut RendererStats,
2115    ) {
2116        self.bind_textures(textures);
2117
2118        // If we end up with an empty draw call here, that means we have
2119        // probably introduced unnecessary batch breaks during frame
2120        // building - so we should be catching this earlier and removing
2121        // the batch.
2122        debug_assert!(!data.is_empty());
2123
2124        let vao = &self.vaos[vertex_array_kind];
2125        self.device.bind_vao(vao);
2126
2127        let chunk_size = if self.debug_flags.contains(DebugFlags::DISABLE_BATCHING) {
2128            1
2129        } else if vertex_array_kind == VertexArrayKind::Primitive {
2130            self.max_primitive_instance_count
2131        } else {
2132            data.len()
2133        };
2134
2135        for chunk in data.chunks(chunk_size) {
2136            if self.enable_instancing {
2137                self.device
2138                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, None);
2139                self.device
2140                    .draw_indexed_triangles_instanced_u16(6, chunk.len() as i32);
2141            } else {
2142                self.device
2143                    .update_vao_instances(vao, chunk, ONE_TIME_USAGE_HINT, NonZeroUsize::new(4));
2144                self.device
2145                    .draw_indexed_triangles(6 * chunk.len() as i32);
2146            }
2147            self.profile.inc(profiler::DRAW_CALLS);
2148            stats.total_draw_calls += 1;
2149        }
2150
2151        self.profile.add(profiler::VERTICES, 6 * data.len());
2152    }
2153
    /// Copy a region of the backdrop surface into the readback task's
    /// texture, so that a subsequent mix-blend composite can sample the
    /// pixels behind the content being composited.
    fn handle_readback_composite(
        &mut self,
        draw_target: DrawTarget,
        uses_scissor: bool,
        backdrop: &RenderTask,
        readback: &RenderTask,
    ) {
        // Extract the rectangle in the backdrop surface's device space of where
        // we need to read from.
        let readback_origin = match readback.kind {
            RenderTaskKind::Readback(ReadbackTask { readback_origin: Some(o), .. }) => o,
            RenderTaskKind::Readback(ReadbackTask { readback_origin: None, .. }) => {
                // If this is a dummy readback, just early out. We know that the
                // clear of the target will ensure the task rect is already zero alpha,
                // so it won't affect the rendering output.
                return;
            }
            _ => unreachable!(),
        };

        // The blit below must not be clipped by the caller's scissor state;
        // it is re-enabled at the end of this function.
        if uses_scissor {
            self.device.disable_scissor();
        }

        let texture_source = TextureSource::TextureCache(
            readback.get_target_texture(),
            Swizzle::default(),
        );
        let (cache_texture, _) = self.texture_resolver
            .resolve(&texture_source).expect("bug: no source texture");

        // Before submitting the composite batch, do the
        // framebuffer readbacks that are needed for each
        // composite operation in this batch.
        let readback_rect = readback.get_target_rect();
        let backdrop_rect = backdrop.get_target_rect();
        let (backdrop_screen_origin, _) = match backdrop.kind {
            RenderTaskKind::Picture(ref task_info) => (task_info.content_origin, task_info.device_pixel_scale),
            _ => panic!("bug: composite on non-picture?"),
        };

        // Bind the FBO to blit the backdrop to.
        // Called per-instance in case the FBO changes. The device will skip
        // the GL call if the requested target is already bound.
        let cache_draw_target = DrawTarget::from_texture(
            cache_texture,
            false,
        );

        // Get the rect that we ideally want, in space of the parent surface
        let wanted_rect = DeviceRect::from_origin_and_size(
            readback_origin,
            readback_rect.size().to_f32(),
        );

        // Get the rect that is available on the parent surface. It may be smaller
        // than desired because this is a picture cache tile covering only part of
        // the wanted rect and/or because the parent surface was clipped.
        let avail_rect = DeviceRect::from_origin_and_size(
            backdrop_screen_origin,
            backdrop_rect.size().to_f32(),
        );

        if let Some(int_rect) = wanted_rect.intersection(&avail_rect) {
            // If there is a valid intersection, work out the correct origins and
            // sizes of the copy rects, and do the blit.
            let copy_size = int_rect.size().to_i32();

            // Source origin, translated into the backdrop target's local space.
            let src_origin = backdrop_rect.min.to_f32() +
                int_rect.min.to_vector() -
                backdrop_screen_origin.to_vector();

            let src = DeviceIntRect::from_origin_and_size(
                src_origin.to_i32(),
                copy_size,
            );

            // Destination origin, translated into the readback target's local space.
            let dest_origin = readback_rect.min.to_f32() +
                int_rect.min.to_vector() -
                readback_origin.to_vector();

            let dest = DeviceIntRect::from_origin_and_size(
                dest_origin.to_i32(),
                copy_size,
            );

            // Should always be drawing to picture cache tiles or off-screen surface!
            debug_assert!(!draw_target.is_default());
            // Device and framebuffer pixels are 1:1 for off-screen targets.
            let device_to_framebuffer = Scale::new(1i32);

            self.device.blit_render_target(
                draw_target.into(),
                src * device_to_framebuffer,
                cache_draw_target,
                dest * device_to_framebuffer,
                TextureFilter::Linear,
            );
        }

        // Restore draw target to current pass render target, and reset
        // the read target.
        self.device.bind_draw_target(draw_target);
        self.device.reset_read_target();

        // Restore the caller's scissor state.
        if uses_scissor {
            self.device.enable_scissor();
        }
    }
2262
2263    fn handle_resolves(
2264        &mut self,
2265        resolve_ops: &[ResolveOp],
2266        render_tasks: &RenderTaskGraph,
2267        draw_target: DrawTarget,
2268    ) {
2269        if resolve_ops.is_empty() {
2270            return;
2271        }
2272
2273        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);
2274
2275        for resolve_op in resolve_ops {
2276            self.handle_resolve(
2277                resolve_op,
2278                render_tasks,
2279                draw_target,
2280            );
2281        }
2282
2283        self.device.reset_read_target();
2284    }
2285
    /// Draw quad primitive instances into an indirect target. Unclipped
    /// instances (grouped per pattern kind, blending off) are drawn first,
    /// followed by scissored instances with premultiplied-alpha blending.
    fn handle_prims(
        &mut self,
        draw_target: &DrawTarget,
        prim_instances: &[FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>],
        prim_instances_with_scissor: &FastHashMap<(DeviceIntRect, PatternKind), FastHashMap<TextureSource, FrameVec<PrimitiveInstanceData>>>,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        self.device.disable_depth_write();

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_PRIM);

            self.set_blend(false, FramebufferKind::Other);

            // `prim_instances` is indexed by pattern kind; bind the matching
            // quad shader once per non-empty pattern bucket.
            for (pattern_idx, prim_instances_map) in prim_instances.iter().enumerate() {
                if prim_instances_map.is_empty() {
                    continue;
                }
                let pattern = PatternKind::from_u32(pattern_idx as u32);

                self.shaders.borrow_mut().get_quad_shader(pattern).bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                // One draw batch per source texture within this pattern.
                for (texture_source, prim_instances) in prim_instances_map {
                    let texture_bindings = BatchTextures::composite_rgb(*texture_source);

                    self.draw_instanced_batch(
                        prim_instances,
                        VertexArrayKind::Primitive,
                        &texture_bindings,
                        stats,
                    );
                }
            }

            if !prim_instances_with_scissor.is_empty() {
                // Scissored instances are drawn blended with premultiplied alpha.
                self.set_blend(true, FramebufferKind::Other);
                self.device.set_blend_mode_premultiplied_alpha();
                self.device.enable_scissor();

                let mut prev_pattern = None;

                for ((scissor_rect, pattern), prim_instances_map) in prim_instances_with_scissor {
                    // Rebind the quad shader only when the pattern changes.
                    if prev_pattern != Some(*pattern) {
                        prev_pattern = Some(*pattern);
                        self.shaders.borrow_mut().get_quad_shader(*pattern).bind(
                            &mut self.device,
                            projection,
                            None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                        );
                    }

                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    for (texture_source, prim_instances) in prim_instances_map {
                        let texture_bindings = BatchTextures::composite_rgb(*texture_source);

                        self.draw_instanced_batch(
                            prim_instances,
                            VertexArrayKind::Primitive,
                            &texture_bindings,
                            stats,
                        );
                    }
                }

                self.device.disable_scissor();
            }
        }
    }
2364
    /// Draw clip mask instances into the target using multiplicative
    /// blending. Each category (fast rect masks, image masks, slow masks)
    /// is drawn in its unclipped variant first, then its scissored variant.
    fn handle_clips(
        &mut self,
        draw_target: &DrawTarget,
        masks: &ClipMaskInstanceList,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        self.device.disable_depth_write();

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_INDIRECT_MASK);

            // Masks multiply into the existing target contents.
            self.set_blend(true, FramebufferKind::Other);
            self.set_blend_mode_multiply(FramebufferKind::Other);

            // Fast-path rectangle masks, no scissor.
            if !masks.mask_instances_fast.is_empty() {
                self.shaders.borrow_mut().ps_mask_fast().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                self.draw_instanced_batch(
                    &masks.mask_instances_fast,
                    VertexArrayKind::Mask,
                    &BatchTextures::empty(),
                    stats,
                );
            }

            // Fast-path rectangle masks, each batch clipped to its scissor rect.
            if !masks.mask_instances_fast_with_scissor.is_empty() {
                self.shaders.borrow_mut().ps_mask_fast().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                self.device.enable_scissor();

                for (scissor_rect, instances) in &masks.mask_instances_fast_with_scissor {
                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    self.draw_instanced_batch(
                        instances,
                        VertexArrayKind::Mask,
                        &BatchTextures::empty(),
                        stats,
                    );
                }

                self.device.disable_scissor();
            }

            // Image masks, batched per source texture, no scissor.
            if !masks.image_mask_instances.is_empty() {
                self.shaders.borrow_mut().ps_quad_textured().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                for (texture, prim_instances) in &masks.image_mask_instances {
                    self.draw_instanced_batch(
                        prim_instances,
                        VertexArrayKind::Primitive,
                        &BatchTextures::composite_rgb(*texture),
                        stats,
                    );
                }
            }

            // Image masks keyed by (scissor rect, texture).
            if !masks.image_mask_instances_with_scissor.is_empty() {
                self.device.enable_scissor();

                self.shaders.borrow_mut().ps_quad_textured().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                for ((scissor_rect, texture), prim_instances) in &masks.image_mask_instances_with_scissor {
                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    self.draw_instanced_batch(
                        prim_instances,
                        VertexArrayKind::Primitive,
                        &BatchTextures::composite_rgb(*texture),
                        stats,
                    );
                }

                self.device.disable_scissor();
            }

            // Slow-path masks (general clip shader), no scissor.
            if !masks.mask_instances_slow.is_empty() {
                self.shaders.borrow_mut().ps_mask().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                self.draw_instanced_batch(
                    &masks.mask_instances_slow,
                    VertexArrayKind::Mask,
                    &BatchTextures::empty(),
                    stats,
                );
            }

            // Slow-path masks, each batch clipped to its scissor rect.
            if !masks.mask_instances_slow_with_scissor.is_empty() {
                self.shaders.borrow_mut().ps_mask().bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                self.device.enable_scissor();

                for (scissor_rect, instances) in &masks.mask_instances_slow_with_scissor {
                    self.device.set_scissor_rect(draw_target.to_framebuffer_rect(*scissor_rect));

                    self.draw_instanced_batch(
                        instances,
                        VertexArrayKind::Mask,
                        &BatchTextures::empty(),
                        stats,
                    );
                }

                self.device.disable_scissor();
            }
        }
    }
2509
    /// Perform the blit jobs for this target: each job copies a rect from a
    /// child render task's texture into `draw_target`.
    fn handle_blits(
        &mut self,
        blits: &[BlitJob],
        render_tasks: &RenderTaskGraph,
        draw_target: DrawTarget,
    ) {
        if blits.is_empty() {
            return;
        }

        let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLIT);

        // TODO(gw): For now, we don't bother batching these by source texture.
        //           If it ever shows up as an issue, we can easily batch them.
        for blit in blits {
            let (source, source_rect) = {
                // A blit from the child render task into this target.
                // TODO(gw): Support R8 format here once we start
                //           creating mips for alpha masks.
                let task = &render_tasks[blit.source];
                // The job's source rect is relative to the task; offset it by
                // the task's location within its target texture.
                let source_rect = blit.source_rect.translate(task.get_target_rect().min.to_vector());
                let source_texture = task.get_texture_source();

                (source_texture, source_rect)
            };

            let (texture, swizzle) = self.texture_resolver
                .resolve(&source)
                .expect("BUG: invalid source texture");

            // A blit copies raw channels, so a non-default swizzle cannot be
            // honored; log the problem but still perform the copy.
            if swizzle != Swizzle::default() {
                error!("Swizzle {:?} can't be handled by a blit", swizzle);
            }

            let read_target = DrawTarget::from_texture(
                texture,
                false,
            );

            self.device.blit_render_target(
                read_target.into(),
                read_target.to_framebuffer_rect(source_rect),
                draw_target,
                draw_target.to_framebuffer_rect(blit.target_rect),
                TextureFilter::Linear,
            );
        }
    }
2558
    /// Draw scale instances (shader-based blit/scale) grouped by source
    /// texture, rebinding the appropriate scale shader per buffer kind.
    fn handle_scaling(
        &mut self,
        scalings: &FastHashMap<TextureSource, FrameVec<ScalingInstance>>,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        if scalings.is_empty() {
            return
        }

        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SCALE);
        for (source, instances) in scalings {
            let buffer_kind = source.image_buffer_kind();

            // When the source texture is an external texture, the UV rect is not known
            // when the external surface descriptor is created, because external textures
            // are not resolved until the lock() callback is invoked at the start of the
            // frame render. We must therefore override the source rects now.
            // Declared outside the match so the overridden Vec outlives the
            // `instances` slice borrowed from it below.
            let uv_override_instances;
            let instances = match source {
                TextureSource::External(..) => {
                    uv_override_instances = instances.iter().map(|instance| {
                        let mut new_instance = instance.clone();
                        let texel_rect: TexelRect = self.texture_resolver.get_uv_rect(
                            &source,
                            instance.source_rect.cast().into()
                        ).into();
                        new_instance.source_rect = DeviceRect::new(texel_rect.uv0, texel_rect.uv1);
                        new_instance
                    }).collect::<Vec<_>>();
                    uv_override_instances.as_slice()
                }
                _ => instances.as_slice()
            };

            self.shaders
                .borrow_mut()
                .get_scale_shader(buffer_kind)
                .bind(
                    &mut self.device,
                    &projection,
                    // The scale shader is given the source texture size,
                    // presumably to normalize texel rects to UVs -- confirm
                    // against the shader source.
                    Some(self.texture_resolver.get_texture_size(source).to_f32()),
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

            self.draw_instanced_batch(
                instances,
                VertexArrayKind::Scale,
                &BatchTextures::composite_rgb(*source),
                stats,
            );
        }
    }
2613
2614    fn handle_svg_filters(
2615        &mut self,
2616        textures: &BatchTextures,
2617        svg_filters: &[SvgFilterInstance],
2618        projection: &default::Transform3D<f32>,
2619        stats: &mut RendererStats,
2620    ) {
2621        if svg_filters.is_empty() {
2622            return;
2623        }
2624
2625        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER);
2626
2627        self.shaders.borrow_mut().cs_svg_filter().bind(
2628            &mut self.device,
2629            &projection,
2630            None,
2631            &mut self.renderer_errors,
2632            &mut self.profile,
2633        );
2634
2635        self.draw_instanced_batch(
2636            &svg_filters,
2637            VertexArrayKind::SvgFilter,
2638            textures,
2639            stats,
2640        );
2641    }
2642
2643    fn handle_svg_nodes(
2644        &mut self,
2645        textures: &BatchTextures,
2646        svg_filters: &[SVGFEFilterInstance],
2647        projection: &default::Transform3D<f32>,
2648        stats: &mut RendererStats,
2649    ) {
2650        if svg_filters.is_empty() {
2651            return;
2652        }
2653
2654        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SVG_FILTER_NODES);
2655
2656        self.shaders.borrow_mut().cs_svg_filter_node().bind(
2657            &mut self.device,
2658            &projection,
2659            None,
2660            &mut self.renderer_errors,
2661            &mut self.profile,
2662        );
2663
2664        self.draw_instanced_batch(
2665            &svg_filters,
2666            VertexArrayKind::SvgFilterNode,
2667            textures,
2668            stats,
2669        );
2670    }
2671
    /// For each source picture task in the resolve op, blit the region that
    /// intersects the destination picture task from the source's texture
    /// into `draw_target`.
    fn handle_resolve(
        &mut self,
        resolve_op: &ResolveOp,
        render_tasks: &RenderTaskGraph,
        draw_target: DrawTarget,
    ) {
        for src_task_id in &resolve_op.src_task_ids {
            let src_task = &render_tasks[*src_task_id];
            let src_info = match src_task.kind {
                RenderTaskKind::Picture(ref info) => info,
                _ => panic!("bug: not a picture"),
            };
            let src_task_rect = src_task.get_target_rect().to_f32();

            let dest_task = &render_tasks[resolve_op.dest_task_id];
            let dest_info = match dest_task.kind {
                RenderTaskKind::Picture(ref info) => info,
                _ => panic!("bug: not a picture"),
            };
            let dest_task_rect = dest_task.get_target_rect().to_f32();

            // Get the rect that we ideally want, in space of the parent surface
            // (device-pixel scale divided out so both rects are comparable).
            let wanted_rect = DeviceRect::from_origin_and_size(
                dest_info.content_origin,
                dest_task_rect.size().to_f32(),
            ).cast_unit() * dest_info.device_pixel_scale.inverse();

            // Get the rect that is available on the parent surface. It may be smaller
            // than desired because this is a picture cache tile covering only part of
            // the wanted rect and/or because the parent surface was clipped.
            let avail_rect = DeviceRect::from_origin_and_size(
                src_info.content_origin,
                src_task_rect.size().to_f32(),
            ).cast_unit() * src_info.device_pixel_scale.inverse();

            // NOTE(review): despite the name, `device_int_rect` is an f32
            // rect (intersection of two f32 rects); it is only rounded to
            // integers when building `src` / `dest` below.
            if let Some(device_int_rect) = wanted_rect.intersection(&avail_rect) {
                // Scale the shared intersection back into each task's own
                // device-pixel space.
                let src_int_rect = (device_int_rect * src_info.device_pixel_scale).cast_unit();
                let dest_int_rect = (device_int_rect * dest_info.device_pixel_scale).cast_unit();

                // If there is a valid intersection, work out the correct origins and
                // sizes of the copy rects, and do the blit.

                let src_origin = src_task_rect.min.to_f32() +
                    src_int_rect.min.to_vector() -
                    src_info.content_origin.to_vector();

                let src = DeviceIntRect::from_origin_and_size(
                    src_origin.to_i32(),
                    src_int_rect.size().round().to_i32(),
                );

                let dest_origin = dest_task_rect.min.to_f32() +
                    dest_int_rect.min.to_vector() -
                    dest_info.content_origin.to_vector();

                let dest = DeviceIntRect::from_origin_and_size(
                    dest_origin.to_i32(),
                    dest_int_rect.size().round().to_i32(),
                );

                let texture_source = TextureSource::TextureCache(
                    src_task.get_target_texture(),
                    Swizzle::default(),
                );
                let (cache_texture, _) = self.texture_resolver
                    .resolve(&texture_source).expect("bug: no source texture");

                let read_target = ReadTarget::from_texture(cache_texture);

                // Should always be drawing to picture cache tiles or off-screen surface!
                debug_assert!(!draw_target.is_default());
                // Device and framebuffer pixels are 1:1 for off-screen targets.
                let device_to_framebuffer = Scale::new(1i32);

                self.device.blit_render_target(
                    read_target,
                    src * device_to_framebuffer,
                    draw_target,
                    dest * device_to_framebuffer,
                    TextureFilter::Linear,
                );
            }
        }
    }
2755
    /// Render one picture cache tile: bind the target, clear it (possibly
    /// scissored to the dirty rect), then either draw the tile's alpha
    /// batches or blit it from a previously rendered task.
    fn draw_picture_cache_target(
        &mut self,
        target: &PictureCacheTarget,
        draw_target: DrawTarget,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        profile_scope!("draw_picture_cache_target");

        self.profile.inc(profiler::RENDERED_PICTURE_TILES);
        let _gm = self.gpu_profiler.start_marker("picture cache target");
        let framebuffer_kind = FramebufferKind::Other;

        {
            let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
            self.device.bind_draw_target(draw_target);

            // On Qualcomm tiled GPUs, hint the dirty region so only those
            // hardware tiles are processed.
            if self.device.get_capabilities().supports_qcom_tiled_rendering {
                self.device.gl().start_tiling_qcom(
                    target.dirty_rect.min.x.max(0) as _,
                    target.dirty_rect.min.y.max(0) as _,
                    target.dirty_rect.width() as _,
                    target.dirty_rect.height() as _,
                    0,
                );
            }

            self.device.enable_depth_write();
            self.set_blend(false, framebuffer_kind);

            let clear_color = target.clear_color.map(|c| c.to_array());
            // Only scissor the clear when the device supports partial
            // updates and either the tile is partially valid or the device
            // prefers scissored clears.
            let scissor_rect = if self.device.get_capabilities().supports_render_target_partial_update
                && (target.dirty_rect != target.valid_rect
                    || self.device.get_capabilities().prefers_clear_scissor)
            {
                Some(target.dirty_rect)
            } else {
                None
            };
            match scissor_rect {
                // If updating only a dirty rect within a picture cache target, the
                // clear must also be scissored to that dirty region.
                Some(r) if self.clear_caches_with_quads => {
                    // Clear by drawing a quad (for devices where a scissored
                    // clear is slow or unsupported). Depth test Always so the
                    // quad writes depth unconditionally.
                    self.device.enable_depth(DepthFunction::Always);
                    // Save the draw call count so that our reftests don't get confused...
                    let old_draw_call_count = stats.total_draw_calls;
                    // With no clear color, only the depth needs resetting.
                    if clear_color.is_none() {
                        self.device.disable_color_write();
                    }
                    let instance = ClearInstance {
                        rect: [
                            r.min.x as f32, r.min.y as f32,
                            r.max.x as f32, r.max.y as f32,
                        ],
                        color: clear_color.unwrap_or([0.0; 4]),
                    };
                    self.shaders.borrow_mut().ps_clear().bind(
                        &mut self.device,
                        &projection,
                        None,
                        &mut self.renderer_errors,
                        &mut self.profile,
                    );
                    self.draw_instanced_batch(
                        &[instance],
                        VertexArrayKind::Clear,
                        &BatchTextures::empty(),
                        stats,
                    );
                    if clear_color.is_none() {
                        self.device.enable_color_write();
                    }
                    stats.total_draw_calls = old_draw_call_count;
                    self.device.disable_depth();
                }
                other => {
                    // Regular clear, optionally scissored to the dirty rect.
                    let scissor_rect = other.map(|rect| {
                        draw_target.build_scissor_rect(Some(rect))
                    });
                    self.device.clear_target(clear_color, Some(1.0), scissor_rect);
                }
            };
            self.device.disable_depth_write();
        }

        match target.kind {
            PictureCacheTargetKind::Draw { ref alpha_batch_container } => {
                self.draw_alpha_batch_container(
                    alpha_batch_container,
                    draw_target,
                    framebuffer_kind,
                    projection,
                    render_tasks,
                    stats,
                );
            }
            PictureCacheTargetKind::Blit { task_id, sub_rect_offset } => {
                let src_task = &render_tasks[task_id];
                let (texture, _swizzle) = self.texture_resolver
                    .resolve(&src_task.get_texture_source())
                    .expect("BUG: invalid source texture");

                let src_task_rect = src_task.get_target_rect();

                // Source rect: the task rect offset by the requested sub-rect,
                // sized to match the dirty region being updated.
                let p0 = src_task_rect.min + sub_rect_offset;
                let p1 = p0 + target.dirty_rect.size();
                let src_rect = DeviceIntRect::new(p0, p1);

                // TODO(gw): In future, it'd be tidier to have the draw target offset
                //           for DC surfaces handled by `blit_render_target`. However,
                //           for now they are only ever written to here.
                let target_rect = target
                    .dirty_rect
                    .translate(draw_target.offset().to_vector())
                    .cast_unit();

                self.device.blit_render_target(
                    ReadTarget::from_texture(texture),
                    src_rect.cast_unit(),
                    draw_target,
                    target_rect,
                    TextureFilter::Nearest,
                );
            }
        }

        // The depth buffer contents are not needed after this tile is done.
        self.device.invalidate_depth_target();
        if self.device.get_capabilities().supports_qcom_tiled_rendering {
            self.device.gl().end_tiling_qcom(gl::COLOR_BUFFER_BIT0_QCOM);
        }
    }
2888
    /// Draw an alpha batch container into a given draw target. This is used
    /// by both color and picture cache target kinds.
    ///
    /// Opaque batches are drawn first, front-to-back with depth test and
    /// depth writes enabled so the z-buffer rejects hidden fragments; alpha
    /// batches are then drawn in list order with blending enabled, changing
    /// GL blend state only when the batch blend mode actually changes.
    fn draw_alpha_batch_container(
        &mut self,
        alpha_batch_container: &AlphaBatchContainer,
        draw_target: DrawTarget,
        framebuffer_kind: FramebufferKind,
        projection: &default::Transform3D<f32>,
        render_tasks: &RenderTaskGraph,
        stats: &mut RendererStats,
    ) {
        let uses_scissor = alpha_batch_container.task_scissor_rect.is_some();

        // If the container is restricted to a sub-rect of the target, enable
        // a scissor rect for the duration of this container's draws.
        if uses_scissor {
            self.device.enable_scissor();
            let scissor_rect = draw_target.build_scissor_rect(
                alpha_batch_container.task_scissor_rect,
            );
            self.device.set_scissor_rect(scissor_rect)
        }

        if !alpha_batch_container.opaque_batches.is_empty()
            && !self.debug_flags.contains(DebugFlags::DISABLE_OPAQUE_PASS) {
            let _gl = self.gpu_profiler.start_marker("opaque batches");
            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
            self.set_blend(false, framebuffer_kind);
            //Note: depth equality is needed for split planes
            self.device.enable_depth(DepthFunction::LessEqual);
            self.device.enable_depth_write();

            // Draw opaque batches front-to-back for maximum
            // z-buffer efficiency!
            for batch in alpha_batch_container
                .opaque_batches
                .iter()
                .rev()
                {
                    // Some batch kinds can be skipped via debug flags.
                    if should_skip_batch(&batch.key.kind, self.debug_flags) {
                        continue;
                    }

                    self.shaders.borrow_mut()
                        .get(&batch.key, batch.features, self.debug_flags, &self.device)
                        .bind(
                            &mut self.device, projection, None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                        );

                    let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
                    self.draw_instanced_batch(
                        &batch.instances,
                        VertexArrayKind::Primitive,
                        &batch.key.textures,
                        stats
                    );
                }

            self.device.disable_depth_write();
            self.gpu_profiler.finish_sampler(opaque_sampler);
        } else {
            // No opaque batches are drawn, so the depth buffer is not needed.
            self.device.disable_depth();
        }

        if !alpha_batch_container.alpha_batches.is_empty()
            && !self.debug_flags.contains(DebugFlags::DISABLE_ALPHA_PASS) {
            let _gl = self.gpu_profiler.start_marker("alpha batches");
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            self.set_blend(true, framebuffer_kind);

            // Track the last applied blend mode so device blend state is only
            // changed when consecutive batches differ.
            let mut prev_blend_mode = BlendMode::None;
            // Clone the shader cache handle so it can be borrowed mutably in
            // the loop without conflicting with the `&mut self` calls below.
            let shaders_rc = self.shaders.clone();

            for batch in &alpha_batch_container.alpha_batches {
                if should_skip_batch(&batch.key.kind, self.debug_flags) {
                    continue;
                }

                let mut shaders = shaders_rc.borrow_mut();
                let shader = shaders.get(
                    &batch.key,
                    batch.features | BatchFeatures::ALPHA_PASS,
                    self.debug_flags,
                    &self.device,
                );

                if batch.key.blend_mode != prev_blend_mode {
                    match batch.key.blend_mode {
                        // Overdraw debug visualization overrides the batch's
                        // requested blend mode on the main framebuffer.
                        _ if self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) &&
                            framebuffer_kind == FramebufferKind::Main => {
                            self.device.set_blend_mode_show_overdraw();
                        }
                        BlendMode::None => {
                            unreachable!("bug: opaque blend in alpha pass");
                        }
                        BlendMode::Alpha => {
                            self.device.set_blend_mode_alpha();
                        }
                        BlendMode::PremultipliedAlpha => {
                            self.device.set_blend_mode_premultiplied_alpha();
                        }
                        BlendMode::PremultipliedDestOut => {
                            self.device.set_blend_mode_premultiplied_dest_out();
                        }
                        BlendMode::SubpixelDualSource => {
                            self.device.set_blend_mode_subpixel_dual_source();
                        }
                        BlendMode::Advanced(mode) => {
                            // KHR_blend_equation_advanced may require an
                            // explicit barrier between overlapping draws.
                            if self.enable_advanced_blend_barriers {
                                self.device.gl().blend_barrier_khr();
                            }
                            self.device.set_blend_mode_advanced(mode);
                        }
                        BlendMode::MultiplyDualSource => {
                            self.device.set_blend_mode_multiply_dual_source();
                        }
                        BlendMode::Screen => {
                            self.device.set_blend_mode_screen();
                        }
                        BlendMode::Exclusion => {
                            self.device.set_blend_mode_exclusion();
                        }
                        BlendMode::PlusLighter => {
                            self.device.set_blend_mode_plus_lighter();
                        }
                    }
                    prev_blend_mode = batch.key.blend_mode;
                }

                // Handle special case readback for composites.
                if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, backdrop_id }) = batch.key.kind {
                    // composites can't be grouped together because
                    // they may overlap and affect each other.
                    debug_assert_eq!(batch.instances.len(), 1);
                    self.handle_readback_composite(
                        draw_target,
                        uses_scissor,
                        &render_tasks[task_id],
                        &render_tasks[backdrop_id],
                    );
                }

                let _timer = self.gpu_profiler.start_timer(batch.key.kind.sampler_tag());
                shader.bind(
                    &mut self.device,
                    projection,
                    None,
                    &mut self.renderer_errors,
                    &mut self.profile,
                );

                self.draw_instanced_batch(
                    &batch.instances,
                    VertexArrayKind::Primitive,
                    &batch.key.textures,
                    stats
                );
            }

            self.set_blend(false, framebuffer_kind);
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }

        // Restore device state modified by this container.
        self.device.disable_depth();
        if uses_scissor {
            self.device.disable_scissor();
        }
    }
3057
    /// Rasterize any external compositor surfaces that require updating
    ///
    /// For each surface with pending `update_params`, this binds the native
    /// compositor surface, draws the resolved YUV or RGB plane(s) into it
    /// with the appropriate composite shader, and unbinds it again.
    fn update_external_native_surfaces(
        &mut self,
        external_surfaces: &[ResolvedExternalSurface],
        results: &mut RenderResults,
    ) {
        if external_surfaces.is_empty() {
            return;
        }

        let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);

        // These are direct copies into the native surface: no depth testing
        // or blending is required.
        self.device.disable_depth();
        self.set_blend(false, FramebufferKind::Main);

        for surface in external_surfaces {
            // See if this surface needs to be updated
            let (native_surface_id, surface_size) = match surface.update_params {
                Some(params) => params,
                None => continue,
            };

            // When updating an external surface, the entire surface rect is used
            // for all of the draw, dirty, valid and clip rect parameters.
            let surface_rect = surface_size.into();

            // Bind the native compositor surface to update
            let surface_info = self.compositor_config
                .compositor()
                .unwrap()
                .bind(
                    &mut self.device,
                    NativeTileId {
                        surface_id: native_surface_id,
                        x: 0,
                        y: 0,
                    },
                    surface_rect,
                    surface_rect,
                );

            // Bind the native surface to current FBO target
            let draw_target = DrawTarget::NativeSurface {
                offset: surface_info.origin,
                external_fbo_id: surface_info.fbo_id,
                dimensions: surface_size,
            };
            self.device.bind_draw_target(draw_target);

            // Orthographic projection covering exactly the surface rect.
            let projection = Transform3D::ortho(
                0.0,
                surface_size.width as f32,
                0.0,
                surface_size.height as f32,
                self.device.ortho_near_plane(),
                self.device.ortho_far_plane(),
            );

            // Build the composite instance and bind a matching shader,
            // depending on whether the surface provides YUV planes or a
            // single RGB plane.
            let ( textures, instance ) = match surface.color_data {
                ResolvedExternalSurfaceColorData::Yuv{
                        ref planes, color_space, format, channel_bit_depth, .. } => {

                    let textures = BatchTextures::composite_yuv(
                        planes[0].texture,
                        planes[1].texture,
                        planes[2].texture,
                    );

                    // When the texture is an external texture, the UV rect is not known when
                    // the external surface descriptor is created, because external textures
                    // are not resolved until the lock() callback is invoked at the start of
                    // the frame render. To handle this, query the texture resolver for the
                    // UV rect if it's an external texture, otherwise use the default UV rect.
                    let uv_rects = [
                        self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
                        self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
                        self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
                    ];

                    let instance = CompositeInstance::new_yuv(
                        surface_rect.to_f32(),
                        surface_rect.to_f32(),
                        // z-id is not relevant when updating a native compositor surface.
                        // TODO(gw): Support compositor surfaces without z-buffer, for memory / perf win here.
                        color_space,
                        format,
                        channel_bit_depth,
                        uv_rects,
                        (false, false),
                        None,
                    );

                    // Bind an appropriate YUV shader for the texture format kind
                    self.shaders
                        .borrow_mut()
                        .get_composite_shader(
                            CompositeSurfaceFormat::Yuv,
                            surface.image_buffer_kind,
                            instance.get_yuv_features(),
                        ).bind(
                            &mut self.device,
                            &projection,
                            None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                        );

                    ( textures, instance )
                },
                ResolvedExternalSurfaceColorData::Rgb{ ref plane, .. } => {
                    let textures = BatchTextures::composite_rgb(plane.texture);
                    // As above, resolve the UV rect in case this is an
                    // external texture.
                    let uv_rect = self.texture_resolver.get_uv_rect(&textures.input.colors[0], plane.uv_rect);
                    let instance = CompositeInstance::new_rgb(
                        surface_rect.to_f32(),
                        surface_rect.to_f32(),
                        PremultipliedColorF::WHITE,
                        uv_rect,
                        plane.texture.uses_normalized_uvs(),
                        (false, false),
                        None,
                    );
                    let features = instance.get_rgb_features();

                    self.shaders
                        .borrow_mut()
                        .get_composite_shader(
                            CompositeSurfaceFormat::Rgba,
                            surface.image_buffer_kind,
                            features,
                        ).bind(
                            &mut self.device,
                            &projection,
                            None,
                            &mut self.renderer_errors,
                            &mut self.profile,
                        );

                    ( textures, instance )
                },
            };

            // A single instance is drawn per surface update.
            self.draw_instanced_batch(
                &[instance],
                VertexArrayKind::Composite,
                &textures,
                &mut results.stats,
            );

            // Release the native surface back to the compositor.
            self.compositor_config
                .compositor()
                .unwrap()
                .unbind(&mut self.device);
        }

        self.gpu_profiler.finish_sampler(opaque_sampler);
    }
3214
3215    /// Draw a list of tiles to the framebuffer
3216    fn draw_tile_list<'a, I: Iterator<Item = &'a occlusion::Item<OcclusionItemKey>>>(
3217        &mut self,
3218        tiles_iter: I,
3219        composite_state: &CompositeState,
3220        external_surfaces: &[ResolvedExternalSurface],
3221        projection: &default::Transform3D<f32>,
3222        stats: &mut RendererStats,
3223    ) {
3224        let mut current_shader_params = (
3225            CompositeSurfaceFormat::Rgba,
3226            ImageBufferKind::Texture2D,
3227            CompositeFeatures::empty(),
3228            None,
3229        );
3230        let mut current_textures = BatchTextures::empty();
3231        let mut instances = Vec::new();
3232
3233        self.shaders
3234            .borrow_mut()
3235            .get_composite_shader(
3236                current_shader_params.0,
3237                current_shader_params.1,
3238                current_shader_params.2,
3239            ).bind(
3240                &mut self.device,
3241                projection,
3242                None,
3243                &mut self.renderer_errors,
3244                &mut self.profile,
3245            );
3246
3247        for item in tiles_iter {
3248            let tile = &composite_state.tiles[item.key.tile_index];
3249
3250            let clip_rect = item.rectangle;
3251            let tile_rect = composite_state.get_device_rect(&tile.local_rect, tile.transform_index);
3252            let transform = composite_state.get_device_transform(tile.transform_index);
3253            let flip = (transform.scale.x < 0.0, transform.scale.y < 0.0);
3254
3255            let clip = if item.key.needs_mask {
3256                tile.clip_index.map(|index| {
3257                    composite_state.get_compositor_clip(index)
3258                })
3259            } else {
3260                None
3261            };
3262
3263            // Work out the draw params based on the tile surface
3264            let (instance, textures, shader_params) = match tile.surface {
3265                CompositeTileSurface::Color { color } => {
3266                    let dummy = TextureSource::Dummy;
3267                    let image_buffer_kind = dummy.image_buffer_kind();
3268                    let instance = CompositeInstance::new(
3269                        tile_rect,
3270                        clip_rect,
3271                        color.premultiplied(),
3272                        flip,
3273                        clip,
3274                    );
3275                    let features = instance.get_rgb_features();
3276                    (
3277                        instance,
3278                        BatchTextures::composite_rgb(dummy),
3279                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
3280                    )
3281                }
3282                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::TextureCache { texture } } => {
3283                    let instance = CompositeInstance::new(
3284                        tile_rect,
3285                        clip_rect,
3286                        PremultipliedColorF::WHITE,
3287                        flip,
3288                        clip,
3289                    );
3290                    let features = instance.get_rgb_features();
3291                    (
3292                        instance,
3293                        BatchTextures::composite_rgb(texture),
3294                        (
3295                            CompositeSurfaceFormat::Rgba,
3296                            ImageBufferKind::Texture2D,
3297                            features,
3298                            None,
3299                        ),
3300                    )
3301                }
3302                CompositeTileSurface::ExternalSurface { external_surface_index } => {
3303                    let surface = &external_surfaces[external_surface_index.0];
3304
3305                    match surface.color_data {
3306                        ResolvedExternalSurfaceColorData::Yuv{ ref planes, color_space, format, channel_bit_depth, .. } => {
3307                            let textures = BatchTextures::composite_yuv(
3308                                planes[0].texture,
3309                                planes[1].texture,
3310                                planes[2].texture,
3311                            );
3312
3313                            // When the texture is an external texture, the UV rect is not known when
3314                            // the external surface descriptor is created, because external textures
3315                            // are not resolved until the lock() callback is invoked at the start of
3316                            // the frame render. To handle this, query the texture resolver for the
3317                            // UV rect if it's an external texture, otherwise use the default UV rect.
3318                            let uv_rects = [
3319                                self.texture_resolver.get_uv_rect(&textures.input.colors[0], planes[0].uv_rect),
3320                                self.texture_resolver.get_uv_rect(&textures.input.colors[1], planes[1].uv_rect),
3321                                self.texture_resolver.get_uv_rect(&textures.input.colors[2], planes[2].uv_rect),
3322                            ];
3323
3324                            let instance = CompositeInstance::new_yuv(
3325                                tile_rect,
3326                                clip_rect,
3327                                color_space,
3328                                format,
3329                                channel_bit_depth,
3330                                uv_rects,
3331                                flip,
3332                                clip,
3333                            );
3334                            let features = instance.get_yuv_features();
3335
3336                            (
3337                                instance,
3338                                textures,
3339                                (
3340                                    CompositeSurfaceFormat::Yuv,
3341                                    surface.image_buffer_kind,
3342                                    features,
3343                                    None
3344                                ),
3345                            )
3346                        },
3347                        ResolvedExternalSurfaceColorData::Rgb { ref plane, .. } => {
3348                            let uv_rect = self.texture_resolver.get_uv_rect(&plane.texture, plane.uv_rect);
3349                            let instance = CompositeInstance::new_rgb(
3350                                tile_rect,
3351                                clip_rect,
3352                                PremultipliedColorF::WHITE,
3353                                uv_rect,
3354                                plane.texture.uses_normalized_uvs(),
3355                                flip,
3356                                clip,
3357                            );
3358                            let features = instance.get_rgb_features();
3359                            (
3360                                instance,
3361                                BatchTextures::composite_rgb(plane.texture),
3362                                (
3363                                    CompositeSurfaceFormat::Rgba,
3364                                    surface.image_buffer_kind,
3365                                    features,
3366                                    Some(self.texture_resolver.get_texture_size(&plane.texture).to_f32()),
3367                                ),
3368                            )
3369                        },
3370                    }
3371                }
3372                CompositeTileSurface::Clear => {
3373                    let dummy = TextureSource::Dummy;
3374                    let image_buffer_kind = dummy.image_buffer_kind();
3375                    let instance = CompositeInstance::new(
3376                        tile_rect,
3377                        clip_rect,
3378                        PremultipliedColorF::BLACK,
3379                        flip,
3380                        clip,
3381                    );
3382                    let features = instance.get_rgb_features();
3383                    (
3384                        instance,
3385                        BatchTextures::composite_rgb(dummy),
3386                        (CompositeSurfaceFormat::Rgba, image_buffer_kind, features, None),
3387                    )
3388                }
3389                CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { .. } } => {
3390                    unreachable!("bug: found native surface in simple composite path");
3391                }
3392            };
3393
3394            // Flush batch if shader params or textures changed
3395            let flush_batch = !current_textures.is_compatible_with(&textures) ||
3396                shader_params != current_shader_params;
3397
3398            if flush_batch {
3399                if !instances.is_empty() {
3400                    self.draw_instanced_batch(
3401                        &instances,
3402                        VertexArrayKind::Composite,
3403                        &current_textures,
3404                        stats,
3405                    );
3406                    instances.clear();
3407                }
3408            }
3409
3410            if shader_params != current_shader_params {
3411                self.shaders
3412                    .borrow_mut()
3413                    .get_composite_shader(shader_params.0, shader_params.1, shader_params.2)
3414                    .bind(
3415                        &mut self.device,
3416                        projection,
3417                        shader_params.3,
3418                        &mut self.renderer_errors,
3419                        &mut self.profile,
3420                    );
3421
3422                current_shader_params = shader_params;
3423            }
3424
3425            current_textures = textures;
3426
3427            // Add instance to current batch
3428            instances.push(instance);
3429        }
3430
3431        // Flush the last batch
3432        if !instances.is_empty() {
3433            self.draw_instanced_batch(
3434                &instances,
3435                VertexArrayKind::Composite,
3436                &current_textures,
3437                stats,
3438            );
3439        }
3440    }
3441
    /// Composite tiles in a swapchain. When using LayerCompositor, we may
    /// split the compositing in to multiple swapchains.
    ///
    /// Draws, in order: opaque tiles (front-to-back), clear tiles, then
    /// alpha tiles (back-to-front), after clearing the (possibly partial)
    /// dirty region of the target.
    fn composite_pass(
        &mut self,
        composite_state: &CompositeState,
        draw_target: DrawTarget,
        clear_color: ColorF,
        projection: &default::Transform3D<f32>,
        results: &mut RenderResults,
        partial_present_mode: Option<PartialPresentMode>,
        layer: &SwapChainLayer,
    ) {
        self.device.bind_draw_target(draw_target);
        // Compositing relies on draw order, not the depth buffer.
        self.device.disable_depth_write();
        self.device.disable_depth();

        // If using KHR_partial_update, call eglSetDamageRegion.
        // This must be called exactly once per frame, and prior to any rendering to the main
        // framebuffer. Additionally, on Mali-G77 we encountered rendering issues when calling
        // this earlier in the frame, during offscreen render passes. So call it now, immediately
        // before rendering to the main framebuffer. See bug 1685276 for details.
        if let Some(partial_present) = self.compositor_config.partial_present() {
            if let Some(PartialPresentMode::Single { dirty_rect }) = partial_present_mode {
                partial_present.set_buffer_damage_region(&[dirty_rect.to_i32()]);
            }
        }

        // Clear the framebuffer
        let clear_color = Some(clear_color.to_array());

        match partial_present_mode {
            Some(PartialPresentMode::Single { dirty_rect }) => {
                // There is no need to clear if the dirty rect is occluded. Additionally,
                // on Mali-G77 we have observed artefacts when calling glClear (even with
                // the empty scissor rect set) after calling eglSetDamageRegion with an
                // empty damage region. So avoid clearing in that case. See bug 1709548.
                if !dirty_rect.is_empty() && layer.occlusion.test(&dirty_rect) {
                    // We have a single dirty rect, so clear only that
                    self.device.clear_target(clear_color,
                                             None,
                                             Some(draw_target.to_framebuffer_rect(dirty_rect.to_i32())));
                }
            }
            None => {
                // Partial present is disabled, so clear the entire framebuffer
                self.device.clear_target(clear_color,
                                         None,
                                         None);
            }
        }

        // Draw opaque tiles
        let opaque_items = layer.occlusion.opaque_items();
        if !opaque_items.is_empty() {
            let opaque_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_OPAQUE);
            // Opaque tiles are drawn with blending disabled.
            self.set_blend(false, FramebufferKind::Main);
            self.draw_tile_list(
                opaque_items.iter(),
                &composite_state,
                &composite_state.external_surfaces,
                projection,
                &mut results.stats,
            );
            self.gpu_profiler.finish_sampler(opaque_sampler);
        }

        // Draw clear tiles
        if !layer.clear_tiles.is_empty() {
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            // Clear tiles punch holes in the destination alpha.
            self.set_blend(true, FramebufferKind::Main);
            self.device.set_blend_mode_premultiplied_dest_out();
            self.draw_tile_list(
                layer.clear_tiles.iter(),
                &composite_state,
                &composite_state.external_surfaces,
                projection,
                &mut results.stats,
            );
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }

        // Draw alpha tiles
        let alpha_items = layer.occlusion.alpha_items();
        if !alpha_items.is_empty() {
            let transparent_sampler = self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_TRANSPARENT);
            self.set_blend(true, FramebufferKind::Main);
            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Main);
            // The alpha item list is front-to-back; reverse it so alpha tiles
            // blend back-to-front.
            self.draw_tile_list(
                alpha_items.iter().rev(),
                &composite_state,
                &composite_state.external_surfaces,
                projection,
                &mut results.stats,
            );
            self.gpu_profiler.finish_sampler(transparent_sampler);
        }
    }
3539
3540    /// Composite picture cache tiles into the framebuffer. This is currently
3541    /// the only way that picture cache tiles get drawn. In future, the tiles
3542    /// will often be handed to the OS compositor, and this method will be
3543    /// rarely used.
3544    fn composite_simple(
3545        &mut self,
3546        composite_state: &CompositeState,
3547        frame_device_size: DeviceIntSize,
3548        fb_draw_target: DrawTarget,
3549        projection: &default::Transform3D<f32>,
3550        results: &mut RenderResults,
3551        partial_present_mode: Option<PartialPresentMode>,
3552        device_size: DeviceIntSize,
3553    ) {
3554        let _gm = self.gpu_profiler.start_marker("framebuffer");
3555        let _timer = self.gpu_profiler.start_timer(GPU_TAG_COMPOSITE);
3556
3557        // We are only interested in tiles backed with actual cached pixels so we don't
3558        // count clear tiles here.
3559        let num_tiles = composite_state.tiles
3560            .iter()
3561            .filter(|tile| tile.kind != TileKind::Clear).count();
3562        self.profile.set(profiler::PICTURE_TILES, num_tiles);
3563
3564        let (window_is_opaque, enable_screenshot)  = match self.compositor_config.layer_compositor() {
3565            Some(ref compositor) => {
3566                let props = compositor.get_window_properties();
3567                (props.is_opaque, props.enable_screenshot)
3568            }
3569            None => (true, true)
3570        };
3571
3572        let mut input_layers: Vec<CompositorInputLayer> = Vec::new();
3573        let mut swapchain_layers = Vec::new();
3574        let cap = composite_state.tiles.len();
3575        let mut segment_builder = SegmentBuilder::new();
3576        let mut tile_index_to_layer_index = vec![None; composite_state.tiles.len()];
3577        let mut use_external_composite = false;
3578
3579        // Calculate layers with full device rect
3580
3581        // Add a debug overlay request if enabled
3582        if self.debug_overlay_state.is_enabled {
3583            self.debug_overlay_state.layer_index = input_layers.len();
3584
3585            input_layers.push(CompositorInputLayer {
3586                usage: CompositorSurfaceUsage::DebugOverlay,
3587                is_opaque: false,
3588                offset: DeviceIntPoint::zero(),
3589                clip_rect: device_size.into(),
3590            });
3591
3592            swapchain_layers.push(SwapChainLayer {
3593                clear_tiles: Vec::new(),
3594                occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
3595            });
3596        }
3597
3598        // NOTE: Tiles here are being iterated in front-to-back order by
3599        //       z-id, due to the sort in composite_state.end_frame()
3600        for (idx, tile) in composite_state.tiles.iter().enumerate() {
3601            let device_tile_box = composite_state.get_device_rect(
3602                &tile.local_rect,
3603                tile.transform_index
3604            );
3605
3606            // Simple compositor needs the valid rect in device space to match clip rect
3607            let device_valid_rect = composite_state
3608                .get_device_rect(&tile.local_valid_rect, tile.transform_index);
3609
3610            let rect = device_tile_box
3611                .intersection_unchecked(&tile.device_clip_rect)
3612                .intersection_unchecked(&device_valid_rect);
3613
3614            if rect.is_empty() {
3615                continue;
3616            }
3617
3618            // Determine if the tile is an external surface or content
3619            let usage = match tile.surface {
3620                CompositeTileSurface::Texture { .. } |
3621                CompositeTileSurface::Color { .. } |
3622                CompositeTileSurface::Clear => {
3623                    CompositorSurfaceUsage::Content
3624                }
3625                CompositeTileSurface::ExternalSurface { external_surface_index } => {
3626                    match (self.current_compositor_kind, enable_screenshot) {
3627                        (CompositorKind::Native { .. }, _) | (CompositorKind::Draw { .. }, _) => {
3628                            CompositorSurfaceUsage::Content
3629                        }
3630                        (CompositorKind::Layer { .. }, true) => {
3631                            CompositorSurfaceUsage::Content
3632                        }
3633                        (CompositorKind::Layer { .. }, false) => {
3634                            let surface = &composite_state.external_surfaces[external_surface_index.0];
3635
3636                            // TODO(gwc): For now, we only select a hardware overlay swapchain if we
3637                            // have an external image, but it may make sense to do for compositor
3638                            // surfaces without in future.
3639                            match surface.external_image_id {
3640                                Some(external_image_id) => {
3641                                    let image_key = match surface.color_data {
3642                                        ResolvedExternalSurfaceColorData::Rgb { image_dependency, .. } => image_dependency.key,
3643                                        ResolvedExternalSurfaceColorData::Yuv { image_dependencies, .. } => image_dependencies[0].key,
3644                                    };
3645
3646                                    CompositorSurfaceUsage::External {
3647                                        image_key,
3648                                        external_image_id,
3649                                        transform_index: tile.transform_index,
3650                                    }
3651                                }
3652                                None => {
3653                                    CompositorSurfaceUsage::Content
3654                                }
3655                            }
3656                        }
3657                    }
3658                }
3659            };
3660
3661            // Determine whether we need a new layer, and if so, what kind
3662            let new_layer_kind = match input_layers.last() {
3663                Some(curr_layer) => {
3664                    match (curr_layer.usage, usage) {
3665                        // Content -> content, composite in to same layer
3666                        (CompositorSurfaceUsage::Content, CompositorSurfaceUsage::Content) => None,
3667                        (CompositorSurfaceUsage::External { .. }, CompositorSurfaceUsage::Content) => Some(usage),
3668
3669                        // Switch of layer type, or video -> video, need new swapchain
3670                        (CompositorSurfaceUsage::Content, CompositorSurfaceUsage::External { .. }) |
3671                        (CompositorSurfaceUsage::External { .. }, CompositorSurfaceUsage::External { .. }) => {
3672                            // Only create a new layer if we're using LayerCompositor
3673                            match self.compositor_config {
3674                                CompositorConfig::Draw { .. } | CompositorConfig::Native { .. } => None,
3675                                CompositorConfig::Layer { .. } => {
3676                                    Some(usage)
3677                                }
3678                            }
3679                        }
3680                        (CompositorSurfaceUsage::DebugOverlay, _) => {
3681                            Some(usage)
3682                        }
3683                        // Should not encounter debug layers as new layer
3684                        (_, CompositorSurfaceUsage::DebugOverlay) => {
3685                            unreachable!();
3686                        }
3687                    }
3688                }
3689                None => {
3690                    // No layers yet, so we need a new one
3691                    Some(usage)
3692                }
3693            };
3694
3695            if let Some(new_layer_kind) = new_layer_kind {
3696                let (offset, clip_rect, is_opaque) = match usage {
3697                    CompositorSurfaceUsage::Content => {
3698                        (
3699                            DeviceIntPoint::zero(),
3700                            device_size.into(),
3701                            false,      // Assume not opaque, we'll calculate this later
3702                        )
3703                    }
3704                    CompositorSurfaceUsage::External { .. } => {
3705                        use_external_composite = true;
3706                        let rect = composite_state.get_device_rect(
3707                            &tile.local_rect,
3708                            tile.transform_index
3709                        );
3710
3711                        let clip_rect = tile.device_clip_rect.to_i32();
3712                        let is_opaque = tile.kind != TileKind::Alpha;
3713
3714                        (rect.min.to_i32(), clip_rect, is_opaque)
3715                    }
3716                    CompositorSurfaceUsage::DebugOverlay => unreachable!(),
3717                };
3718
3719                input_layers.push(CompositorInputLayer {
3720                    usage: new_layer_kind,
3721                    is_opaque,
3722                    offset,
3723                    clip_rect,
3724                });
3725
3726                swapchain_layers.push(SwapChainLayer {
3727                    clear_tiles: Vec::new(),
3728                    occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
3729                })
3730            }
3731            tile_index_to_layer_index[idx] = Some(input_layers.len() - 1);
3732        }
3733
3734        assert_eq!(swapchain_layers.len(), input_layers.len());
3735
3736        if window_is_opaque {
3737            match input_layers.first_mut() {
3738                Some(_layer) => {
3739                    // If the window is opaque, and the first layer is a content layer
3740                    // then mark that as opaque.
3741                    // TODO(gw): This causes flickering in some cases when changing
3742                    //           layer count. We need to find out why so we can enable
3743                    //           selecting an opaque swapchain where possible.
3744                    // if let CompositorSurfaceUsage::Content = layer.usage {
3745                    //     layer.is_opaque = true;
3746                    // }
3747                }
3748                None => {
3749                    // If no tiles were present, and we expect an opaque window,
3750                    // add an empty layer to force a composite that clears the screen,
3751                    // to match existing semantics.
3752                    input_layers.push(CompositorInputLayer {
3753                        usage: CompositorSurfaceUsage::Content,
3754                        is_opaque: true,
3755                        offset: DeviceIntPoint::zero(),
3756                        clip_rect: device_size.into(),
3757                    });
3758
3759                    swapchain_layers.push(SwapChainLayer {
3760                        clear_tiles: Vec::new(),
3761                        occlusion: occlusion::FrontToBackBuilder::with_capacity(cap, cap),
3762                    });
3763                }
3764            }
3765        }
3766
3767        let mut full_render = false;
3768
3769        // Start compositing if using OS compositor
3770        if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
3771            let input = CompositorInputConfig {
3772                enable_screenshot,
3773                layers: &input_layers,
3774            };
3775            full_render = compositor.begin_frame(&input);
3776        }
3777
3778        // Full render is requested when layer tree is updated.
3779        let mut partial_present_mode = if full_render {
3780            None
3781        } else {
3782            partial_present_mode
3783        };
3784
3785        assert_eq!(swapchain_layers.len(), input_layers.len());
3786
3787        // Recalculate dirty rect if external composite is used with layer compositor
3788        if let Some(ref _compositor) = self.compositor_config.layer_compositor() {
3789            if partial_present_mode.is_some() && use_external_composite {
3790                let mut combined_dirty_rect = DeviceRect::zero();
3791                let fb_rect = DeviceRect::from_size(frame_device_size.to_f32());
3792
3793                // Work out how many dirty rects WR produced, and if that's more than
3794                // what the device supports.
3795                for (idx, tile) in composite_state.tiles.iter().enumerate() {
3796                    if tile.kind == TileKind::Clear {
3797                        continue;
3798                    }
3799
3800                    let layer_index = match tile_index_to_layer_index[idx] {
3801                        None => {
3802                            continue;
3803                        }
3804                        Some(layer_index) => layer_index,
3805                    };
3806
3807                    let layer = &mut input_layers[layer_index];
3808                    // Skip compositing external images
3809                    match layer.usage {
3810                        CompositorSurfaceUsage::Content | CompositorSurfaceUsage::DebugOverlay => {}
3811                        CompositorSurfaceUsage::External { .. } => {
3812                            match tile.surface {
3813                                CompositeTileSurface::ExternalSurface { .. } => {}
3814                                CompositeTileSurface::Texture { .. }  |
3815                                CompositeTileSurface::Color { .. } |
3816                                CompositeTileSurface::Clear => {
3817                                    unreachable!();
3818                                },
3819                            }
3820                            continue;
3821                        }
3822                    }
3823
3824                    let dirty_rect = composite_state.get_device_rect(
3825                        &tile.local_dirty_rect,
3826                        tile.transform_index,
3827                    );
3828
3829                    // In pathological cases where a tile is extremely zoomed, it
3830                    // may end up with device coords outside the range of an i32,
3831                    // so clamp it to the frame buffer rect here, before it gets
3832                    // casted to an i32 rect below.
3833                    if let Some(dirty_rect) = dirty_rect.intersection(&fb_rect) {
3834                        combined_dirty_rect = combined_dirty_rect.union(&dirty_rect);
3835                    }
3836                }
3837
3838                let combined_dirty_rect = combined_dirty_rect.round();
                // layer compositor does not expect to draw previous partial present regions
3840                partial_present_mode = Some(PartialPresentMode::Single {
3841                    dirty_rect: combined_dirty_rect,
3842                });
3843            }
3844        }
3845
3846        // Check tiles handling with partial_present_mode
3847
3848        // NOTE: Tiles here are being iterated in front-to-back order by
3849        //       z-id, due to the sort in composite_state.end_frame()
3850        for (idx, tile) in composite_state.tiles.iter().enumerate() {
3851            let device_tile_box = composite_state.get_device_rect(
3852                &tile.local_rect,
3853                tile.transform_index
3854            );
3855
3856            // Determine a clip rect to apply to this tile, depending on what
3857            // the partial present mode is.
3858            let partial_clip_rect = match partial_present_mode {
3859                Some(PartialPresentMode::Single { dirty_rect }) => dirty_rect,
3860                None => device_tile_box,
3861            };
3862
3863            // Simple compositor needs the valid rect in device space to match clip rect
3864            let device_valid_rect = composite_state
3865                .get_device_rect(&tile.local_valid_rect, tile.transform_index);
3866
3867            let rect = device_tile_box
3868                .intersection_unchecked(&tile.device_clip_rect)
3869                .intersection_unchecked(&partial_clip_rect)
3870                .intersection_unchecked(&device_valid_rect);
3871
3872            if rect.is_empty() {
3873                continue;
3874            }
3875
3876            let layer_index = match tile_index_to_layer_index[idx] {
3877                None => {
3878                    // The rect of partial present should be subset of the rect of full render.
3879                    error!("rect {:?} should have valid layer index", rect);
3880                    continue;
3881                }
3882                Some(layer_index) => layer_index,
3883            };
3884
3885            // For normal tiles, add to occlusion tracker. For clear tiles, add directly
3886            // to the swapchain tile list
3887            let layer = &mut swapchain_layers[layer_index];
3888
3889            // Clear tiles overwrite whatever is under them, so they are treated as opaque.
3890            match tile.kind {
3891                TileKind::Opaque | TileKind::Alpha => {
3892                    let is_opaque = tile.kind != TileKind::Alpha;
3893
3894                    match tile.clip_index {
3895                        Some(clip_index) => {
3896                            let clip = composite_state.get_compositor_clip(clip_index);
3897
3898                                // TODO(gw): Make segment builder generic on unit to avoid casts below.
3899                            segment_builder.initialize(
3900                                rect.cast_unit(),
3901                                None,
3902                                rect.cast_unit(),
3903                            );
3904                            segment_builder.push_clip_rect(
3905                                clip.rect.cast_unit(),
3906                                Some(clip.radius),
3907                                ClipMode::Clip,
3908                            );
3909                            segment_builder.build(|segment| {
3910                                let key = OcclusionItemKey { tile_index: idx, needs_mask: segment.has_mask };
3911
3912                                layer.occlusion.add(
3913                                    &segment.rect.cast_unit(),
3914                                    is_opaque && !segment.has_mask,
3915                                    key,
3916                                );
3917                            });
3918                        }
3919                        None => {
3920                            layer.occlusion.add(&rect, is_opaque, OcclusionItemKey {
3921                                tile_index: idx,
3922                                needs_mask: false,
3923                            });
3924                        }
3925                    }
3926                }
3927                TileKind::Clear => {
3928                    // Clear tiles are specific to how we render the window buttons on
3929                    // Windows 8. They clobber what's under them so they can be treated as opaque,
3930                    // but require a different blend state so they will be rendered after the opaque
3931                    // tiles and before transparent ones.
3932                    layer.clear_tiles.push(occlusion::Item { rectangle: rect, key: OcclusionItemKey { tile_index: idx, needs_mask: false } });
3933                }
3934            }
3935        }
3936
3937        assert_eq!(swapchain_layers.len(), input_layers.len());
3938
3939        for (layer_index, (layer, swapchain_layer)) in input_layers.iter().zip(swapchain_layers.iter()).enumerate() {
3940            self.device.reset_state();
3941
3942            // Skip compositing external images or debug layers here
3943            match layer.usage {
3944                CompositorSurfaceUsage::Content => {}
3945                CompositorSurfaceUsage::External { .. } | CompositorSurfaceUsage::DebugOverlay => {
3946                    continue;
3947                }
3948            }
3949
3950            let clear_color = if layer_index == 0 {
3951                self.clear_color
3952            } else {
3953                ColorF::TRANSPARENT
3954            };
3955
3956            if let Some(ref mut _compositor) = self.compositor_config.layer_compositor() {
3957                if let Some(PartialPresentMode::Single { dirty_rect }) = partial_present_mode {
3958                    if dirty_rect.is_empty() {
3959                        continue;
3960                    }
3961                }
3962            }
3963
3964            let draw_target = match self.compositor_config {
3965                CompositorConfig::Layer { ref mut compositor } => {
3966                    match partial_present_mode {
3967                        Some(PartialPresentMode::Single { dirty_rect }) => {
3968                            compositor.bind_layer(layer_index, &[dirty_rect.to_i32()]);
3969                        }
3970                        None => {
3971                            compositor.bind_layer(layer_index, &[]);
3972                        }
3973                    };
3974
3975                    DrawTarget::NativeSurface {
3976                        offset: -layer.offset,
3977                        external_fbo_id: 0,
3978                        dimensions: frame_device_size,
3979                    }
3980                }
3981                // Native can be hit when switching compositors (disable when using Layer)
3982                CompositorConfig::Draw { .. } | CompositorConfig::Native { .. } => {
3983                    fb_draw_target
3984                }
3985            };
3986
3987            // TODO(gwc): When supporting external attached swapchains, need to skip the composite pass here
3988
3989            // Draw each compositing pass in to a swap chain
3990            self.composite_pass(
3991                composite_state,
3992                draw_target,
3993                clear_color,
3994                projection,
3995                results,
3996                partial_present_mode,
3997                swapchain_layer,
3998            );
3999
4000            if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
4001                match partial_present_mode {
4002                    Some(PartialPresentMode::Single { dirty_rect }) => {
4003                        compositor.present_layer(layer_index, &[dirty_rect.to_i32()]);
4004                    }
4005                    None => {
4006                        compositor.present_layer(layer_index, &[]);
4007                    }
4008                };
4009            }
4010        }
4011
4012        // End frame notify for experimental compositor
4013        if let Some(ref mut compositor) = self.compositor_config.layer_compositor() {
4014            for (layer_index, layer) in input_layers.iter().enumerate() {
4015                // External surfaces need transform applied, but content
4016                // surfaces are always at identity
4017                let transform = match layer.usage {
4018                    CompositorSurfaceUsage::Content => CompositorSurfaceTransform::identity(),
4019                    CompositorSurfaceUsage::External { transform_index, .. } => composite_state.get_compositor_transform(transform_index),
4020                    CompositorSurfaceUsage::DebugOverlay => CompositorSurfaceTransform::identity(),
4021                };
4022
4023                compositor.add_surface(
4024                    layer_index,
4025                    transform,
4026                    layer.clip_rect,
4027                    ImageRendering::Auto,
4028                );
4029            }
4030        }
4031    }
4032
4033    fn clear_render_target(
4034        &mut self,
4035        target: &RenderTarget,
4036        draw_target: DrawTarget,
4037        framebuffer_kind: FramebufferKind,
4038        projection: &default::Transform3D<f32>,
4039        stats: &mut RendererStats,
4040    ) {
4041        let needs_depth = target.needs_depth();
4042
4043        let clear_depth = if needs_depth {
4044            Some(1.0)
4045        } else {
4046            None
4047        };
4048
4049        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_TARGET);
4050
4051        self.device.disable_depth();
4052        self.set_blend(false, framebuffer_kind);
4053
4054        let is_alpha = target.target_kind == RenderTargetKind::Alpha;
4055        let require_precise_clear = target.cached;
4056
4057        // On some Mali-T devices we have observed crashes in subsequent draw calls
4058        // immediately after clearing the alpha render target regions with glClear().
4059        // Using the shader to clear the regions avoids the crash. See bug 1638593.
4060        let clear_with_quads = (target.cached && self.clear_caches_with_quads)
4061            || (is_alpha && self.clear_alpha_targets_with_quads);
4062
4063        let favor_partial_updates = self.device.get_capabilities().supports_render_target_partial_update
4064            && self.enable_clear_scissor;
4065
4066        // On some Adreno 4xx devices we have seen render tasks to alpha targets have no
4067        // effect unless the target is fully cleared prior to rendering. See bug 1714227.
4068        let full_clears_on_adreno = is_alpha && self.device.get_capabilities().requires_alpha_target_full_clear;
4069        let require_full_clear = !require_precise_clear
4070            && (full_clears_on_adreno || !favor_partial_updates);
4071
4072        let clear_color = target
4073            .clear_color
4074            .map(|color| color.to_array());
4075
4076        let mut cleared_depth = false;
4077        if clear_with_quads {
4078            // Will be handled last. Only specific rects will be cleared.
4079        } else if require_precise_clear {
4080            // Only clear specific rects
4081            for (rect, color) in &target.clears {
4082                self.device.clear_target(
4083                    Some(color.to_array()),
4084                    None,
4085                    Some(draw_target.to_framebuffer_rect(*rect)),
4086                );
4087            }
4088        } else {
4089            // At this point we know we don't require precise clears for correctness.
4090            // We may still attempt to restruct the clear rect as an optimization on
4091            // some configurations.
4092            let clear_rect = if require_full_clear {
4093                None
4094            } else {
4095                match draw_target {
4096                    DrawTarget::Default { rect, total_size, .. } => {
4097                        if rect.min == FramebufferIntPoint::zero() && rect.size() == total_size {
4098                            // Whole screen is covered, no need for scissor
4099                            None
4100                        } else {
4101                            Some(rect)
4102                        }
4103                    }
4104                    DrawTarget::Texture { .. } => {
4105                        // TODO(gw): Applying a scissor rect and minimal clear here
4106                        // is a very large performance win on the Intel and nVidia
4107                        // GPUs that I have tested with. It's possible it may be a
4108                        // performance penalty on other GPU types - we should test this
4109                        // and consider different code paths.
4110                        //
4111                        // Note: The above measurements were taken when render
4112                        // target slices were minimum 2048x2048. Now that we size
4113                        // them adaptively, this may be less of a win (except perhaps
4114                        // on a mostly-unused last slice of a large texture array).
4115                        target.used_rect.map(|rect| draw_target.to_framebuffer_rect(rect))
4116                    }
4117                    // Full clear.
4118                    _ => None,
4119                }
4120            };
4121
4122            self.device.clear_target(
4123                clear_color,
4124                clear_depth,
4125                clear_rect,
4126            );
4127            cleared_depth = true;
4128        }
4129
4130        // Make sure to clear the depth buffer if it is used.
4131        if needs_depth && !cleared_depth {
4132            // TODO: We could also clear the depth buffer via ps_clear. This
4133            // is done by picture cache targets in some cases.
4134            self.device.clear_target(None, clear_depth, None);
4135        }
4136
4137        // Finally, if we decided to clear with quads or if we need to clear
4138        // some areas with specific colors that don't match the global clear
4139        // color, clear more areas using a draw call.
4140
4141        let mut clear_instances = Vec::with_capacity(target.clears.len());
4142        for (rect, color) in &target.clears {
4143            if clear_with_quads || (!require_precise_clear && target.clear_color != Some(*color)) {
4144                let rect = rect.to_f32();
4145                clear_instances.push(ClearInstance {
4146                    rect: [
4147                        rect.min.x, rect.min.y,
4148                        rect.max.x, rect.max.y,
4149                    ],
4150                    color: color.to_array(),
4151                })
4152            }
4153        }
4154
4155        if !clear_instances.is_empty() {
4156            self.shaders.borrow_mut().ps_clear().bind(
4157                &mut self.device,
4158                &projection,
4159                None,
4160                &mut self.renderer_errors,
4161                &mut self.profile,
4162            );
4163            self.draw_instanced_batch(
4164                &clear_instances,
4165                VertexArrayKind::Clear,
4166                &BatchTextures::empty(),
4167                stats,
4168            );
4169        }
4170    }
4171
4172    fn draw_render_target(
4173        &mut self,
4174        texture_id: CacheTextureId,
4175        target: &RenderTarget,
4176        render_tasks: &RenderTaskGraph,
4177        stats: &mut RendererStats,
4178    ) {
4179        let needs_depth = target.needs_depth();
4180
4181        let texture = self.texture_resolver.get_cache_texture_mut(&texture_id);
4182        if needs_depth {
4183            self.device.reuse_render_target::<u8>(
4184                texture,
4185                RenderTargetInfo { has_depth: needs_depth },
4186            );
4187        }
4188
4189        let draw_target = DrawTarget::from_texture(
4190            texture,
4191            needs_depth,
4192        );
4193
4194        let projection = Transform3D::ortho(
4195            0.0,
4196            draw_target.dimensions().width as f32,
4197            0.0,
4198            draw_target.dimensions().height as f32,
4199            self.device.ortho_near_plane(),
4200            self.device.ortho_far_plane(),
4201        );
4202
4203        profile_scope!("draw_render_target");
4204        let _gm = self.gpu_profiler.start_marker("render target");
4205
4206        let counter = match target.target_kind {
4207            RenderTargetKind::Color => profiler::COLOR_PASSES,
4208            RenderTargetKind::Alpha => profiler::ALPHA_PASSES,
4209        };
4210        self.profile.inc(counter);
4211
4212        let sampler_query = match target.target_kind {
4213            RenderTargetKind::Color => None,
4214            RenderTargetKind::Alpha => Some(self.gpu_profiler.start_sampler(GPU_SAMPLER_TAG_ALPHA)),
4215        };
4216
4217        // sanity check for the depth buffer
4218        if let DrawTarget::Texture { with_depth, .. } = draw_target {
4219            assert!(with_depth >= target.needs_depth());
4220        }
4221
4222        let framebuffer_kind = if draw_target.is_default() {
4223            FramebufferKind::Main
4224        } else {
4225            FramebufferKind::Other
4226        };
4227
4228        self.device.bind_draw_target(draw_target);
4229
4230        if self.device.get_capabilities().supports_qcom_tiled_rendering {
4231            let preserve_mask = match target.clear_color {
4232                Some(_) => 0,
4233                None => gl::COLOR_BUFFER_BIT0_QCOM,
4234            };
4235            if let Some(used_rect) = target.used_rect {
4236                self.device.gl().start_tiling_qcom(
4237                    used_rect.min.x.max(0) as _,
4238                    used_rect.min.y.max(0) as _,
4239                    used_rect.width() as _,
4240                    used_rect.height() as _,
4241                    preserve_mask,
4242                );
4243            }
4244        }
4245
4246        if needs_depth {
4247            self.device.enable_depth_write();
4248        } else {
4249            self.device.disable_depth_write();
4250        }
4251
4252        self.clear_render_target(
4253            target,
4254            draw_target,
4255            framebuffer_kind,
4256            &projection,
4257            stats,
4258        );
4259
4260        if needs_depth {
4261            self.device.disable_depth_write();
4262        }
4263
4264        // Handle any resolves from parent pictures to this target
4265        self.handle_resolves(
4266            &target.resolve_ops,
4267            render_tasks,
4268            draw_target,
4269        );
4270
4271        // Handle any blits from the texture cache to this target.
4272        self.handle_blits(
4273            &target.blits,
4274            render_tasks,
4275            draw_target,
4276        );
4277
4278        // Draw any borders for this target.
4279        if !target.border_segments_solid.is_empty() ||
4280           !target.border_segments_complex.is_empty()
4281        {
4282            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_BORDER);
4283
4284            self.set_blend(true, FramebufferKind::Other);
4285            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);
4286
4287            if !target.border_segments_solid.is_empty() {
4288                self.shaders.borrow_mut().cs_border_solid().bind(
4289                    &mut self.device,
4290                    &projection,
4291                    None,
4292                    &mut self.renderer_errors,
4293                    &mut self.profile,
4294                );
4295
4296                self.draw_instanced_batch(
4297                    &target.border_segments_solid,
4298                    VertexArrayKind::Border,
4299                    &BatchTextures::empty(),
4300                    stats,
4301                );
4302            }
4303
4304            if !target.border_segments_complex.is_empty() {
4305                self.shaders.borrow_mut().cs_border_segment().bind(
4306                    &mut self.device,
4307                    &projection,
4308                    None,
4309                    &mut self.renderer_errors,
4310                    &mut self.profile,
4311                );
4312
4313                self.draw_instanced_batch(
4314                    &target.border_segments_complex,
4315                    VertexArrayKind::Border,
4316                    &BatchTextures::empty(),
4317                    stats,
4318                );
4319            }
4320
4321            self.set_blend(false, FramebufferKind::Other);
4322        }
4323
4324        // Draw any line decorations for this target.
4325        if !target.line_decorations.is_empty() {
4326            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINE_DECORATION);
4327
4328            self.set_blend(true, FramebufferKind::Other);
4329            self.set_blend_mode_premultiplied_alpha(FramebufferKind::Other);
4330
4331            self.shaders.borrow_mut().cs_line_decoration().bind(
4332                &mut self.device,
4333                &projection,
4334                None,
4335                &mut self.renderer_errors,
4336                &mut self.profile,
4337            );
4338
4339            self.draw_instanced_batch(
4340                &target.line_decorations,
4341                VertexArrayKind::LineDecoration,
4342                &BatchTextures::empty(),
4343                stats,
4344            );
4345
4346            self.set_blend(false, FramebufferKind::Other);
4347        }
4348
4349        // Draw any fast path linear gradients for this target.
4350        if !target.fast_linear_gradients.is_empty() {
4351            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_FAST_LINEAR_GRADIENT);
4352
4353            self.set_blend(false, FramebufferKind::Other);
4354
4355            self.shaders.borrow_mut().cs_fast_linear_gradient().bind(
4356                &mut self.device,
4357                &projection,
4358                None,
4359                &mut self.renderer_errors,
4360                &mut self.profile,
4361            );
4362
4363            self.draw_instanced_batch(
4364                &target.fast_linear_gradients,
4365                VertexArrayKind::FastLinearGradient,
4366                &BatchTextures::empty(),
4367                stats,
4368            );
4369        }
4370
4371        // Draw any linear gradients for this target.
4372        if !target.linear_gradients.is_empty() {
4373            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_LINEAR_GRADIENT);
4374
4375            self.set_blend(false, FramebufferKind::Other);
4376
4377            self.shaders.borrow_mut().cs_linear_gradient().bind(
4378                &mut self.device,
4379                &projection,
4380                None,
4381                &mut self.renderer_errors,
4382                &mut self.profile,
4383            );
4384
4385            if let Some(ref texture) = self.dither_matrix_texture {
4386                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
4387            }
4388
4389            self.draw_instanced_batch(
4390                &target.linear_gradients,
4391                VertexArrayKind::LinearGradient,
4392                &BatchTextures::empty(),
4393                stats,
4394            );
4395        }
4396
4397        // Draw any radial gradients for this target.
4398        if !target.radial_gradients.is_empty() {
4399            let _timer = self.gpu_profiler.start_timer(GPU_TAG_RADIAL_GRADIENT);
4400
4401            self.set_blend(false, FramebufferKind::Other);
4402
4403            self.shaders.borrow_mut().cs_radial_gradient().bind(
4404                &mut self.device,
4405                &projection,
4406                None,
4407                &mut self.renderer_errors,
4408                &mut self.profile,
4409            );
4410
4411            if let Some(ref texture) = self.dither_matrix_texture {
4412                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
4413            }
4414
4415            self.draw_instanced_batch(
4416                &target.radial_gradients,
4417                VertexArrayKind::RadialGradient,
4418                &BatchTextures::empty(),
4419                stats,
4420            );
4421        }
4422
4423        // Draw any conic gradients for this target.
4424        if !target.conic_gradients.is_empty() {
4425            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CONIC_GRADIENT);
4426
4427            self.set_blend(false, FramebufferKind::Other);
4428
4429            self.shaders.borrow_mut().cs_conic_gradient().bind(
4430                &mut self.device,
4431                &projection,
4432                None,
4433                &mut self.renderer_errors,
4434                &mut self.profile,
4435            );
4436
4437            if let Some(ref texture) = self.dither_matrix_texture {
4438                self.device.bind_texture(TextureSampler::Dither, texture, Swizzle::default());
4439            }
4440
4441            self.draw_instanced_batch(
4442                &target.conic_gradients,
4443                VertexArrayKind::ConicGradient,
4444                &BatchTextures::empty(),
4445                stats,
4446            );
4447        }
4448
4449        // Draw any blurs for this target.
4450        // Blurs are rendered as a standard 2-pass
4451        // separable implementation.
4452        // TODO(gw): In the future, consider having
4453        //           fast path blur shaders for common
4454        //           blur radii with fixed weights.
4455        if !target.vertical_blurs.is_empty() || !target.horizontal_blurs.is_empty() {
4456            let _timer = self.gpu_profiler.start_timer(GPU_TAG_BLUR);
4457
4458            self.set_blend(false, framebuffer_kind);
4459            self.shaders.borrow_mut().cs_blur_rgba8()
4460                .bind(&mut self.device, &projection, None, &mut self.renderer_errors, &mut self.profile);
4461
4462            if !target.vertical_blurs.is_empty() {
4463                self.draw_blurs(
4464                    &target.vertical_blurs,
4465                    stats,
4466                );
4467            }
4468
4469            if !target.horizontal_blurs.is_empty() {
4470                self.draw_blurs(
4471                    &target.horizontal_blurs,
4472                    stats,
4473                );
4474            }
4475        }
4476
4477        self.handle_scaling(
4478            &target.scalings,
4479            &projection,
4480            stats,
4481        );
4482
4483        for (ref textures, ref filters) in &target.svg_filters {
4484            self.handle_svg_filters(
4485                textures,
4486                filters,
4487                &projection,
4488                stats,
4489            );
4490        }
4491
4492        for (ref textures, ref filters) in &target.svg_nodes {
4493            self.handle_svg_nodes(textures, filters, &projection, stats);
4494        }
4495
4496        for alpha_batch_container in &target.alpha_batch_containers {
4497            self.draw_alpha_batch_container(
4498                alpha_batch_container,
4499                draw_target,
4500                framebuffer_kind,
4501                &projection,
4502                render_tasks,
4503                stats,
4504            );
4505        }
4506
4507        self.handle_prims(
4508            &draw_target,
4509            &target.prim_instances,
4510            &target.prim_instances_with_scissor,
4511            &projection,
4512            stats,
4513        );
4514
4515        // Draw the clip items into the tiled alpha mask.
4516        {
4517            let _timer = self.gpu_profiler.start_timer(GPU_TAG_CACHE_CLIP);
4518
4519            // TODO(gw): Consider grouping multiple clip masks per shader
4520            //           invocation here to reduce memory bandwith further?
4521
4522            if !target.clip_batcher.primary_clips.is_empty() {
4523                // Draw the primary clip mask - since this is the first mask
4524                // for the task, we can disable blending, knowing that it will
4525                // overwrite every pixel in the mask area.
4526                self.set_blend(false, FramebufferKind::Other);
4527                self.draw_clip_batch_list(
4528                    &target.clip_batcher.primary_clips,
4529                    &projection,
4530                    stats,
4531                );
4532            }
4533
4534            if !target.clip_batcher.secondary_clips.is_empty() {
4535                // switch to multiplicative blending for secondary masks, using
4536                // multiplicative blending to accumulate clips into the mask.
4537                self.set_blend(true, FramebufferKind::Other);
4538                self.set_blend_mode_multiply(FramebufferKind::Other);
4539                self.draw_clip_batch_list(
4540                    &target.clip_batcher.secondary_clips,
4541                    &projection,
4542                    stats,
4543                );
4544            }
4545
4546            self.handle_clips(
4547                &draw_target,
4548                &target.clip_masks,
4549                &projection,
4550                stats,
4551            );
4552        }
4553
4554        if needs_depth {
4555            self.device.invalidate_depth_target();
4556        }
4557        if self.device.get_capabilities().supports_qcom_tiled_rendering {
4558            self.device.gl().end_tiling_qcom(gl::COLOR_BUFFER_BIT0_QCOM);
4559        }
4560
4561        if let Some(sampler) = sampler_query {
4562            self.gpu_profiler.finish_sampler(sampler);
4563        }
4564    }
4565
4566    fn draw_blurs(
4567        &mut self,
4568        blurs: &FastHashMap<TextureSource, FrameVec<BlurInstance>>,
4569        stats: &mut RendererStats,
4570    ) {
4571        for (texture, blurs) in blurs {
4572            let textures = BatchTextures::composite_rgb(
4573                *texture,
4574            );
4575
4576            self.draw_instanced_batch(
4577                blurs,
4578                VertexArrayKind::Blur,
4579                &textures,
4580                stats,
4581            );
4582        }
4583    }
4584
    /// Draw all the instances in a clip batcher list to the current target.
    ///
    /// Issues draws in a fixed order: slow (rounded-corner) clip rectangles,
    /// then fast clip rectangles, then box-shadow clips (one draw call per
    /// mask texture). Blend state is expected to have been configured by the
    /// caller. Does nothing when the `DISABLE_CLIP_MASKS` debug flag is set.
    fn draw_clip_batch_list(
        &mut self,
        list: &ClipBatchList,
        projection: &default::Transform3D<f32>,
        stats: &mut RendererStats,
    ) {
        // Debug escape hatch: skip all clip mask rendering.
        if self.debug_flags.contains(DebugFlags::DISABLE_CLIP_MASKS) {
            return;
        }

        // draw rounded cornered rectangles
        if !list.slow_rectangles.is_empty() {
            let _gm2 = self.gpu_profiler.start_marker("slow clip rectangles");
            self.shaders.borrow_mut().cs_clip_rectangle_slow().bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
            );
            // Rectangle clips sample no textures; geometry is fully
            // described by the instance data.
            self.draw_instanced_batch(
                &list.slow_rectangles,
                VertexArrayKind::ClipRect,
                &BatchTextures::empty(),
                stats,
            );
        }
        if !list.fast_rectangles.is_empty() {
            let _gm2 = self.gpu_profiler.start_marker("fast clip rectangles");
            self.shaders.borrow_mut().cs_clip_rectangle_fast().bind(
                &mut self.device,
                projection,
                None,
                &mut self.renderer_errors,
                &mut self.profile,
            );
            self.draw_instanced_batch(
                &list.fast_rectangles,
                VertexArrayKind::ClipRect,
                &BatchTextures::empty(),
                stats,
            );
        }

        // draw box-shadow clips
        // Box shadows are grouped by the texture holding their pre-rendered
        // mask, so each group needs its own texture binding (and hence its
        // own draw call).
        for (mask_texture_id, items) in list.box_shadows.iter() {
            let _gm2 = self.gpu_profiler.start_marker("box-shadows");
            let textures = BatchTextures::composite_rgb(*mask_texture_id);
            self.shaders.borrow_mut().cs_clip_box_shadow()
                .bind(&mut self.device, projection, None, &mut self.renderer_errors, &mut self.profile);
            self.draw_instanced_batch(
                items,
                VertexArrayKind::ClipBoxShadow,
                &textures,
                stats,
            );
        }
    }
4644
    /// Resolve external images referenced by the frame, producing a GPU cache
    /// update list that patches their UV rects.
    ///
    /// For each deferred resolve this locks the external image via the
    /// registered handler, records the resulting native texture in the
    /// texture resolver (keyed by `DeferredResolveIndex`), and appends a
    /// GPU cache copy that writes the image's UV rect. Returns `None` when
    /// there is nothing to resolve. The matching unlocks happen later in
    /// `unlock_external_images`.
    ///
    /// Panics if an external image is present but no handler was set, or if
    /// a resolve refers to a buffer/raw-data image (only texture handles are
    /// valid here).
    fn update_deferred_resolves(&mut self, deferred_resolves: &[DeferredResolve]) -> Option<GpuCacheUpdateList> {
        // The first thing we do is run through any pending deferred
        // resolves, and use a callback to get the UV rect for this
        // custom item. Then we patch the resource_rects structure
        // here before it's uploaded to the GPU.
        if deferred_resolves.is_empty() {
            return None;
        }

        let handler = self.external_image_handler
            .as_mut()
            .expect("Found external image, but no handler set!");

        // Build a synthetic update list; frame_id is irrelevant here since
        // this list is applied immediately rather than queued per-frame.
        let mut list = GpuCacheUpdateList {
            frame_id: FrameId::INVALID,
            clear: false,
            height: self.gpu_cache_texture.get_height(),
            blocks: Vec::new(),
            updates: Vec::new(),
            debug_commands: Vec::new(),
        };

        for (i, deferred_resolve) in deferred_resolves.iter().enumerate() {
            self.gpu_profiler.place_marker("deferred resolve");
            let props = &deferred_resolve.image_properties;
            let ext_image = props
                .external_image
                .expect("BUG: Deferred resolves must be external images!");
            // Provide rendering information for NativeTexture external images.
            let image = handler.lock(ext_image.id, ext_image.channel_index);
            let texture_target = match ext_image.image_type {
                ExternalImageType::TextureHandle(target) => target,
                ExternalImageType::Buffer => {
                    panic!("not a suitable image type in update_deferred_resolves()");
                }
            };

            // In order to produce the handle, the external image handler may call into
            // the GL context and change some states.
            self.device.reset_state();

            let texture = match image.source {
                ExternalImageSource::NativeTexture(texture_id) => {
                    ExternalTexture::new(
                        texture_id,
                        texture_target,
                        image.uv,
                        deferred_resolve.rendering,
                    )
                }
                ExternalImageSource::Invalid => {
                    warn!("Invalid ext-image");
                    debug!(
                        "For ext_id:{:?}, channel:{}.",
                        ext_image.id,
                        ext_image.channel_index
                    );
                    // Just use 0 as the gl handle for this failed case.
                    ExternalTexture::new(
                        0,
                        texture_target,
                        image.uv,
                        deferred_resolve.rendering,
                    )
                }
                ExternalImageSource::RawData(_) => {
                    panic!("Raw external data is not expected for deferred resolves!");
                }
            };

            // Remember the locked texture so draws can bind it; the same map
            // is drained in unlock_external_images() to release the locks.
            self.texture_resolver
                .external_images
                .insert(DeferredResolveIndex(i as u32), texture);

            // Patch this resolve's GPU cache address with the UV rect plus a
            // zero padding block (BLOCKS_PER_UV_RECT blocks total).
            list.updates.push(GpuCacheUpdate::Copy {
                block_index: list.blocks.len(),
                block_count: BLOCKS_PER_UV_RECT,
                address: deferred_resolve.address,
            });
            list.blocks.push(image.uv.into());
            list.blocks.push([0f32; 4].into());
        }

        Some(list)
    }
4730
4731    fn unlock_external_images(
4732        &mut self,
4733        deferred_resolves: &[DeferredResolve],
4734    ) {
4735        if !self.texture_resolver.external_images.is_empty() {
4736            let handler = self.external_image_handler
4737                .as_mut()
4738                .expect("Found external image, but no handler set!");
4739
4740            for (index, _) in self.texture_resolver.external_images.drain() {
4741                let props = &deferred_resolves[index.0 as usize].image_properties;
4742                let ext_image = props
4743                    .external_image
4744                    .expect("BUG: Deferred resolves must be external images!");
4745                handler.unlock(ext_image.id, ext_image.channel_index);
4746            }
4747        }
4748    }
4749
    /// Update the dirty rects based on current compositing mode and config.
    ///
    /// Computes the region of the framebuffer that must be redrawn this
    /// frame, pushes the client-visible dirty rects into `results`, and
    /// returns the partial-present mode to use (currently at most a single
    /// combined dirty rect), or `None` when partial present isn't possible.
    // TODO(gw): This can be tidied up significantly once the Draw compositor
    //           is implemented in terms of the compositor trait.
    fn calculate_dirty_rects(
        &mut self,
        buffer_age: usize,
        composite_state: &CompositeState,
        draw_target_dimensions: DeviceIntSize,
        results: &mut RenderResults,
    ) -> Option<PartialPresentMode> {
        let mut partial_present_mode = None;

        // How many dirty rects the device supports, and whether we must also
        // re-draw regions damaged in previous frames (e.g. when the swap
        // chain doesn't preserve buffer contents).
        let (max_partial_present_rects, draw_previous_partial_present_regions) = match self.current_compositor_kind {
            CompositorKind::Native { .. } => {
                // Assume that we can return a single dirty rect for native
                // compositor for now, and that there is no buffer-age functionality.
                // These params can be exposed by the compositor capabilities struct
                // as the Draw compositor is ported to use it.
                (1, false)
            }
            CompositorKind::Draw { draw_previous_partial_present_regions, max_partial_present_rects } => {
                (max_partial_present_rects, draw_previous_partial_present_regions)
            }
            CompositorKind::Layer { .. } => {
                (1, false)
            }
        };

        if max_partial_present_rects > 0 {
            // Accumulated damage from previous frames for this buffer. If the
            // tracker has no record for this buffer age, fall back to the
            // whole framebuffer (i.e. assume everything is damaged).
            let prev_frames_damage_rect = if let Some(..) = self.compositor_config.partial_present() {
                self.buffer_damage_tracker
                    .get_damage_rect(buffer_age)
                    .or_else(|| Some(DeviceRect::from_size(draw_target_dimensions.to_f32())))
            } else {
                None
            };

            // Partial present is only usable when the dirty rects are
            // trustworthy, no full redraw was forced, previous-frame damage
            // is known when it's required, and the debug overlay (which draws
            // outside tracked regions) is off.
            let can_use_partial_present =
                composite_state.dirty_rects_are_valid &&
                !self.force_redraw &&
                !(prev_frames_damage_rect.is_none() && draw_previous_partial_present_regions) &&
                !self.debug_overlay_state.is_enabled;

            if can_use_partial_present {
                let mut combined_dirty_rect = DeviceRect::zero();
                let fb_rect = DeviceRect::from_size(draw_target_dimensions.to_f32());

                // Work out how many dirty rects WR produced, and if that's more than
                // what the device supports.
                for tile in &composite_state.tiles {
                    // Clear tiles carry no content and don't contribute damage.
                    if tile.kind == TileKind::Clear {
                        continue;
                    }
                    let dirty_rect = composite_state.get_device_rect(
                        &tile.local_dirty_rect,
                        tile.transform_index,
                    );

                    // In pathological cases where a tile is extremely zoomed, it
                    // may end up with device coords outside the range of an i32,
                    // so clamp it to the frame buffer rect here, before it gets
                    // casted to an i32 rect below.
                    if let Some(dirty_rect) = dirty_rect.intersection(&fb_rect) {
                        combined_dirty_rect = combined_dirty_rect.union(&dirty_rect);
                    }
                }

                let combined_dirty_rect = combined_dirty_rect.round();
                let combined_dirty_rect_i32 = combined_dirty_rect.to_i32();
                // Return this frame's dirty region. If nothing has changed, don't return any dirty
                // rects at all (the client can use this as a signal to skip present completely).
                if !combined_dirty_rect.is_empty() {
                    results.dirty_rects.push(combined_dirty_rect_i32);
                }

                // Track this frame's dirty region, for calculating subsequent frames' damage.
                if draw_previous_partial_present_regions {
                    self.buffer_damage_tracker.push_dirty_rect(&combined_dirty_rect);
                }

                // If the implementation requires manually keeping the buffer consistent,
                // then we must combine this frame's dirty region with that of previous frames
                // to determine the total_dirty_rect. The is used to determine what region we
                // render to, and is what we send to the compositor as the buffer damage region
                // (eg for KHR_partial_update).
                let total_dirty_rect = if draw_previous_partial_present_regions {
                    // unwrap is safe: can_use_partial_present guarantees
                    // prev_frames_damage_rect is Some in this branch.
                    combined_dirty_rect.union(&prev_frames_damage_rect.unwrap())
                } else {
                    combined_dirty_rect
                };

                partial_present_mode = Some(PartialPresentMode::Single {
                    dirty_rect: total_dirty_rect,
                });
            } else {
                // If we don't have a valid partial present scenario, return a single
                // dirty rect to the client that covers the entire framebuffer.
                let fb_rect = DeviceIntRect::from_size(
                    draw_target_dimensions,
                );
                results.dirty_rects.push(fb_rect);

                if draw_previous_partial_present_regions {
                    self.buffer_damage_tracker.push_dirty_rect(&fb_rect.to_f32());
                }
            }

            // The forced full redraw (if any) has now been accounted for.
            self.force_redraw = false;
        }

        partial_present_mode
    }
4862
4863    fn bind_frame_data(&mut self, frame: &mut Frame) {
4864        profile_scope!("bind_frame_data");
4865
4866        let _timer = self.gpu_profiler.start_timer(GPU_TAG_SETUP_DATA);
4867
4868        self.vertex_data_textures[self.current_vertex_data_textures].update(
4869            &mut self.device,
4870            &mut self.texture_upload_pbo_pool,
4871            frame,
4872        );
4873        self.current_vertex_data_textures =
4874            (self.current_vertex_data_textures + 1) % VERTEX_DATA_TEXTURE_COUNT;
4875    }
4876
    /// Apply any queued native surface operations to the OS compositor.
    ///
    /// Drains `pending_native_surface_updates` and dispatches each operation
    /// (surface/tile create, destroy, external image attach) to the native
    /// compositor, keeping `allocated_native_surfaces` in sync for debug
    /// validation. In Draw/Layer compositor modes this queue must be empty,
    /// since nothing would ever drain it.
    fn update_native_surfaces(&mut self) {
        profile_scope!("update_native_surfaces");

        match self.compositor_config {
            CompositorConfig::Native { ref mut compositor, .. } => {
                for op in self.pending_native_surface_updates.drain(..) {
                    match op.details {
                        NativeSurfaceOperationDetails::CreateSurface { id, virtual_offset, tile_size, is_opaque } => {
                            // Track the allocation so double-creates and
                            // unknown destroys can be caught in debug builds.
                            let _inserted = self.allocated_native_surfaces.insert(id);
                            debug_assert!(_inserted, "bug: creating existing surface");
                            compositor.create_surface(
                                    &mut self.device,
                                    id,
                                    virtual_offset,
                                    tile_size,
                                    is_opaque,
                            );
                        }
                        NativeSurfaceOperationDetails::CreateExternalSurface { id, is_opaque } => {
                            let _inserted = self.allocated_native_surfaces.insert(id);
                            debug_assert!(_inserted, "bug: creating existing surface");
                            compositor.create_external_surface(
                                &mut self.device,
                                id,
                                is_opaque,
                            );
                        }
                        NativeSurfaceOperationDetails::CreateBackdropSurface { id, color } => {
                            let _inserted = self.allocated_native_surfaces.insert(id);
                            debug_assert!(_inserted, "bug: creating existing surface");
                            compositor.create_backdrop_surface(
                                &mut self.device,
                                id,
                                color,
                            );
                        }
                        NativeSurfaceOperationDetails::DestroySurface { id } => {
                            let _existed = self.allocated_native_surfaces.remove(&id);
                            debug_assert!(_existed, "bug: removing unknown surface");
                            compositor.destroy_surface(&mut self.device, id);
                        }
                        NativeSurfaceOperationDetails::CreateTile { id } => {
                            compositor.create_tile(&mut self.device, id);
                        }
                        NativeSurfaceOperationDetails::DestroyTile { id } => {
                            compositor.destroy_tile(&mut self.device, id);
                        }
                        NativeSurfaceOperationDetails::AttachExternalImage { id, external_image } => {
                            compositor.attach_external_image(&mut self.device, id, external_image);
                        }
                    }
                }
            }
            CompositorConfig::Draw { .. } | CompositorConfig::Layer { .. } => {
                // Ensure nothing is added in simple composite mode, since otherwise
                // memory will leak as this doesn't get drained
                debug_assert!(self.pending_native_surface_updates.is_empty());
            }
        }
    }
4937
4938    fn create_gpu_buffer_texture<T: Texel>(
4939        &mut self,
4940        buffer: &GpuBuffer<T>,
4941        sampler: TextureSampler,
4942    ) -> Option<Texture> {
4943        if buffer.is_empty() {
4944            None
4945        } else {
4946            let gpu_buffer_texture = self.device.create_texture(
4947                ImageBufferKind::Texture2D,
4948                buffer.format,
4949                buffer.size.width,
4950                buffer.size.height,
4951                TextureFilter::Nearest,
4952                None,
4953            );
4954
4955            self.device.bind_texture(
4956                sampler,
4957                &gpu_buffer_texture,
4958                Swizzle::default(),
4959            );
4960
4961            self.device.upload_texture_immediate(
4962                &gpu_buffer_texture,
4963                &buffer.data,
4964            );
4965
4966            Some(gpu_buffer_texture)
4967        }
4968    }
4969
4970    fn draw_frame(
4971        &mut self,
4972        frame: &mut Frame,
4973        device_size: Option<DeviceIntSize>,
4974        buffer_age: usize,
4975        results: &mut RenderResults,
4976    ) {
4977        profile_scope!("draw_frame");
4978
4979        // These markers seem to crash a lot on Android, see bug 1559834
4980        #[cfg(not(target_os = "android"))]
4981        let _gm = self.gpu_profiler.start_marker("draw frame");
4982
4983        if frame.passes.is_empty() {
4984            frame.has_been_rendered = true;
4985            return;
4986        }
4987
4988        self.device.disable_depth_write();
4989        self.set_blend(false, FramebufferKind::Other);
4990        self.device.disable_stencil();
4991
4992        self.bind_frame_data(frame);
4993
4994        // Upload experimental GPU buffer texture if there is any data present
4995        // TODO: Recycle these textures, upload via PBO or best approach for platform
4996        let gpu_buffer_texture_f = self.create_gpu_buffer_texture(
4997            &frame.gpu_buffer_f,
4998            TextureSampler::GpuBufferF,
4999        );
5000        let gpu_buffer_texture_i = self.create_gpu_buffer_texture(
5001            &frame.gpu_buffer_i,
5002            TextureSampler::GpuBufferI,
5003        );
5004
5005        let bytes_to_mb = 1.0 / 1000000.0;
5006        let gpu_buffer_bytes_f = gpu_buffer_texture_f
5007            .as_ref()
5008            .map(|tex| tex.size_in_bytes())
5009            .unwrap_or(0);
5010        let gpu_buffer_bytes_i = gpu_buffer_texture_i
5011            .as_ref()
5012            .map(|tex| tex.size_in_bytes())
5013            .unwrap_or(0);
5014        let gpu_buffer_mb = (gpu_buffer_bytes_f + gpu_buffer_bytes_i) as f32 * bytes_to_mb;
5015        self.profile.set(profiler::GPU_BUFFER_MEM, gpu_buffer_mb);
5016
5017        let gpu_cache_bytes = self.gpu_cache_texture.gpu_size_in_bytes();
5018        let gpu_cache_mb = gpu_cache_bytes as f32 * bytes_to_mb;
5019        self.profile.set(profiler::GPU_CACHE_MEM, gpu_cache_mb);
5020
5021        // Determine the present mode and dirty rects, if device_size
5022        // is Some(..). If it's None, no composite will occur and only
5023        // picture cache and texture cache targets will be updated.
5024        // TODO(gw): Split Frame so that it's clearer when a composite
5025        //           is occurring.
5026        let present_mode = device_size.and_then(|device_size| {
5027            self.calculate_dirty_rects(
5028                buffer_age,
5029                &frame.composite_state,
5030                device_size,
5031                results,
5032            )
5033        });
5034
5035        // If we have a native OS compositor, then make use of that interface to
5036        // specify how to composite each of the picture cache surfaces. First, we
5037        // need to find each tile that may be bound and updated later in the frame
5038        // and invalidate it so that the native render compositor knows that these
5039        // tiles can't be composited early. Next, after all such tiles have been
5040        // invalidated, then we queue surfaces for native composition by the render
5041        // compositor before we actually update the tiles. This allows the render
5042        // compositor to start early composition while the tiles are updating.
5043        if let CompositorKind::Native { .. } = self.current_compositor_kind {
5044            let compositor = self.compositor_config.compositor().unwrap();
5045            // Invalidate any native surface tiles that might be updated by passes.
5046            if !frame.has_been_rendered {
5047                for tile in &frame.composite_state.tiles {
5048                    if tile.kind == TileKind::Clear {
5049                        continue;
5050                    }
5051                    if !tile.local_dirty_rect.is_empty() {
5052                        if let CompositeTileSurface::Texture { surface: ResolvedSurfaceTexture::Native { id, .. } } = tile.surface {
5053                            let valid_rect = frame.composite_state.get_surface_rect(
5054                                &tile.local_valid_rect,
5055                                &tile.local_rect,
5056                                tile.transform_index,
5057                            ).to_i32();
5058
5059                            compositor.invalidate_tile(&mut self.device, id, valid_rect);
5060                        }
5061                    }
5062                }
5063            }
5064            // Ensure any external surfaces that might be used during early composition
5065            // are invalidated first so that the native compositor can properly schedule
5066            // composition to happen only when the external surface is updated.
5067            // See update_external_native_surfaces for more details.
5068            for surface in &frame.composite_state.external_surfaces {
5069                if let Some((native_surface_id, size)) = surface.update_params {
5070                    let surface_rect = size.into();
5071                    compositor.invalidate_tile(&mut self.device, NativeTileId { surface_id: native_surface_id, x: 0, y: 0 }, surface_rect);
5072                }
5073            }
5074            // Finally queue native surfaces for early composition, if applicable. By now,
5075            // we have already invalidated any tiles that such surfaces may depend upon, so
5076            // the native render compositor can keep track of when to actually schedule
5077            // composition as surfaces are updated.
5078            if device_size.is_some() {
5079                frame.composite_state.composite_native(
5080                    self.clear_color,
5081                    &results.dirty_rects,
5082                    &mut self.device,
5083                    &mut **compositor,
5084                );
5085            }
5086        }
5087
5088        for (_pass_index, pass) in frame.passes.iter_mut().enumerate() {
5089            #[cfg(not(target_os = "android"))]
5090            let _gm = self.gpu_profiler.start_marker(&format!("pass {}", _pass_index));
5091
5092            profile_scope!("offscreen target");
5093
5094            // If this frame has already been drawn, then any texture
5095            // cache targets have already been updated and can be
5096            // skipped this time.
5097            if !frame.has_been_rendered {
5098                for (&texture_id, target) in &pass.texture_cache {
5099                    self.draw_render_target(
5100                        texture_id,
5101                        target,
5102                        &frame.render_tasks,
5103                        &mut results.stats,
5104                    );
5105                }
5106
5107                if !pass.picture_cache.is_empty() {
5108                    self.profile.inc(profiler::COLOR_PASSES);
5109                }
5110
5111                // Draw picture caching tiles for this pass.
5112                for picture_target in &pass.picture_cache {
5113                    results.stats.color_target_count += 1;
5114
5115                    let draw_target = match picture_target.surface {
5116                        ResolvedSurfaceTexture::TextureCache { ref texture } => {
5117                            let (texture, _) = self.texture_resolver
5118                                .resolve(texture)
5119                                .expect("bug");
5120
5121                            DrawTarget::from_texture(
5122                                texture,
5123                                true,
5124                            )
5125                        }
5126                        ResolvedSurfaceTexture::Native { id, size } => {
5127                            let surface_info = match self.current_compositor_kind {
5128                                CompositorKind::Native { .. } => {
5129                                    let compositor = self.compositor_config.compositor().unwrap();
5130                                    compositor.bind(
5131                                        &mut self.device,
5132                                        id,
5133                                        picture_target.dirty_rect,
5134                                        picture_target.valid_rect,
5135                                    )
5136                                }
5137                                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
5138                                    unreachable!();
5139                                }
5140                            };
5141
5142                            DrawTarget::NativeSurface {
5143                                offset: surface_info.origin,
5144                                external_fbo_id: surface_info.fbo_id,
5145                                dimensions: size,
5146                            }
5147                        }
5148                    };
5149
5150                    let projection = Transform3D::ortho(
5151                        0.0,
5152                        draw_target.dimensions().width as f32,
5153                        0.0,
5154                        draw_target.dimensions().height as f32,
5155                        self.device.ortho_near_plane(),
5156                        self.device.ortho_far_plane(),
5157                    );
5158
5159                    self.draw_picture_cache_target(
5160                        picture_target,
5161                        draw_target,
5162                        &projection,
5163                        &frame.render_tasks,
5164                        &mut results.stats,
5165                    );
5166
5167                    // Native OS surfaces must be unbound at the end of drawing to them
5168                    if let ResolvedSurfaceTexture::Native { .. } = picture_target.surface {
5169                        match self.current_compositor_kind {
5170                            CompositorKind::Native { .. } => {
5171                                let compositor = self.compositor_config.compositor().unwrap();
5172                                compositor.unbind(&mut self.device);
5173                            }
5174                            CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
5175                                unreachable!();
5176                            }
5177                        }
5178                    }
5179                }
5180            }
5181
5182            for target in &pass.alpha.targets {
5183                results.stats.alpha_target_count += 1;
5184                self.draw_render_target(
5185                    target.texture_id(),
5186                    target,
5187                    &frame.render_tasks,
5188                    &mut results.stats,
5189                );
5190            }
5191
5192            for target in &pass.color.targets {
5193                results.stats.color_target_count += 1;
5194                self.draw_render_target(
5195                    target.texture_id(),
5196                    target,
5197                    &frame.render_tasks,
5198                    &mut results.stats,
5199                );
5200            }
5201
5202            // Only end the pass here and invalidate previous textures for
5203            // off-screen targets. Deferring return of the inputs to the
5204            // frame buffer until the implicit end_pass in end_frame allows
5205            // debug draw overlays to be added without triggering a copy
5206            // resolve stage in mobile / tiled GPUs.
5207            self.texture_resolver.end_pass(
5208                &mut self.device,
5209                &pass.textures_to_invalidate,
5210            );
5211        }
5212
5213        self.composite_frame(
5214            frame,
5215            device_size,
5216            results,
5217            present_mode,
5218        );
5219
5220        if let Some(gpu_buffer_texture_f) = gpu_buffer_texture_f {
5221            self.device.delete_texture(gpu_buffer_texture_f);
5222        }
5223        if let Some(gpu_buffer_texture_i) = gpu_buffer_texture_i {
5224            self.device.delete_texture(gpu_buffer_texture_i);
5225        }
5226
5227        frame.has_been_rendered = true;
5228    }
5229
    /// Performs the final composite step for the frame: either updating the
    /// native OS compositor's external surfaces, or compositing the picture
    /// cache tiles to the main framebuffer via the draw/layer compositor.
    ///
    /// If `device_size` is `None`, nothing is presented this frame and a full
    /// redraw is forced next frame so partial-present state stays consistent.
    fn composite_frame(
        &mut self,
        frame: &mut Frame,
        device_size: Option<DeviceIntSize>,
        results: &mut RenderResults,
        present_mode: Option<PartialPresentMode>,
    ) {
        profile_scope!("main target");

        if let Some(device_size) = device_size {
            results.stats.color_target_count += 1;
            // Hand this frame's per-tile debug info to the caller, leaving a
            // fresh empty record on the composite state for the next frame.
            results.picture_cache_debug = mem::replace(
                &mut frame.composite_state.picture_cache_debug,
                PictureCacheDebugInfo::new(),
            );

            let size = frame.device_rect.size().to_f32();
            let surface_origin_is_top_left = self.device.surface_origin_is_top_left();
            // Framebuffer space is Y-flipped on bottom-left-origin (GL-style)
            // surfaces, so swap the vertical extents of the projection there.
            let (bottom, top) = if surface_origin_is_top_left {
              (0.0, size.height)
            } else {
              (size.height, 0.0)
            };

            let projection = Transform3D::ortho(
                0.0,
                size.width,
                bottom,
                top,
                self.device.ortho_near_plane(),
                self.device.ortho_far_plane(),
            );

            let fb_scale = Scale::<_, _, FramebufferPixel>::new(1i32);
            let mut fb_rect = frame.device_rect * fb_scale;

            // Mirror the device rect vertically for bottom-left-origin
            // surfaces so it addresses the correct framebuffer rows.
            if !surface_origin_is_top_left {
                let h = fb_rect.height();
                fb_rect.min.y = device_size.height - fb_rect.max.y;
                fb_rect.max.y = fb_rect.min.y + h;
            }

            let draw_target = DrawTarget::Default {
                rect: fb_rect,
                total_size: device_size * fb_scale,
                surface_origin_is_top_left,
            };

            // If we have a native OS compositor, then make use of that interface
            // to specify how to composite each of the picture cache surfaces.
            match self.current_compositor_kind {
                CompositorKind::Native { .. } => {
                    // We have already queued surfaces for early native composition by this point.
                    // All that is left is to finally update any external native surfaces that were
                    // invalidated so that composition can complete.
                    self.update_external_native_surfaces(
                        &frame.composite_state.external_surfaces,
                        results,
                    );
                }
                CompositorKind::Draw { .. } | CompositorKind::Layer { .. } => {
                    self.composite_simple(
                        &frame.composite_state,
                        frame.device_rect.size(),
                        draw_target,
                        &projection,
                        results,
                        present_mode,
                        device_size,
                    );
                }
            }
        } else {
            // Rendering a frame without presenting it will confuse the partial
            // present logic, so force a full present for the next frame.
            self.force_redraw();
        }
    }
5308
    /// Returns a mutable handle to the debug overlay renderer, or `None` if
    /// it is unavailable.
    pub fn debug_renderer(&mut self) -> Option<&mut DebugRenderer> {
        self.debug.get_mut(&mut self.device)
    }
5312
    /// Returns the currently active debug flags.
    pub fn get_debug_flags(&self) -> DebugFlags {
        self.debug_flags
    }
5316
5317    pub fn set_debug_flags(&mut self, flags: DebugFlags) {
5318        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_TIME_QUERIES) {
5319            if enabled {
5320                self.gpu_profiler.enable_timers();
5321            } else {
5322                self.gpu_profiler.disable_timers();
5323            }
5324        }
5325        if let Some(enabled) = flag_changed(self.debug_flags, flags, DebugFlags::GPU_SAMPLE_QUERIES) {
5326            if enabled {
5327                self.gpu_profiler.enable_samplers();
5328            } else {
5329                self.gpu_profiler.disable_samplers();
5330            }
5331        }
5332
5333        self.debug_flags = flags;
5334    }
5335
    /// Sets the profiler overlay layout from a UI description string.
    pub fn set_profiler_ui(&mut self, ui_str: &str) {
        self.profiler.set_ui(ui_str);
    }
5339
5340    fn draw_frame_debug_items(&mut self, items: &[DebugItem]) {
5341        if items.is_empty() {
5342            return;
5343        }
5344
5345        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5346            Some(render) => render,
5347            None => return,
5348        };
5349
5350        for item in items {
5351            match item {
5352                DebugItem::Rect { rect, outer_color, inner_color, thickness } => {
5353                    if inner_color.a > 0.001 {
5354                        let rect = rect.inflate(-thickness as f32, -thickness as f32);
5355                        debug_renderer.add_quad(
5356                            rect.min.x,
5357                            rect.min.y,
5358                            rect.max.x,
5359                            rect.max.y,
5360                            (*inner_color).into(),
5361                            (*inner_color).into(),
5362                        );
5363                    }
5364
5365                    if outer_color.a > 0.001 {
5366                        debug_renderer.add_rect(
5367                            &rect.to_i32(),
5368                            *thickness,
5369                            (*outer_color).into(),
5370                        );
5371                    }
5372                }
5373                DebugItem::Text { ref msg, position, color } => {
5374                    debug_renderer.add_text(
5375                        position.x,
5376                        position.y,
5377                        msg,
5378                        (*color).into(),
5379                        None,
5380                    );
5381                }
5382            }
5383        }
5384    }
5385
5386    fn draw_render_target_debug(&mut self, draw_target: &DrawTarget) {
5387        if !self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) {
5388            return;
5389        }
5390
5391        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5392            Some(render) => render,
5393            None => return,
5394        };
5395
5396        let textures = self.texture_resolver
5397            .texture_cache_map
5398            .values()
5399            .filter(|item| item.category == TextureCacheCategory::RenderTarget)
5400            .map(|item| &item.texture)
5401            .collect::<Vec<&Texture>>();
5402
5403        Self::do_debug_blit(
5404            &mut self.device,
5405            debug_renderer,
5406            textures,
5407            draw_target,
5408            0,
5409            &|_| [0.0, 1.0, 0.0, 1.0], // Use green for all RTs.
5410        );
5411    }
5412
    /// Draws a magnified view of the 64x64 region around the cursor into a
    /// 1024x1024 rectangle near the bottom-right corner of the framebuffer,
    /// when the `ZOOM_DBG` debug flag is enabled.
    fn draw_zoom_debug(
        &mut self,
        device_size: DeviceIntSize,
    ) {
        if !self.debug_flags.contains(DebugFlags::ZOOM_DBG) {
            return;
        }

        let debug_renderer = match self.debug.get_mut(&mut self.device) {
            Some(render) => render,
            None => return,
        };

        let source_size = DeviceIntSize::new(64, 64);
        let target_size = DeviceIntSize::new(1024, 1024);

        // Center the source region on the cursor, clamped so it stays fully
        // inside the framebuffer.
        let source_origin = DeviceIntPoint::new(
            (self.cursor_position.x - source_size.width / 2)
                .min(device_size.width - source_size.width)
                .max(0),
            (self.cursor_position.y - source_size.height / 2)
                .min(device_size.height - source_size.height)
                .max(0),
        );

        let source_rect = DeviceIntRect::from_origin_and_size(
            source_origin,
            source_size,
        );

        // Place the zoomed output 64px in from the bottom-right corner.
        let target_rect = DeviceIntRect::from_origin_and_size(
            DeviceIntPoint::new(
                device_size.width - target_size.width - 64,
                device_size.height - target_size.height - 64,
            ),
            target_size,
        );

        let texture_rect = FramebufferIntRect::from_size(
            source_rect.size().cast_unit(),
        );

        // Red outline around the zoom view.
        debug_renderer.add_rect(
            &target_rect.inflate(1, 1),
            1,
            debug_colors::RED.into(),
        );

        // Lazily allocate the intermediate zoom texture on first use; it is
        // deleted in `deinit`.
        if self.zoom_debug_texture.is_none() {
            let texture = self.device.create_texture(
                ImageBufferKind::Texture2D,
                ImageFormat::BGRA8,
                source_rect.width(),
                source_rect.height(),
                TextureFilter::Nearest,
                Some(RenderTargetInfo { has_depth: false }),
            );

            self.zoom_debug_texture = Some(texture);
        }

        // Copy frame buffer into the zoom texture
        let read_target = DrawTarget::new_default(device_size, self.device.surface_origin_is_top_left());
        self.device.blit_render_target(
            read_target.into(),
            read_target.to_framebuffer_rect(source_rect),
            DrawTarget::from_texture(
                self.zoom_debug_texture.as_ref().unwrap(),
                false,
            ),
            texture_rect,
            TextureFilter::Nearest,
        );

        // Draw the zoom texture back to the framebuffer, scaled up with
        // nearest filtering so individual pixels stay visible.
        self.device.blit_render_target(
            ReadTarget::from_texture(
                self.zoom_debug_texture.as_ref().unwrap(),
            ),
            texture_rect,
            read_target,
            read_target.to_framebuffer_rect(target_rect),
            TextureFilter::Nearest,
        );
    }
5498
5499    fn draw_texture_cache_debug(&mut self, draw_target: &DrawTarget) {
5500        if !self.debug_flags.contains(DebugFlags::TEXTURE_CACHE_DBG) {
5501            return;
5502        }
5503
5504        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5505            Some(render) => render,
5506            None => return,
5507        };
5508
5509        let textures = self.texture_resolver
5510            .texture_cache_map
5511            .values()
5512            .filter(|item| item.category == TextureCacheCategory::Atlas)
5513            .map(|item| &item.texture)
5514            .collect::<Vec<&Texture>>();
5515
5516        fn select_color(texture: &Texture) -> [f32; 4] {
5517            if texture.flags().contains(TextureFlags::IS_SHARED_TEXTURE_CACHE) {
5518                [1.0, 0.5, 0.0, 1.0] // Orange for shared.
5519            } else {
5520                [1.0, 0.0, 1.0, 1.0] // Fuchsia for standalone.
5521            }
5522        }
5523
5524        Self::do_debug_blit(
5525            &mut self.device,
5526            debug_renderer,
5527            textures,
5528            draw_target,
5529            if self.debug_flags.contains(DebugFlags::RENDER_TARGET_DBG) { 544 } else { 0 },
5530            &select_color,
5531        );
5532    }
5533
5534    fn do_debug_blit(
5535        device: &mut Device,
5536        debug_renderer: &mut DebugRenderer,
5537        mut textures: Vec<&Texture>,
5538        draw_target: &DrawTarget,
5539        bottom: i32,
5540        select_color: &dyn Fn(&Texture) -> [f32; 4],
5541    ) {
5542        let mut spacing = 16;
5543        let mut size = 512;
5544
5545        let device_size = draw_target.dimensions();
5546        let fb_width = device_size.width;
5547        let fb_height = device_size.height;
5548        let surface_origin_is_top_left = draw_target.surface_origin_is_top_left();
5549
5550        let num_textures = textures.len() as i32;
5551
5552        if num_textures * (size + spacing) > fb_width {
5553            let factor = fb_width as f32 / (num_textures * (size + spacing)) as f32;
5554            size = (size as f32 * factor) as i32;
5555            spacing = (spacing as f32 * factor) as i32;
5556        }
5557
5558        let text_height = 14; // Visually approximated.
5559        let text_margin = 1;
5560        let tag_height = text_height + text_margin * 2;
5561        let tag_y = fb_height - (bottom + spacing + tag_height);
5562        let image_y = tag_y - size;
5563
5564        // Sort the display by size (in bytes), so that left-to-right is
5565        // largest-to-smallest.
5566        //
5567        // Note that the vec here is in increasing order, because the elements
5568        // get drawn right-to-left.
5569        textures.sort_by_key(|t| t.size_in_bytes());
5570
5571        let mut i = 0;
5572        for texture in textures.iter() {
5573            let dimensions = texture.get_dimensions();
5574            let src_rect = FramebufferIntRect::from_size(
5575                FramebufferIntSize::new(dimensions.width as i32, dimensions.height as i32),
5576            );
5577
5578            let x = fb_width - (spacing + size) * (i as i32 + 1);
5579
5580            // If we have more targets than fit on one row in screen, just early exit.
5581            if x > fb_width {
5582                return;
5583            }
5584
5585            // Draw the info tag.
5586            let tag_rect = rect(x, tag_y, size, tag_height).to_box2d();
5587            let tag_color = select_color(texture);
5588            device.clear_target(
5589                Some(tag_color),
5590                None,
5591                Some(draw_target.to_framebuffer_rect(tag_rect)),
5592            );
5593
5594            // Draw the dimensions onto the tag.
5595            let dim = texture.get_dimensions();
5596            let text_rect = tag_rect.inflate(-text_margin, -text_margin);
5597            debug_renderer.add_text(
5598                text_rect.min.x as f32,
5599                text_rect.max.y as f32, // Top-relative.
5600                &format!("{}x{}", dim.width, dim.height),
5601                ColorU::new(0, 0, 0, 255),
5602                Some(tag_rect.to_f32())
5603            );
5604
5605            // Blit the contents of the texture.
5606            let dest_rect = draw_target.to_framebuffer_rect(rect(x, image_y, size, size).to_box2d());
5607            let read_target = ReadTarget::from_texture(texture);
5608
5609            if surface_origin_is_top_left {
5610                device.blit_render_target(
5611                    read_target,
5612                    src_rect,
5613                    *draw_target,
5614                    dest_rect,
5615                    TextureFilter::Linear,
5616                );
5617            } else {
5618                 // Invert y.
5619                 device.blit_render_target_invert_y(
5620                    read_target,
5621                    src_rect,
5622                    *draw_target,
5623                    dest_rect,
5624                );
5625            }
5626            i += 1;
5627        }
5628    }
5629
5630    fn draw_epoch_debug(&mut self) {
5631        if !self.debug_flags.contains(DebugFlags::EPOCHS) {
5632            return;
5633        }
5634
5635        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5636            Some(render) => render,
5637            None => return,
5638        };
5639
5640        let dy = debug_renderer.line_height();
5641        let x0: f32 = 30.0;
5642        let y0: f32 = 30.0;
5643        let mut y = y0;
5644        let mut text_width = 0.0;
5645        for ((pipeline, document_id), epoch) in  &self.pipeline_info.epochs {
5646            y += dy;
5647            let w = debug_renderer.add_text(
5648                x0, y,
5649                &format!("({:?}, {:?}): {:?}", pipeline, document_id, epoch),
5650                ColorU::new(255, 255, 0, 255),
5651                None,
5652            ).size.width;
5653            text_width = f32::max(text_width, w);
5654        }
5655
5656        let margin = 10.0;
5657        debug_renderer.add_quad(
5658            x0 - margin,
5659            y0 - margin,
5660            x0 + text_width + margin,
5661            y + margin,
5662            ColorU::new(25, 25, 25, 200),
5663            ColorU::new(51, 51, 51, 200),
5664        );
5665    }
5666
5667    fn draw_window_visibility_debug(&mut self) {
5668        if !self.debug_flags.contains(DebugFlags::WINDOW_VISIBILITY_DBG) {
5669            return;
5670        }
5671
5672        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5673            Some(render) => render,
5674            None => return,
5675        };
5676
5677        let x: f32 = 30.0;
5678        let y: f32 = 40.0;
5679
5680        if let CompositorConfig::Native { ref mut compositor, .. } = self.compositor_config {
5681            let visibility = compositor.get_window_visibility(&mut self.device);
5682            let color = if visibility.is_fully_occluded {
5683                ColorU::new(255, 0, 0, 255)
5684
5685            } else {
5686                ColorU::new(0, 0, 255, 255)
5687            };
5688
5689            debug_renderer.add_text(
5690                x, y,
5691                &format!("{:?}", visibility),
5692                color,
5693                None,
5694            );
5695        }
5696
5697
5698    }
5699
5700    fn draw_gpu_cache_debug(&mut self, device_size: DeviceIntSize) {
5701        if !self.debug_flags.contains(DebugFlags::GPU_CACHE_DBG) {
5702            return;
5703        }
5704
5705        let debug_renderer = match self.debug.get_mut(&mut self.device) {
5706            Some(render) => render,
5707            None => return,
5708        };
5709
5710        let (x_off, y_off) = (30f32, 30f32);
5711        let height = self.gpu_cache_texture.get_height()
5712            .min(device_size.height - (y_off as i32) * 2) as usize;
5713        debug_renderer.add_quad(
5714            x_off,
5715            y_off,
5716            x_off + MAX_VERTEX_TEXTURE_WIDTH as f32,
5717            y_off + height as f32,
5718            ColorU::new(80, 80, 80, 80),
5719            ColorU::new(80, 80, 80, 80),
5720        );
5721
5722        let upper = self.gpu_cache_debug_chunks.len().min(height);
5723        for chunk in self.gpu_cache_debug_chunks[0..upper].iter().flatten() {
5724            let color = ColorU::new(250, 0, 0, 200);
5725            debug_renderer.add_quad(
5726                x_off + chunk.address.u as f32,
5727                y_off + chunk.address.v as f32,
5728                x_off + chunk.address.u as f32 + chunk.size as f32,
5729                y_off + chunk.address.v as f32 + 1.0,
5730                color,
5731                color,
5732            );
5733        }
5734    }
5735
    /// Pass-through to `Device::read_pixels_into`, used by Gecko's WR bindings.
    ///
    /// Reads back `rect` into `output` using the given pixel `format`.
    pub fn read_pixels_into(&mut self, rect: FramebufferIntRect, format: ImageFormat, output: &mut [u8]) {
        self.device.read_pixels_into(rect, format, output);
    }
5740
5741    pub fn read_pixels_rgba8(&mut self, rect: FramebufferIntRect) -> Vec<u8> {
5742        let mut pixels = vec![0; (rect.area() * 4) as usize];
5743        self.device.read_pixels_into(rect, ImageFormat::RGBA8, &mut pixels);
5744        pixels
5745    }
5746
    /// De-initializes the Renderer safely, assuming the GL context is still
    /// alive and active. Consumes `self` and releases all GPU resources the
    /// renderer owns, wrapped in a fake device frame.
    pub fn deinit(mut self) {
        // Note: this is a fake frame, only needed because texture deletion is required to happen inside a frame
        self.device.begin_frame();
        // If we are using a native compositor, ensure that any remaining native
        // surfaces are freed.
        if let CompositorConfig::Native { mut compositor, .. } = self.compositor_config {
            for id in self.allocated_native_surfaces.drain() {
                compositor.destroy_surface(&mut self.device, id);
            }
            // Destroy the debug overlay surface, if currently allocated.
            if self.debug_overlay_state.current_size.is_some() {
                compositor.destroy_surface(&mut self.device, NativeSurfaceId::DEBUG_OVERLAY);
            }
            compositor.deinit(&mut self.device);
        }
        self.gpu_cache_texture.deinit(&mut self.device);
        if let Some(dither_matrix_texture) = self.dither_matrix_texture {
            self.device.delete_texture(dither_matrix_texture);
        }
        if let Some(zoom_debug_texture) = self.zoom_debug_texture {
            self.device.delete_texture(zoom_debug_texture);
        }
        for textures in self.vertex_data_textures.drain(..) {
            textures.deinit(&mut self.device);
        }
        self.texture_upload_pbo_pool.deinit(&mut self.device);
        self.staging_texture_pool.delete_textures(&mut self.device);
        self.texture_resolver.deinit(&mut self.device);
        self.vaos.deinit(&mut self.device);
        self.debug.deinit(&mut self.device);

        // The shaders are shared via `Rc`; only deinit them here if this is
        // the last remaining reference.
        if let Ok(shaders) = Rc::try_unwrap(self.shaders) {
            shaders.into_inner().deinit(&mut self.device);
        }

        if let Some(async_screenshots) = self.async_screenshots.take() {
            async_screenshots.deinit(&mut self.device);
        }

        if let Some(async_frame_recorder) = self.async_frame_recorder.take() {
            async_frame_recorder.deinit(&mut self.device);
        }

        #[cfg(feature = "capture")]
        self.device.delete_fbo(self.read_fbo);
        #[cfg(feature = "replay")]
        for (_, ext) in self.owned_external_images {
            self.device.delete_external_texture(ext);
        }
        self.device.end_frame();
    }
5799
5800    /// Collects a memory report.
5801    pub fn report_memory(&self, swgl: *mut c_void) -> MemoryReport {
5802        let mut report = MemoryReport::default();
5803
5804        // GPU cache CPU memory.
5805        self.gpu_cache_texture.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
5806
5807        self.staging_texture_pool.report_memory_to(&mut report, self.size_of_ops.as_ref().unwrap());
5808
5809        // Render task CPU memory.
5810        for (_id, doc) in &self.active_documents {
5811            let frame_alloc_stats = doc.frame.allocator_memory.get_stats();
5812            report.frame_allocator += frame_alloc_stats.reserved_bytes;
5813            report.render_tasks += doc.frame.render_tasks.report_memory();
5814        }
5815
5816        // Vertex data GPU memory.
5817        for textures in &self.vertex_data_textures {
5818            report.vertex_data_textures += textures.size_in_bytes();
5819        }
5820
5821        // Texture cache and render target GPU memory.
5822        report += self.texture_resolver.report_memory();
5823
5824        // Texture upload PBO memory.
5825        report += self.texture_upload_pbo_pool.report_memory();
5826
5827        // Textures held internally within the device layer.
5828        report += self.device.report_memory(self.size_of_ops.as_ref().unwrap(), swgl);
5829
5830        report
5831    }
5832
5833    // Sets the blend mode. Blend is unconditionally set if the "show overdraw" debugging mode is
5834    // enabled.
5835    fn set_blend(&mut self, mut blend: bool, framebuffer_kind: FramebufferKind) {
5836        if framebuffer_kind == FramebufferKind::Main &&
5837                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5838            blend = true
5839        }
5840        self.device.set_blend(blend)
5841    }
5842
5843    fn set_blend_mode_multiply(&mut self, framebuffer_kind: FramebufferKind) {
5844        if framebuffer_kind == FramebufferKind::Main &&
5845                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5846            self.device.set_blend_mode_show_overdraw();
5847        } else {
5848            self.device.set_blend_mode_multiply();
5849        }
5850    }
5851
5852    fn set_blend_mode_premultiplied_alpha(&mut self, framebuffer_kind: FramebufferKind) {
5853        if framebuffer_kind == FramebufferKind::Main &&
5854                self.debug_flags.contains(DebugFlags::SHOW_OVERDRAW) {
5855            self.device.set_blend_mode_show_overdraw();
5856        } else {
5857            self.device.set_blend_mode_premultiplied_alpha();
5858        }
5859    }
5860
5861    /// Clears the texture with a given color.
5862    fn clear_texture(&mut self, texture: &Texture, color: [f32; 4]) {
5863        self.device.bind_draw_target(DrawTarget::from_texture(
5864            &texture,
5865            false,
5866        ));
5867        self.device.clear_target(Some(color), None, None);
5868    }
5869}
5870
bitflags! {
    /// Flags that control how shaders are pre-cached, if at all.
    ///
    /// NOTE(review): bit positions 0 and 1 are unused (the first real flag
    /// starts at `1 << 2`) — presumably historical/reserved values; confirm
    /// before assigning new flags to the low bits.
    #[derive(Default, Debug, Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
    pub struct ShaderPrecacheFlags: u32 {
        /// Needed for const initialization
        const EMPTY                 = 0;

        /// Only start async compile
        const ASYNC_COMPILE         = 1 << 2;

        /// Do a full compile/link during startup
        const FULL_COMPILE          = 1 << 3;
    }
}
5885
/// The cumulative times spent in each painting phase to generate this frame.
#[derive(Debug, Default)]
pub struct FullFrameStats {
    pub full_display_list: bool,
    pub gecko_display_list_time: f64,
    pub wr_display_list_time: f64,
    pub scene_build_time: f64,
    pub frame_build_time: f64,
}

impl FullFrameStats {
    /// Combines two stat sets into a new one: per-phase times are summed and
    /// `full_display_list` is true if either side had a full display list.
    pub fn merge(&self, other: &FullFrameStats) -> Self {
        let full_display_list = self.full_display_list || other.full_display_list;
        FullFrameStats {
            full_display_list,
            gecko_display_list_time: self.gecko_display_list_time + other.gecko_display_list_time,
            wr_display_list_time: self.wr_display_list_time + other.wr_display_list_time,
            scene_build_time: self.scene_build_time + other.scene_build_time,
            frame_build_time: self.frame_build_time + other.frame_build_time,
        }
    }

    /// Total time across all four tracked phases, in the same units as the
    /// individual fields.
    pub fn total(&self) -> f64 {
        self.gecko_display_list_time
            + self.wr_display_list_time
            + self.scene_build_time
            + self.frame_build_time
    }
}
5911
/// Some basic statistics about the rendered scene, used in Gecko, as
/// well as in wrench reftests to ensure that tests are batching and/or
/// allocating on render targets as we expect them to.
///
/// NOTE(review): `#[repr(C)]` suggests this struct crosses an FFI boundary
/// (the Gecko consumer mentioned above) — confirm before reordering or
/// resizing fields.
#[repr(C)]
#[derive(Debug, Default)]
pub struct RendererStats {
    // Draw-call / render-target counters for the frame.
    pub total_draw_calls: usize,
    pub alpha_target_count: usize,
    pub color_target_count: usize,
    // Upload volume and per-phase timings for the frame.
    pub texture_upload_mb: f64,
    pub resource_upload_time: f64,
    pub gpu_cache_upload_time: f64,
    pub gecko_display_list_time: f64,
    pub wr_display_list_time: f64,
    pub scene_build_time: f64,
    pub frame_build_time: f64,
    // Whether this frame had a full display list / was a full paint
    // (see `RendererStats::merge`, which sets `full_paint`).
    pub full_display_list: bool,
    pub full_paint: bool,
}
5931
5932impl RendererStats {
5933    pub fn merge(&mut self, stats: &FullFrameStats) {
5934        self.gecko_display_list_time = stats.gecko_display_list_time;
5935        self.wr_display_list_time = stats.wr_display_list_time;
5936        self.scene_build_time = stats.scene_build_time;
5937        self.frame_build_time = stats.frame_build_time;
5938        self.full_display_list = stats.full_display_list;
5939        self.full_paint = true;
5940    }
5941}
5942
/// Return type from render(), which contains some repr(C) statistics as well as
/// some non-repr(C) data.
#[derive(Debug, Default)]
pub struct RenderResults {
    /// Statistics about the frame that was rendered.
    pub stats: RendererStats,

    /// A list of the device dirty rects that were updated
    /// this frame.
    /// TODO(gw): This is an initial interface, likely to change in future.
    /// TODO(gw): The dirty rects here are currently only useful when scrolling
    ///           is not occurring. They are still correct in the case of
    ///           scrolling, but will be very large (until we expose proper
    ///           OS compositor support where the dirty rects apply to a
    ///           specific picture cache slice / OS compositor surface).
    pub dirty_rects: Vec<DeviceIntRect>,

    /// Information about the state of picture cache tiles. This is only
    /// allocated and stored if config.testing is true (such as wrench)
    pub picture_cache_debug: PictureCacheDebugInfo,
}
5964
/// Serializable description of a texture saved into a capture: its pixel
/// parameters plus the capture-relative path of the raw texel file
/// (written by `Renderer::save_texture`, read back by `Renderer::load_texture`).
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainTexture {
    // Path of the raw texel data relative to the capture resource root,
    // e.g. "textures/<name>.raw".
    data: String,
    size: DeviceIntSize,
    format: ImageFormat,
    filter: TextureFilter,
    has_depth: bool,
    category: Option<TextureCacheCategory>,
}
5976
5977
/// Serializable snapshot of the renderer's GPU-side state for a capture:
/// the GPU cache texture and all texture-cache textures, keyed by id.
/// Written by `Renderer::save_capture` (as the "renderer" resource) and
/// consumed by `Renderer::load_capture`.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainRenderer {
    device_size: Option<DeviceIntSize>,
    gpu_cache: PlainTexture,
    gpu_cache_frame_id: FrameId,
    textures: FastHashMap<CacheTextureId, PlainTexture>,
}
5987
/// Serializable list of the external images captured alongside a frame;
/// saved/loaded as the "external_resources" capture resource.
#[cfg(any(feature = "capture", feature = "replay"))]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
struct PlainExternalResources {
    images: Vec<ExternalCaptureImage>
}
5994
/// Payload for a replayed external image: either a GL texture created while
/// loading the capture, or the raw bytes read from disk.
#[cfg(feature = "replay")]
enum CapturedExternalImageData {
    NativeTexture(gl::GLuint),
    Buffer(Arc<Vec<u8>>),
}
6000
/// External image handler used during capture replay: serves images out of
/// an in-memory map (keyed by image id + channel) instead of a live embedder.
#[cfg(feature = "replay")]
struct DummyExternalImageHandler {
    data: FastHashMap<(ExternalImageId, u8), (CapturedExternalImageData, TexelRect)>,
}
6005
#[cfg(feature = "replay")]
impl ExternalImageHandler for DummyExternalImageHandler {
    /// Looks up the captured image for `(key, channel_index)` and exposes it
    /// either as a native texture id or as a raw byte slice.
    ///
    /// Panics if the key is not present in the captured data map.
    fn lock(&mut self, key: ExternalImageId, channel_index: u8) -> ExternalImage {
        let (captured_data, uv) = &self.data[&(key, channel_index)];
        let source = match captured_data {
            CapturedExternalImageData::NativeTexture(tid) => ExternalImageSource::NativeTexture(*tid),
            CapturedExternalImageData::Buffer(ref arc) => ExternalImageSource::RawData(&*arc),
        };
        ExternalImage { uv: *uv, source }
    }

    /// Nothing to release for replayed images.
    fn unlock(&mut self, _key: ExternalImageId, _channel_index: u8) {}
}
6020
/// Per-document pipeline bookkeeping returned to the API consumer: the
/// current epoch of each (pipeline, document) pair, plus the pairs removed
/// since the last flush.
#[derive(Default)]
pub struct PipelineInfo {
    pub epochs: FastHashMap<(PipelineId, DocumentId), Epoch>,
    pub removed_pipelines: Vec<(PipelineId, DocumentId)>,
}
6026
impl Renderer {
    /// Reads back the contents of `texture` through the read FBO and writes
    /// them as a raw texel file under `root/textures/<name>.raw`, returning
    /// the `PlainTexture` descriptor that gets serialized into the capture.
    /// With the "png" feature enabled, a PNG preview is saved alongside.
    ///
    /// Expects the caller to have set up the device frame / read target
    /// (see `save_capture`).
    #[cfg(feature = "capture")]
    fn save_texture(
        texture: &Texture, category: Option<TextureCacheCategory>, name: &str, root: &PathBuf, device: &mut Device
    ) -> PlainTexture {
        use std::fs;
        use std::io::Write;

        let short_path = format!("textures/{}.raw", name);

        let bytes_per_pixel = texture.get_format().bytes_per_pixel();
        let read_format = texture.get_format();
        let rect_size = texture.get_dimensions();

        let mut file = fs::File::create(root.join(&short_path))
            .expect(&format!("Unable to create {}", short_path));
        let bytes_per_texture = (rect_size.width * rect_size.height * bytes_per_pixel) as usize;
        let mut data = vec![0; bytes_per_texture];

        //TODO: instead of reading from an FBO with `read_pixels*`, we could
        // read from textures directly with `get_tex_image*`.

        let rect = device_size_as_framebuffer_size(rect_size).into();

        device.attach_read_texture(texture);
        #[cfg(feature = "png")]
        {
            // RGBAF32 can't be written to a PNG directly, so do a separate
            // read converted to RGBA8 just for the preview image.
            let mut png_data;
            let (data_ref, format) = match texture.get_format() {
                ImageFormat::RGBAF32 => {
                    png_data = vec![0; (rect_size.width * rect_size.height * 4) as usize];
                    device.read_pixels_into(rect, ImageFormat::RGBA8, &mut png_data);
                    (&png_data, ImageFormat::RGBA8)
                }
                // NOTE(review): for all other formats `data` is still
                // zero-filled here — the raw read below happens *after* this
                // block — so the PNG preview appears to be written from empty
                // data. Confirm whether that is intended.
                fm => (&data, fm),
            };
            CaptureConfig::save_png(
                root.join(format!("textures/{}-{}.png", name, 0)),
                rect_size, format,
                None,
                data_ref,
            );
        }
        device.read_pixels_into(rect, read_format, &mut data);
        file.write_all(&data)
            .unwrap();

        PlainTexture {
            data: short_path,
            size: rect_size,
            format: texture.get_format(),
            filter: texture.get_filter(),
            has_depth: texture.supports_depth(),
            category,
        }
    }

    /// Inverse of `save_texture`: reads the raw texel file referenced by
    /// `plain.data`, creates a matching device texture and uploads the texels
    /// to it. Returns both the texture and the raw texel bytes.
    #[cfg(feature = "replay")]
    fn load_texture(
        target: ImageBufferKind,
        plain: &PlainTexture,
        rt_info: Option<RenderTargetInfo>,
        root: &PathBuf,
        device: &mut Device
    ) -> (Texture, Vec<u8>)
    {
        use std::fs::File;
        use std::io::Read;

        let mut texels = Vec::new();
        File::open(root.join(&plain.data))
            .expect(&format!("Unable to open texture at {}", plain.data))
            .read_to_end(&mut texels)
            .unwrap();

        let texture = device.create_texture(
            target,
            plain.format,
            plain.size.width,
            plain.size.height,
            plain.filter,
            rt_info,
        );
        device.upload_texture_immediate(&texture, &texels);

        (texture, texels)
    }

    /// Writes the renderer's GPU-side state into the capture directory:
    /// external images (if `CaptureBits::EXTERNAL_RESOURCES` is set), and the
    /// GPU cache plus all texture-cache textures (if `CaptureBits::FRAME` is
    /// set). Also dumps profiler stats to `profiler-stats.txt`.
    #[cfg(feature = "capture")]
    fn save_capture(
        &mut self,
        config: CaptureConfig,
        deferred_images: Vec<ExternalCaptureImage>,
    ) {
        use std::fs;
        use std::io::Write;
        use api::ExternalImageData;
        use crate::render_api::CaptureBits;

        let root = config.resource_root();

        self.device.begin_frame();
        let _gm = self.gpu_profiler.start_marker("read GPU data");
        self.device.bind_read_target_impl(self.read_fbo, DeviceIntPoint::zero());

        if config.bits.contains(CaptureBits::EXTERNAL_RESOURCES) && !deferred_images.is_empty() {
            info!("saving external images");
            // De-duplication maps: the same raw buffer (by pointer) or GL
            // texture (by id) is only written to disk once; ids in the file
            // names are 1-based, assigned in first-seen order.
            let mut arc_map = FastHashMap::<*const u8, String>::default();
            let mut tex_map = FastHashMap::<u32, String>::default();
            let handler = self.external_image_handler
                .as_mut()
                .expect("Unable to lock the external image handler!");
            for def in &deferred_images {
                info!("\t{}", def.short_path);
                let ExternalImageData { id, channel_index, image_type, .. } = def.external;
                // The image rendering parameter is irrelevant because no filtering happens during capturing.
                let ext_image = handler.lock(id, channel_index);
                let (data, short_path) = match ext_image.source {
                    ExternalImageSource::RawData(data) => {
                        let arc_id = arc_map.len() + 1;
                        match arc_map.entry(data.as_ptr()) {
                            Entry::Occupied(e) => {
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let short_path = format!("externals/d{}.raw", arc_id);
                                (Some(data.to_vec()), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::NativeTexture(gl_id) => {
                        let tex_id = tex_map.len() + 1;
                        match tex_map.entry(gl_id) {
                            Entry::Occupied(e) => {
                                (None, e.get().clone())
                            }
                            Entry::Vacant(e) => {
                                let target = match image_type {
                                    ExternalImageType::TextureHandle(target) => target,
                                    ExternalImageType::Buffer => unreachable!(),
                                };
                                info!("\t\tnative texture of target {:?}", target);
                                // Read the texels of the external texture back
                                // through the read FBO so they can be saved.
                                self.device.attach_read_texture_external(gl_id, target);
                                let data = self.device.read_pixels(&def.descriptor);
                                let short_path = format!("externals/t{}.raw", tex_id);
                                (Some(data), e.insert(short_path).clone())
                            }
                        }
                    }
                    ExternalImageSource::Invalid => {
                        info!("\t\tinvalid source!");
                        (None, String::new())
                    }
                };
                // `data` is Some only the first time a given buffer/texture
                // is encountered; subsequent refs reuse the existing file.
                if let Some(bytes) = data {
                    fs::File::create(root.join(&short_path))
                        .expect(&format!("Unable to create {}", short_path))
                        .write_all(&bytes)
                        .unwrap();
                    #[cfg(feature = "png")]
                    CaptureConfig::save_png(
                        root.join(&short_path).with_extension("png"),
                        def.descriptor.size,
                        def.descriptor.format,
                        def.descriptor.stride,
                        &bytes,
                    );
                }
                let plain = PlainExternalImage {
                    data: short_path,
                    external: def.external,
                    uv: ext_image.uv,
                };
                config.serialize_for_resource(&plain, &def.short_path);
            }
            // All images were locked above; release them only after every
            // one has been serialized.
            for def in &deferred_images {
                handler.unlock(def.external.id, def.external.channel_index);
            }
            let plain_external = PlainExternalResources {
                images: deferred_images,
            };
            config.serialize_for_resource(&plain_external, "external_resources");
        }

        if config.bits.contains(CaptureBits::FRAME) {
            let path_textures = root.join("textures");
            if !path_textures.is_dir() {
                fs::create_dir(&path_textures).unwrap();
            }

            info!("saving GPU cache");
            self.update_gpu_cache(); // flush pending updates
            let mut plain_self = PlainRenderer {
                device_size: self.device_size,
                gpu_cache: Self::save_texture(
                    self.gpu_cache_texture.get_texture(),
                    None, "gpu", &root, &mut self.device,
                ),
                gpu_cache_frame_id: self.gpu_cache_frame_id,
                textures: FastHashMap::default(),
            };

            info!("saving cached textures");
            for (id, item) in &self.texture_resolver.texture_cache_map {
                let file_name = format!("cache-{}", plain_self.textures.len() + 1);
                info!("\t{}", file_name);
                let plain = Self::save_texture(&item.texture, Some(item.category), &file_name, &root, &mut self.device);
                plain_self.textures.insert(*id, plain);
            }

            config.serialize_for_resource(&plain_self, "renderer");
        }

        self.device.reset_read_target();
        self.device.end_frame();

        let mut stats_file = fs::File::create(config.root.join("profiler-stats.txt"))
            .expect(&format!("Unable to create profiler-stats.txt"));
        if self.debug_flags.intersects(DebugFlags::PROFILER_DBG | DebugFlags::PROFILER_CAPTURE) {
            self.profiler.dump_stats(&mut stats_file).unwrap();
        } else {
            writeln!(stats_file, "Turn on PROFILER_DBG or PROFILER_CAPTURE to get stats here!").unwrap();
        }

        info!("done.");
    }

    /// Rebuilds renderer state from a capture: installs a dummy external
    /// image handler backed by the saved buffers/textures, and — when the
    /// "renderer" resource is present — replaces the texture cache contents
    /// and the GPU cache with the captured ones.
    #[cfg(feature = "replay")]
    fn load_capture(
        &mut self,
        config: CaptureConfig,
        plain_externals: Vec<PlainExternalImage>,
    ) {
        use std::{fs::File, io::Read};

        info!("loading external buffer-backed images");
        assert!(self.texture_resolver.external_images.is_empty());
        // Buffers are de-duplicated by their capture path and shared via Arc.
        let mut raw_map = FastHashMap::<String, Arc<Vec<u8>>>::default();
        let mut image_handler = DummyExternalImageHandler {
            data: FastHashMap::default(),
        };

        let root = config.resource_root();

        // Note: this is a `SCENE` level population of the external image handlers
        // It would put both external buffers and texture into the map.
        // But latter are going to be overwritten later in this function
        // if we are in the `FRAME` level.
        for plain_ext in plain_externals {
            let data = match raw_map.entry(plain_ext.data) {
                Entry::Occupied(e) => e.get().clone(),
                Entry::Vacant(e) => {
                    let mut buffer = Vec::new();
                    File::open(root.join(e.key()))
                        .expect(&format!("Unable to open {}", e.key()))
                        .read_to_end(&mut buffer)
                        .unwrap();
                    e.insert(Arc::new(buffer)).clone()
                }
            };
            let ext = plain_ext.external;
            let value = (CapturedExternalImageData::Buffer(data), plain_ext.uv);
            image_handler.data.insert((ext.id, ext.channel_index), value);
        }

        if let Some(external_resources) = config.deserialize_for_resource::<PlainExternalResources, _>("external_resources") {
            info!("loading external texture-backed images");
            // De-duplicate GL textures by their capture path, mirroring the
            // de-duplication done in `save_capture`.
            let mut native_map = FastHashMap::<String, gl::GLuint>::default();
            for ExternalCaptureImage { short_path, external, descriptor } in external_resources.images {
                let target = match external.image_type {
                    ExternalImageType::TextureHandle(target) => target,
                    ExternalImageType::Buffer => continue,
                };
                let plain_ext = config.deserialize_for_resource::<PlainExternalImage, _>(&short_path)
                    .expect(&format!("Unable to read {}.ron", short_path));
                let key = (external.id, external.channel_index);

                let tid = match native_map.entry(plain_ext.data) {
                    Entry::Occupied(e) => e.get().clone(),
                    Entry::Vacant(e) => {
                        let plain_tex = PlainTexture {
                            data: e.key().clone(),
                            size: descriptor.size,
                            format: descriptor.format,
                            filter: TextureFilter::Linear,
                            has_depth: false,
                            category: None,
                        };
                        let t = Self::load_texture(
                            target,
                            &plain_tex,
                            None,
                            &root,
                            &mut self.device
                        );
                        // Keep ownership so the texture is deleted on deinit
                        // (see the `owned_external_images` cleanup).
                        let extex = t.0.into_external();
                        self.owned_external_images.insert(key, extex.clone());
                        e.insert(extex.internal_id()).clone()
                    }
                };

                let value = (CapturedExternalImageData::NativeTexture(tid), plain_ext.uv);
                image_handler.data.insert(key, value);
            }
        }

        self.device.begin_frame();
        self.gpu_cache_texture.remove_texture(&mut self.device);

        if let Some(renderer) = config.deserialize_for_resource::<PlainRenderer, _>("renderer") {
            info!("loading cached textures");
            self.device_size = renderer.device_size;

            // Drop all live cache textures before loading the captured set.
            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
                self.device.delete_texture(item.texture);
            }
            for (id, texture) in renderer.textures {
                info!("\t{}", texture.data);
                let target = ImageBufferKind::Texture2D;
                let t = Self::load_texture(
                    target,
                    &texture,
                    Some(RenderTargetInfo { has_depth: texture.has_depth }),
                    &root,
                    &mut self.device
                );
                self.texture_resolver.texture_cache_map.insert(id, CacheTexture {
                    texture: t.0,
                    category: texture.category.unwrap_or(TextureCacheCategory::Standalone),
                });
            }

            info!("loading gpu cache");
            let (t, gpu_cache_data) = Self::load_texture(
                ImageBufferKind::Texture2D,
                &renderer.gpu_cache,
                Some(RenderTargetInfo { has_depth: false }),
                &root,
                &mut self.device,
            );
            self.gpu_cache_texture.load_from_data(t, gpu_cache_data);
            self.gpu_cache_frame_id = renderer.gpu_cache_frame_id;
        } else {
            info!("loading cached textures");
            // NOTE(review): `begin_frame()` was already called just before
            // this `if let` — this second call looks redundant; confirm the
            // device layer tolerates nested begin_frame calls.
            self.device.begin_frame();
            for (_id, item) in self.texture_resolver.texture_cache_map.drain() {
                self.device.delete_texture(item.texture);
            }
        }
        self.device.end_frame();

        self.external_image_handler = Some(Box::new(image_handler) as Box<_>);
        info!("done.");
    }
}
6382
/// Distinguishes the main framebuffer from other draw targets, used by the
/// blend-mode helpers above: the SHOW_OVERDRAW debug override only applies
/// when drawing to `Main`.
#[derive(Clone, Copy, PartialEq)]
enum FramebufferKind {
    /// The main framebuffer (the final output destination).
    Main,
    /// Any other (intermediate) render target.
    Other,
}
6388
6389fn should_skip_batch(kind: &BatchKind, flags: DebugFlags) -> bool {
6390    match kind {
6391        BatchKind::TextRun(_) => {
6392            flags.contains(DebugFlags::DISABLE_TEXT_PRIMS)
6393        }
6394        BatchKind::Brush(BrushBatchKind::LinearGradient) => {
6395            flags.contains(DebugFlags::DISABLE_GRADIENT_PRIMS)
6396        }
6397        _ => false,
6398    }
6399}
6400
impl CompositeState {
    /// Use the client provided native compositor interface to add all picture
    /// cache tiles to the OS compositor
    ///
    /// `clear_color` and `dirty_rects` are forwarded to the compositor's
    /// `start_compositing` call once every surface has been added.
    fn composite_native(
        &self,
        clear_color: ColorF,
        dirty_rects: &[DeviceIntRect],
        device: &mut Device,
        compositor: &mut dyn Compositor,
    ) {
        // Add each surface to the visual tree. z-order is implicit based on
        // order added. Offset and clip rect apply to all tiles within this
        // surface.
        for surface in &self.descriptor.surfaces {
            compositor.add_surface(
                device,
                surface.surface_id.expect("bug: no native surface allocated"),
                surface.transform,
                surface.clip_rect.to_i32(),
                surface.image_rendering,
                surface.rounded_clip_rect.to_i32(),
                surface.rounded_clip_radii,
            );
        }
        compositor.start_compositing(device, clear_color, dirty_rects, &[]);
    }
}
6428
// Fix: the test module was missing `#[cfg(test)]`, so it was compiled into
// non-test builds. Gating it keeps test-only code out of release builds.
#[cfg(test)]
mod tests {
    /// Sanity check for `BufferDamageTracker`: based on the assertions below,
    /// `get_damage_rect(age)` returns `None` for age 0, a zero rect for a
    /// fresh buffer, and otherwise the union of the dirty rects pushed since
    /// a buffer of that age was last drawn.
    #[test]
    fn test_buffer_damage_tracker() {
        use super::BufferDamageTracker;
        use api::units::{DevicePoint, DeviceRect, DeviceSize};

        let mut tracker = BufferDamageTracker::default();
        assert_eq!(tracker.get_damage_rect(0), None);
        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(2), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(3), Some(DeviceRect::zero()));

        let damage1 = DeviceRect::from_origin_and_size(DevicePoint::new(10.0, 10.0), DeviceSize::new(10.0, 10.0));
        let damage2 = DeviceRect::from_origin_and_size(DevicePoint::new(20.0, 20.0), DeviceSize::new(10.0, 10.0));
        let combined = damage1.union(&damage2);

        // After one push, age-2 and age-3 buffers both need damage1 repainted.
        tracker.push_dirty_rect(&damage1);
        assert_eq!(tracker.get_damage_rect(0), None);
        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(2), Some(damage1));
        assert_eq!(tracker.get_damage_rect(3), Some(damage1));

        // After a second push, an age-3 buffer needs the union of both rects.
        tracker.push_dirty_rect(&damage2);
        assert_eq!(tracker.get_damage_rect(0), None);
        assert_eq!(tracker.get_damage_rect(1), Some(DeviceRect::zero()));
        assert_eq!(tracker.get_damage_rect(2), Some(damage2));
        assert_eq!(tracker.get_damage_rect(3), Some(combined));
    }
}